From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Wed, 18 May 2022 09:38:43
Message-Id: 1652866703.5aac3dbc0e90b2a0dda13a7b7026af118e288358.mpagano@gentoo
commit: 5aac3dbc0e90b2a0dda13a7b7026af118e288358
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 18 09:38:23 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 18 09:38:23 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5aac3dbc

Linux patch 5.17.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1008_linux-5.17.9.patch | 3768 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3772 insertions(+)

diff --git a/0000_README b/0000_README
index 30109237..ad4b906b 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.17.8.patch
From: http://www.kernel.org
Desc: Linux 5.17.8

+Patch: 1008_linux-5.17.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.17.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.17.9.patch b/1008_linux-5.17.9.patch
new file mode 100644
index 00000000..e6f7c06a
--- /dev/null
+++ b/1008_linux-5.17.9.patch
@@ -0,0 +1,3768 @@
+diff --git a/Makefile b/Makefile
+index 3cf179812f0f9..aba139bbd1c70 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
+index 0c70eb688a00c..2a0739a2350be 100644
+--- a/arch/arm/include/asm/io.h
++++ b/arch/arm/include/asm/io.h
+@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+ #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+ extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
++extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
++ unsigned long flags);
++#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+ #endif
+
+ /*
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index 197f8eb3a7752..09c0bb9aeb3c5 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -489,3 +489,11 @@ void __init early_ioremap_init(void)
+ {
+ early_ioremap_setup();
+ }
++
++bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
++ unsigned long flags)
++{
++ unsigned long pfn = PHYS_PFN(offset);
++
++ return memblock_is_map_memory(pfn);
++}
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 7fd836bea7eb4..3995652daf81a 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+ extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
++extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
++ unsigned long flags);
++#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
++
+ #endif /* __ASM_IO_H */
+diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
+index 88b3e2a214084..db557856854e7 100644
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -74,6 +74,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
+ obj-y += vdso-wrap.o
+ obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+
++# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
++$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
++$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
++
+ obj-y += probes/
+ head-y := head.o
+ extra-y += $(head-y) vmlinux.lds
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index 172452f79e462..ac1964ebed1ef 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -52,9 +52,6 @@ GCOV_PROFILE := n
+ targets += vdso.lds
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+-# Force dependency (incbin is bad)
+-$(obj)/vdso.o : $(obj)/vdso.so
+-
+ # Link rule for the .so file, .lds has to be first
+ $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
+index 6c01b63ff56df..2c2036eb0df7b 100644
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -130,9 +130,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
+ targets += vdso.lds
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+-# Force dependency (vdso.s includes vdso.so through incbin)
+-$(obj)/vdso.o: $(obj)/vdso.so
+-
+ include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
+index b7c81dacabf07..b21f91cd830db 100644
+--- a/arch/arm64/mm/ioremap.c
++++ b/arch/arm64/mm/ioremap.c
+@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
+ {
+ early_ioremap_setup();
+ }
++
++bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
++ unsigned long flags)
++{
++ unsigned long pfn = PHYS_PFN(offset);
++
++ return pfn_is_map_memory(pfn);
++}
+diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
+index e3ab9df6cf199..6cfcd20d46686 100644
+--- a/arch/powerpc/kvm/book3s_32_sr.S
++++ b/arch/powerpc/kvm/book3s_32_sr.S
+@@ -122,11 +122,27 @@
+
+ /* 0x0 - 0xb */
+
+- /* 'current->mm' needs to be in r4 */
+- tophys(r4, r2)
+- lwz r4, MM(r4)
+- tophys(r4, r4)
+- /* This only clobbers r0, r3, r4 and r5 */
++ /* switch_mmu_context() needs paging, let's enable it */
++ mfmsr r9
++ ori r11, r9, MSR_DR
++ mtmsr r11
++ sync
++
++ /* switch_mmu_context() clobbers r12, rescue it */
++ SAVE_GPR(12, r1)
++
++ /* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
++ lwz r4, MM(r2)
+ bl switch_mmu_context
+
++ /* restore r12 */
++ REST_GPR(12, r1)
++
++ /* Disable paging again */
++ mfmsr r9
++ li r6, MSR_DR
++ andc r9, r9, r6
++ mtmsr r9
++ sync
++
+ .endm
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 609e3697324b1..6e42252214dd8 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -30,6 +30,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
+ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
++
++ifdef CONFIG_CC_IS_GCC
++ ifeq ($(call cc-ifversion, -ge, 1200, y), y)
++ ifeq ($(call cc-ifversion, -lt, 1300, y), y)
++ KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
++ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
++ endif
++ endif
++endif
++
+ UTS_MACHINE := s390x
+ STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
+ CHECKFLAGS += -D__s390__ -D__s390x__
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 96d34ebb20a9e..e2942335d1437 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -902,6 +902,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end
+
+ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
+ {
++ const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
++
+ vmemmap_flush_unused_pmd();
+
+ /*
+@@ -914,8 +916,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
+ * Mark with PAGE_UNUSED the unused parts of the new memmap range
+ */
+ if (!IS_ALIGNED(start, PMD_SIZE))
+- memset((void *)start, PAGE_UNUSED,
+- start - ALIGN_DOWN(start, PMD_SIZE));
++ memset((void *)page, PAGE_UNUSED, start - page);
+
+ /*
+ * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
+index 94d1789a233e0..406a907a4caec 100644
+--- a/drivers/base/firmware_loader/main.c
++++ b/drivers/base/firmware_loader/main.c
+@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ size_t offset, u32 opt_flags)
+ {
+ struct firmware *fw = NULL;
++ struct cred *kern_cred = NULL;
++ const struct cred *old_cred;
+ bool nondirect = false;
+ int ret;
+
+@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+
++ /*
++ * We are about to try to access the firmware file. Because we may have been
++ * called by a driver when serving an unrelated request from userland, we use
++ * the kernel credentials to read the file.
++ */
++ kern_cred = prepare_kernel_cred(NULL);
++ if (!kern_cred) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ old_cred = override_creds(kern_cred);
++
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
+
+ /* Only full reads can support decompression, platform, and sysfs. */
+@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ } else
+ ret = assign_fw(fw, device);
+
++ revert_creds(old_cred);
++ put_cred(kern_cred);
++
+ out:
+ if (ret < 0) {
+ fw_abort_batch_reqs(fw);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 602b12d7470d2..a6fc96e426687 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -543,10 +543,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+ file->f_mode |= FMODE_LSEEK;
+ dmabuf->file = file;
+
+- ret = dma_buf_stats_setup(dmabuf);
+- if (ret)
+- goto err_sysfs;
+-
+ mutex_init(&dmabuf->lock);
+ INIT_LIST_HEAD(&dmabuf->attachments);
+
+@@ -554,6 +550,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+ list_add(&dmabuf->list_node, &db_list.head);
+ mutex_unlock(&db_list.lock);
+
++ ret = dma_buf_stats_setup(dmabuf);
++ if (ret)
++ goto err_sysfs;
++
+ return dmabuf;
+
+ err_sysfs:
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index b51368fa30253..86acff764e7f6 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1405,14 +1405,8 @@ static int smu_disable_dpms(struct smu_context *smu)
+ {
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+- /*
+- * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
+- * the workaround which always reset the asic in suspend.
+- * It's likely that workaround will be dropped in the future.
+- * Then the change here should be dropped together.
+- */
+ bool use_baco = !smu->is_apu &&
+- (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
++ ((amdgpu_in_reset(adev) &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+index daf9f87477ba1..a2141d3d9b1d2 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -46,8 +46,9 @@ static bool
+ nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
+ struct nouveau_backlight *bl)
+ {
+- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
+- if (nb < 0 || nb >= 100)
++ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
++
++ if (nb < 0)
+ return false;
+ if (nb > 0)
+ snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
+@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
+ nv_encoder, ops, &props);
+ if (IS_ERR(bl->dev)) {
+ if (bl->id >= 0)
+- ida_simple_remove(&bl_ida, bl->id);
++ ida_free(&bl_ida, bl->id);
+ ret = PTR_ERR(bl->dev);
+ goto fail_alloc;
+ }
+@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
+ return;
+
+ if (bl->id >= 0)
+- ida_simple_remove(&bl_ida, bl->id);
++ ida_free(&bl_ida, bl->id);
+
+ backlight_device_unregister(bl->dev);
+ nv_conn->backlight = NULL;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+index d0d52c1d4aee0..950a3de3e1166 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+
+ mutex_init(&tdev->iommu.mutex);
+
+- if (iommu_present(&platform_bus_type)) {
++ if (device_iommu_mapped(dev)) {
+ tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tdev->iommu.domain)
+ goto error;
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 3a1626f261e5a..3f651364ed7ad 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -38,6 +38,7 @@
+ #include <drm/drm_scdc_helper.h>
+ #include <linux/clk.h>
+ #include <linux/component.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/of_address.h>
+ #include <linux/of_gpio.h>
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+index a3bfbb6c3e14a..162dfeb1cc5ad 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+ *seqno = atomic_add_return(1, &dev_priv->marker_seq);
+ } while (*seqno == 0);
+
+- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
++ if (!vmw_has_fences(dev_priv)) {
+
+ /*
+ * Don't request hardware to send a fence. The
+@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
+ */
+ bool vmw_cmd_supported(struct vmw_private *vmw)
+ {
+- if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+- SVGA_CAP_CMD_BUFFERS_2)) != 0)
+- return true;
++ bool has_cmdbufs =
++ (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
++ SVGA_CAP_CMD_BUFFERS_2)) != 0;
++ if (vmw_is_svga_v3(vmw))
++ return (has_cmdbufs &&
++ (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
+ /*
+ * We have FIFO cmd's
+ */
+- return vmw->fifo_mem != NULL;
++ return has_cmdbufs || vmw->fifo_mem != NULL;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index ea3ecdda561dc..6de0b9ef5c773 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
+ outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+ }
+
++static inline bool vmw_has_fences(struct vmw_private *vmw)
++{
++ if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
++ SVGA_CAP_CMD_BUFFERS_2)) != 0)
++ return true;
++ return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
++}
++
+ #endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 8ee34576c7d08..adf17c740656d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+
+ static int vmw_fb_kms_framebuffer(struct fb_info *info)
+ {
+- struct drm_mode_fb_cmd2 mode_cmd;
++ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ struct vmw_fb_par *par = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct drm_framebuffer *cur_fb;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 5001b87aebe81..a16b854ca18a7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
+ return container_of(fence->base.lock, struct vmw_fence_manager, lock);
+ }
+
++static u32 vmw_fence_goal_read(struct vmw_private *vmw)
++{
++ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
++ return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
++ else
++ return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
++}
++
++static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
++{
++ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
++ vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
++ else
++ vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
++}
++
+ /*
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ if (likely(!fman->seqno_valid))
+ return false;
+
+- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
++ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
+ if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+ return false;
+
+@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ list_for_each_entry(fence, &fman->fence_list, head) {
+ if (!list_empty(&fence->seq_passed_actions)) {
+ fman->seqno_valid = true;
+- vmw_fifo_mem_write(fman->dev_priv,
+- SVGA_FIFO_FENCE_GOAL,
+- fence->base.seqno);
++ vmw_fence_goal_write(fman->dev_priv,
++ fence->base.seqno);
+ break;
+ }
+ }
+@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+ if (dma_fence_is_signaled_locked(&fence->base))
+ return false;
+
+- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
++ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
+ if (likely(fman->seqno_valid &&
+ goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
+ return false;
+
+- vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
+- fence->base.seqno);
++ vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
+ fman->seqno_valid = true;
+
+ return true;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index c5191de365ca1..fe4732bf2c9d2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -32,6 +32,14 @@
+
+ #define VMW_FENCE_WRAP (1 << 24)
+
++static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
++{
++ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
++ return SVGA_IRQFLAG_REG_FENCE_GOAL;
++ else
++ return SVGA_IRQFLAG_FENCE_GOAL;
++}
++
+ /**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
+ wake_up_all(&dev_priv->fifo_queue);
+
+ if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+- SVGA_IRQFLAG_FENCE_GOAL)) &&
++ vmw_irqflag_fence_goal(dev_priv))) &&
+ !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
+
+@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ return true;
+
+- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
+- vmw_fifo_idle(dev_priv, seqno))
++ if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
+ return true;
+
+ /**
+@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ unsigned long timeout)
+ {
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
++ bool fifo_down = false;
+
+ uint32_t count = 0;
+ uint32_t signal_seq;
+@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ */
+
+ if (fifo_idle) {
+- down_read(&fifo_state->rwsem);
+ if (dev_priv->cman) {
+ ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+ 10*HZ);
+ if (ret)
+ goto out_err;
++ } else if (fifo_state) {
++ down_read(&fifo_state->rwsem);
++ fifo_down = true;
+ }
+ }
+
+@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ }
+ }
+ finish_wait(&dev_priv->fence_queue, &__wait);
+- if (ret == 0 && fifo_idle)
++ if (ret == 0 && fifo_idle && fifo_state)
+ vmw_fence_write(dev_priv, signal_seq);
+
+ wake_up_all(&dev_priv->fence_queue);
+ out_err:
+- if (fifo_idle)
++ if (fifo_down)
+ up_read(&fifo_state->rwsem);
+
+ return ret;
+@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+
+ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+ {
+- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
++ vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
+ }
+
+ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+ {
+- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
++ vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index bbd2f4ec08ec1..93431e8f66060 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd,
+ is_bo_proxy);
+-
+ /*
+ * vmw_create_bo_proxy() adds a reference that is no longer
+ * needed
+@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ ret = vmw_user_lookup_handle(dev_priv, file_priv,
+ mode_cmd->handles[0],
+ &surface, &bo);
+- if (ret)
++ if (ret) {
++ DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
++ mode_cmd->handles[0], mode_cmd->handles[0]);
+ goto err_out;
++ }
+
+
+ if (!bo &&
+ !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
+- DRM_ERROR("Surface size cannot exceed %dx%d",
++ DRM_ERROR("Surface size cannot exceed %dx%d\n",
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ goto err_out;
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 8df25f1079bac..d958d87b7edcf 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -944,7 +944,7 @@ config SENSORS_LTC4261
+
+ config SENSORS_LTQ_CPUTEMP
+ bool "Lantiq cpu temperature sensor driver"
+- depends on LANTIQ
++ depends on SOC_XWAY
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU.
+diff --git a/drivers/hwmon/asus_wmi_sensors.c b/drivers/hwmon/asus_wmi_sensors.c
+index c80eee874b6c0..49784a6ea23a3 100644
+--- a/drivers/hwmon/asus_wmi_sensors.c
++++ b/drivers/hwmon/asus_wmi_sensors.c
+@@ -71,7 +71,7 @@ static const struct dmi_system_id asus_wmi_dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X399-A"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI EXTREME"),
+- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO"),
++ DMI_EXACT_MATCH_ASUS_BOARD_NAME("CROSSHAIR VI HERO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO (WI-FI AC)"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO"),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
+diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
+index 938a8b9ec70dd..6830e029995dc 100644
+--- a/drivers/hwmon/f71882fg.c
++++ b/drivers/hwmon/f71882fg.c
+@@ -1578,8 +1578,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+ temp *= 125;
+ if (sign)
+ temp -= 128000;
+- } else
+- temp = data->temp[nr] * 1000;
++ } else {
++ temp = ((s8)data->temp[nr]) * 1000;
++ }
+
+ return sprintf(buf, "%d\n", temp);
+ }
+diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
+index b86d9df7105d1..52c9e7d3f2ae7 100644
+--- a/drivers/hwmon/tmp401.c
++++ b/drivers/hwmon/tmp401.c
+@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
+ return 0;
+ }
+
++static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
++ { .compatible = "ti,tmp401", },
++ { .compatible = "ti,tmp411", },
++ { .compatible = "ti,tmp431", },
++ { .compatible = "ti,tmp432", },
++ { .compatible = "ti,tmp435", },
++ { },
++};
++MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
++
+ static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
++ .of_match_table = of_match_ptr(tmp4xx_of_match),
+ },
+ .probe_new = tmp401_probe,
+ .id_table = tmp401_id,
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 082a3ddb0fa3b..632f65e53b63f 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -3242,15 +3242,10 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
+ */
+ void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
+ {
+- unsigned long flags;
+-
+ if (!cm_core)
+ return;
+
+- spin_lock_irqsave(&cm_core->ht_lock, flags);
+- if (timer_pending(&cm_core->tcp_timer))
+- del_timer_sync(&cm_core->tcp_timer);
+- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
++ del_timer_sync(&cm_core->tcp_timer);
+
+ destroy_workqueue(cm_core->event_wq);
+ cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 9050ca1f4285c..808f6e7a80482 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -1087,9 +1087,15 @@ static int of_count_icc_providers(struct device_node *np)
+ {
+ struct device_node *child;
+ int count = 0;
++ const struct of_device_id __maybe_unused ignore_list[] = {
++ { .compatible = "qcom,sc7180-ipa-virt" },
++ { .compatible = "qcom,sdx55-ipa-virt" },
++ {}
++ };
+
+ for_each_available_child_of_node(np, child) {
+- if (of_property_read_bool(child, "#interconnect-cells"))
++ if (of_property_read_bool(child, "#interconnect-cells") &&
++ likely(!of_match_node(ignore_list, child)))
+ count++;
+ count += of_count_icc_providers(child);
+ }
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+index 01e9b50b10a18..87bf522b9d2ee 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
+ dev_name(dev), err);
+ }
+
++static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
++ struct io_pgtable_cfg *pgtbl_cfg,
++ struct device *dev)
++{
++ struct arm_smmu_device *smmu = smmu_domain->smmu;
++ const struct device_node *np = smmu->dev->of_node;
++
++ /*
++ * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
++ * entries to not be invalidated correctly. The problem is that the walk
++ * cache index generated for IOVA is not same across translation and
++ * invalidation requests. This is leading to page faults when PMD entry
++ * is released during unmap and populated with new PTE table during
++ * subsequent map request. Disabling large page mappings avoids the
++ * release of PMD entry and avoid translations seeing stale PMD entry in
++ * walk cache.
++ * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
++ * Tegra234.
++ */
++ if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
++ of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
++ smmu->pgsize_bitmap = PAGE_SIZE;
++ pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
++ }
++
++ return 0;
++}
++
+ static const struct arm_smmu_impl nvidia_smmu_impl = {
+ .read_reg = nvidia_smmu_read_reg,
+ .write_reg = nvidia_smmu_write_reg,
+@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
+ .global_fault = nvidia_smmu_global_fault,
+ .context_fault = nvidia_smmu_context_fault,
+ .probe_finalize = nvidia_smmu_probe_finalize,
++ .init_context = nvidia_smmu_init_context,
+ };
+
+ static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+ .probe_finalize = nvidia_smmu_probe_finalize,
++ .init_context = nvidia_smmu_init_context,
+ };
+
+ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 6afb5db8244cd..6d15a743219f0 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -833,6 +833,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg, offset;
+
++ if (priv->wol_ports_mask & BIT(port))
++ return;
++
+ if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
+ if (priv->type == BCM4908_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index 3a529ee8c8340..831833911a525 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -449,7 +449,7 @@ static int aq_pm_freeze(struct device *dev)
+
+ static int aq_pm_suspend_poweroff(struct device *dev)
+ {
+- return aq_suspend_common(dev, false);
++ return aq_suspend_common(dev, true);
+ }
+
+ static int aq_pm_thaw(struct device *dev)
+@@ -459,7 +459,7 @@ static int aq_pm_thaw(struct device *dev)
+
+ static int aq_pm_resume_restore(struct device *dev)
+ {
+- return atl_resume_common(dev, false);
++ return atl_resume_common(dev, true);
+ }
+
+ static const struct dev_pm_ops aq_pm_ops = {
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index c2bfb25e087c1..64bf31ceb6d90 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -3999,6 +3999,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ goto err;
+ }
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
++ if (priv->wol_irq == -EPROBE_DEFER) {
++ err = priv->wol_irq;
++ goto err;
++ }
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index e7b4e3ed056c7..8d719f82854a9 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
+ goto out;
+ na = ret;
+
+- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
++ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
+ strim(p->id);
+- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
++ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
+ strim(p->sn);
+- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
++ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
+ strim(p->pn);
+- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
+- strim((char *)p->na);
++ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
++ strim(p->na);
+
+ out:
+ vfree(vpd);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 31b03fe78d3be..313a798fe3a9f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -7535,42 +7535,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
+ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ struct i40e_fwd_adapter *fwd)
+ {
++ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
+ int ret = 0, num_tc = 1, i, aq_err;
+- struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+- if (list_empty(&vsi->macvlan_list))
+- return -EINVAL;
+-
+ /* Go through the list and find an available channel */
+- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+- if (!i40e_is_channel_macvlan(ch)) {
+- ch->fwd = fwd;
++ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
++ if (!i40e_is_channel_macvlan(iter)) {
++ iter->fwd = fwd;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(vsi->netdev, vdev,
+ i,
+- ch->num_queue_pairs,
+- ch->base_queue);
+- for (i = 0; i < ch->num_queue_pairs; i++) {
++ iter->num_queue_pairs,
++ iter->base_queue);
++ for (i = 0; i < iter->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+- pf_q = ch->base_queue + i;
++ pf_q = iter->base_queue + i;
+
+ /* Get to TX ring ptr */
+ tx_ring = vsi->tx_rings[pf_q];
+- tx_ring->ch = ch;
++ tx_ring->ch = iter;
+
+ /* Get the RX ring ptr */
+ rx_ring = vsi->rx_rings[pf_q];
+- rx_ring->ch = ch;
++ rx_ring->ch = iter;
+ }
++ ch = iter;
+ break;
+ }
+ }
+
++ if (!ch)
++ return -EINVAL;
++
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 9c04a71a9fca3..e2ffdf2726fde 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -546,6 +546,7 @@ struct ice_pf {
+ struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
+ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
+ struct mutex tc_mutex; /* lock to protect TC changes */
++ struct mutex adev_mutex; /* lock to protect aux device access */
+ u32 msg_enable;
+ struct ice_ptp ptp;
+ u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
+diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
+index 5559230eff8b5..0deed090645f8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_idc.c
++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
+@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+ if (WARN_ON_ONCE(!in_task()))
+ return;
+
++ mutex_lock(&pf->adev_mutex);
+ if (!pf->adev)
+- return;
++ goto finish;
+
+ device_lock(&pf->adev->dev);
+ iadrv = ice_get_auxiliary_drv(pf);
+ if (iadrv && iadrv->event_handler)
+ iadrv->event_handler(pf, event);
+ device_unlock(&pf->adev->dev);
++finish:
++ mutex_unlock(&pf->adev_mutex);
+ }
+
+ /**
+@@ -285,7 +288,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+- pf->adev = adev;
+ iadev->pf = pf;
+
+ adev->id = pf->aux_idx;
+@@ -295,18 +297,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+- pf->adev = NULL;
+ kfree(iadev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+- pf->adev = NULL;
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
++ mutex_lock(&pf->adev_mutex);
++ pf->adev = adev;
++ mutex_unlock(&pf->adev_mutex);
++
+ return 0;
+ }
+
+@@ -315,12 +319,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
+ */
+ void ice_unplug_aux_dev(struct ice_pf *pf)
+ {
+- if (!pf->adev)
+- return;
++ struct auxiliary_device *adev;
+
+- auxiliary_device_delete(pf->adev);
+- auxiliary_device_uninit(pf->adev);
++ mutex_lock(&pf->adev_mutex);
++ adev = pf->adev;
+ pf->adev = NULL;
++ mutex_unlock(&pf->adev_mutex);
++
++ if (adev) {
++ auxiliary_device_delete(adev);
++ auxiliary_device_uninit(adev);
++ }
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index e347030ee2e33..7f6715eb862fe 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3682,6 +3682,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
+ static void ice_deinit_pf(struct ice_pf *pf)
+ {
+ ice_service_task_stop(pf);
++ mutex_destroy(&pf->adev_mutex);
+ mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->tc_mutex);
+ mutex_destroy(&pf->avail_q_mutex);
+@@ -3762,6 +3763,7 @@ static int ice_init_pf(struct ice_pf *pf)
+
+ mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->tc_mutex);
++ mutex_init(&pf->adev_mutex);
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 000c39d163a28..45ae97b8b97db 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2279,6 +2279,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+
+ /**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
++ * @hw: pointer to the hw struct
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+@@ -2287,7 +2288,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
++static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
+ {
+ u8 idx;
+
+@@ -2296,11 +2297,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
++ u64 raw_tstamp;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
++ /* Read tstamp to be able to use this register again */
++ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
++ &raw_tstamp);
++
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+@@ -2322,7 +2328,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
+
+ ice_ptp_update_cached_phctime(pf);
+
+- ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
++ ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 2bee8f10ad89c..0cc8b7e06b72a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -3300,13 +3300,52 @@ error_param:
+ NULL, 0);
+ }
+
++/**
++ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
++ * @vf: VF to disable queue for
++ * @vsi: VSI for the VF
++ * @q_id: VF relative (0-based) queue ID
++ *
++ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
++ * disabled then clear q_id bit in the enabled queues bitmap and return
++ * success. Otherwise return error.
++ */
++static int
++ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
++{
++ struct ice_txq_meta txq_meta = { 0 };
++ struct ice_tx_ring *ring;
++ int err;
++
++ if (!test_bit(q_id, vf->txq_ena))
++ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
++ q_id, vsi->vsi_num);
++
++ ring = vsi->tx_rings[q_id];
++ if (!ring)
++ return -EINVAL;
++
++ ice_fill_txq_meta(vsi, ring, &txq_meta);
++
++ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
++ if (err) {
++ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
++ q_id, vsi->vsi_num);
++ return err;
++ }
++
++ /* Clear enabled queues flag */
++ clear_bit(q_id, vf->txq_ena);
++
++ return 0;
++}
++
+ /**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+- * called from the VF to disable all or specific
+- * queue(s)
++ * called from the VF to disable all or specific queue(s)
+ */
+ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ {
+@@ -3343,30 +3382,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
+- struct ice_txq_meta txq_meta = { 0 };
+-
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+- if (!test_bit(vf_q_id, vf->txq_ena))
+- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+- vf_q_id, vsi->vsi_num);
+-
+- ice_fill_txq_meta(vsi, ring, &txq_meta);
+-
+- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+- ring, &txq_meta)) {
+- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+- vf_q_id, vsi->vsi_num);
++ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+-
+- /* Clear enabled queues flag */
+- clear_bit(vf_q_id, vf->txq_ena);
+ }
+ }
+
+@@ -3615,6 +3639,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
++ /* Disable any existing queue first */
++ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ goto error_param;
++ }
++
++ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 3ad10c793308e..66298e2235c91 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -395,7 +395,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
+ int i, k;
+
+- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
++ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+
+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
+ return;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+index 01cf5a6a26bd3..a2ee695a3f178 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+@@ -568,10 +568,8 @@ static int
+ mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+ {
+- struct __ip6_tnl_parm parms6;
+-
+- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+- return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
++ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
++ &ipip_entry->parms.daddr.addr6,
+ &ipip_entry->dip_kvdl_index);
+ }
+
+@@ -579,10 +577,7 @@ static void
+ mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+ {
+- struct __ip6_tnl_parm parms6;
+-
+- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+- mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
++ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
+ }
+
+ static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
+index fdb4d7e7296c8..cb602a2261497 100644
+--- a/drivers/net/ethernet/mscc/ocelot_flower.c
++++ b/drivers/net/ethernet/mscc/ocelot_flower.c
+@@ -278,9 +278,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_TRAP:
+- if (filter->block_id != VCAP_IS2) {
++ if (filter->block_id != VCAP_IS2 ||
++ filter->lookup != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+- "Trap action can only be offloaded to VCAP IS2");
++ "Trap action can only be offloaded to VCAP IS2 lookup 0");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
+index d3544413a8a43..f159726788bad 100644
+--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
++++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
+@@ -373,7 +373,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
+ OCELOT_VCAP_BIT_0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ ~filter->ingress_port_mask);
+- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
+ OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
+@@ -1182,6 +1181,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *tmp;
+
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
++ /* Read back the filter's counters before moving it */
++ vcap_entry_get(ocelot, i - 1, tmp);
+ vcap_entry_set(ocelot, i, tmp);
+ }
+
+@@ -1221,7 +1222,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter del_filter;
+ int i, index;
+
++ /* Need to inherit the block_id so that vcap_entry_set()
++ * does not get confused and knows where to install it.
++ */
+ memset(&del_filter, 0, sizeof(del_filter));
++ del_filter.block_id = filter->block_id;
+
+ /* Gets index of the filter */
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+@@ -1236,6 +1241,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *tmp;
+
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
++ /* Read back the filter's counters before moving it */
++ vcap_entry_get(ocelot, i + 1, tmp);
+ vcap_entry_set(ocelot, i, tmp);
+ }
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+index 40fa5bce2ac2c..d324c292318b3 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+@@ -255,7 +255,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ err = ionic_map_bars(ionic);
+ if (err)
+- goto err_out_pci_disable_device;
++ goto err_out_pci_release_regions;
+
+ /* Configure the device */
+ err = ionic_setup(ionic);
+@@ -359,6 +359,7 @@ err_out_teardown:
+
+ err_out_unmap_bars:
+ ionic_unmap_bars(ionic);
++err_out_pci_release_regions:
+ pci_release_regions(pdev);
+ err_out_pci_disable_device:
+ pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index cf366ed2557cc..1ab725d554a51 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -3579,6 +3579,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
+ n_parts++;
+ }
+
++ if (!n_parts) {
++ kfree(parts);
++ return 0;
++ }
++
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ fail:
+ if (rc)
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index 40bfd0ad7d053..eec0db76d888c 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -845,7 +845,9 @@ static void efx_set_xdp_channels(struct efx_nic *efx)
+
+ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ {
+- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
++ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
++ *ptp_channel = efx_ptp_channel(efx);
++ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+@@ -916,6 +918,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+
+ efx_set_xdp_channels(efx);
+ out:
++ efx->ptp_data = NULL;
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ }
+ }
+
++ efx->ptp_data = ptp_data;
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+
+@@ -944,6 +948,7 @@ rollback:
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
++ efx_ptp_update_channel(efx, ptp_channel);
+ goto out;
+ }
+
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index f0ef515e2ade5..4625f85acab2e 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -45,6 +45,7 @@
+ #include "farch_regs.h"
+ #include "tx.h"
+ #include "nic.h" /* indirectly includes ptp.h */
++#include "efx_channels.h"
+
+ /* Maximum number of events expected to make up a PTP event */
+ #define MAX_EVENT_FRAGS 3
+@@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+ return efx->ptp_data ? efx->ptp_data->channel : NULL;
+ }
+
++void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
++{
++ if (efx->ptp_data)
++ efx->ptp_data->channel = channel;
++}
++
+ static u32 last_sync_timestamp_major(struct efx_nic *efx)
+ {
+ struct efx_channel *channel = efx_ptp_channel(efx);
+@@ -1443,6 +1450,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
+ int rc = 0;
+ unsigned int pos;
+
++ if (efx->ptp_data) {
++ efx->ptp_data->channel = channel;
++ return 0;
++ }
++
+ ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+ efx->ptp_data = ptp;
+ if (!efx->ptp_data)
+@@ -2176,7 +2188,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
+ .pre_probe = efx_ptp_probe_channel,
+ .post_remove = efx_ptp_remove_channel,
+ .get_name = efx_ptp_get_channel_name,
+- /* no copy operation; there is no need to reallocate this channel */
++ .copy = efx_copy_channel,
+ .receive_skb = efx_ptp_rx,
+ .want_txqs = efx_ptp_want_txqs,
+ .keep_eventq = false,
+diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
+index 9855e8c9e544d..7b1ef7002b3f0 100644
+--- a/drivers/net/ethernet/sfc/ptp.h
++++ b/drivers/net/ethernet/sfc/ptp.h
+@@ -16,6 +16,7 @@ struct ethtool_ts_info;
+ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+ void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+ struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
++void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
+ void efx_ptp_remove(struct efx_nic *efx);
+ int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+ int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index 08a670bf2cd19..c2b142cf75eb4 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -935,8 +935,6 @@ static int xemaclite_open(struct net_device *dev)
+ xemaclite_disable_interrupts(lp);
+
+ if (lp->phy_node) {
+- u32 bmcr;
+-
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+@@ -947,19 +945,6 @@ static int xemaclite_open(struct net_device *dev)
+
+ /* EmacLite doesn't support giga-bit speeds */
+ phy_set_max_speed(lp->phy_dev, SPEED_100);
+-
+- /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+- phy_write(lp->phy_dev, MII_CTRL1000, 0);
+-
+- /* Advertise only 10 and 100mbps full/half duplex speeds */
+- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
+- ADVERTISE_CSMA);
+-
+- /* Restart auto negotiation */
+- bmcr = phy_read(lp->phy_dev, MII_BMCR);
+- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+- phy_write(lp->phy_dev, MII_BMCR, bmcr);
+-
+ phy_start(lp->phy_dev);
+ }
+
1458 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
1459 +index 281cebc3d00cc..cfb5378bbb390 100644
1460 +--- a/drivers/net/phy/micrel.c
1461 ++++ b/drivers/net/phy/micrel.c
1462 +@@ -1594,7 +1594,7 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
1463 +
1464 + static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
1465 + {
1466 +- u32 data;
1467 ++ int data;
1468 +
1469 + phy_lock_mdio_bus(phydev);
1470 + __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
1471 +@@ -1725,6 +1725,7 @@ static struct phy_driver ksphy_driver[] = {
1472 + .name = "Micrel KS8737",
1473 + /* PHY_BASIC_FEATURES */
1474 + .driver_data = &ks8737_type,
1475 ++ .probe = kszphy_probe,
1476 + .config_init = kszphy_config_init,
1477 + .config_intr = kszphy_config_intr,
1478 + .handle_interrupt = kszphy_handle_interrupt,
1479 +@@ -1850,8 +1851,8 @@ static struct phy_driver ksphy_driver[] = {
1480 + .config_init = ksz8061_config_init,
1481 + .config_intr = kszphy_config_intr,
1482 + .handle_interrupt = kszphy_handle_interrupt,
1483 +- .suspend = kszphy_suspend,
1484 +- .resume = kszphy_resume,
1485 ++ .suspend = genphy_suspend,
1486 ++ .resume = genphy_resume,
1487 + }, {
1488 + .phy_id = PHY_ID_KSZ9021,
1489 + .phy_id_mask = 0x000ffffe,
1490 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1491 +index beb2b66da1324..f122026c46826 100644
1492 +--- a/drivers/net/phy/phy.c
1493 ++++ b/drivers/net/phy/phy.c
1494 +@@ -970,8 +970,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
1495 + {
1496 + struct phy_device *phydev = phy_dat;
1497 + struct phy_driver *drv = phydev->drv;
1498 ++ irqreturn_t ret;
1499 +
1500 +- return drv->handle_interrupt(phydev);
1501 ++ mutex_lock(&phydev->lock);
1502 ++ ret = drv->handle_interrupt(phydev);
1503 ++ mutex_unlock(&phydev->lock);
1504 ++
1505 ++ return ret;
1506 + }
1507 +
1508 + /**
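
The phy.c hunk above fixes a race by taking the device mutex inside the threaded interrupt handler, so handle_interrupt() can no longer run concurrently with other state changes on the same PHY. A minimal sketch of the pattern; every name below is made up for illustration, not taken from phylib:

    /* Sketch: serialize a threaded IRQ handler with a per-device mutex.
     * Threaded handlers run in process context, so sleeping locks are fine.
     */
    #include <linux/interrupt.h>
    #include <linux/mutex.h>

    struct my_dev {
        struct mutex lock;                    /* guards device state */
        irqreturn_t (*handle)(struct my_dev *dev);
    };

    static irqreturn_t my_threaded_handler(int irq, void *dev_id)
    {
        struct my_dev *dev = dev_id;
        irqreturn_t ret;

        mutex_lock(&dev->lock);
        ret = dev->handle(dev);               /* runs with state stable */
        mutex_unlock(&dev->lock);

        return ret;
    }
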
1509 +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
1510 +index 4720b24ca51b5..90dfefc1f5f8d 100644
1511 +--- a/drivers/net/phy/sfp.c
1512 ++++ b/drivers/net/phy/sfp.c
1513 +@@ -250,6 +250,7 @@ struct sfp {
1514 + struct sfp_eeprom_id id;
1515 + unsigned int module_power_mW;
1516 + unsigned int module_t_start_up;
1517 ++ bool tx_fault_ignore;
1518 +
1519 + #if IS_ENABLED(CONFIG_HWMON)
1520 + struct sfp_diag diag;
1521 +@@ -1945,6 +1946,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
1522 + else
1523 + sfp->module_t_start_up = T_START_UP;
1524 +
1525 ++ if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) &&
1526 ++ !memcmp(id.base.vendor_pn, "MA5671A ", 16))
1527 ++ sfp->tx_fault_ignore = true;
1528 ++ else
1529 ++ sfp->tx_fault_ignore = false;
1530 ++
1531 + return 0;
1532 + }
1533 +
1534 +@@ -2397,7 +2404,10 @@ static void sfp_check_state(struct sfp *sfp)
1535 + mutex_lock(&sfp->st_mutex);
1536 + state = sfp_get_state(sfp);
1537 + changed = state ^ sfp->state;
1538 +- changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
1539 ++ if (sfp->tx_fault_ignore)
1540 ++ changed &= SFP_F_PRESENT | SFP_F_LOS;
1541 ++ else
1542 ++ changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
1543 +
1544 + for (i = 0; i < GPIO_MAX; i++)
1545 + if (changed & BIT(i))
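
The sfp.c hunk matches the affected module by its vendor strings. SFP EEPROM ID fields are fixed-width, space-padded byte arrays with no NUL terminator, which is why the comparison runs memcmp() over the full 16 bytes against padded literals. A self-contained illustration; the struct below is a stand-in, not the real sfp_eeprom_id layout:

    #include <stdbool.h>
    #include <string.h>

    struct module_id {
        char vendor_name[16];   /* space padded, not NUL terminated */
        char vendor_pn[16];
    };

    static bool is_quirky_module(const struct module_id *id)
    {
        /* Literals must be padded out to the full field width. */
        return !memcmp(id->vendor_name, "HUAWEI          ", 16) &&
               !memcmp(id->vendor_pn,   "MA5671A         ", 16);
    }
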
1546 +diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
1547 +index 293563b3f7842..412d812a07997 100644
1548 +--- a/drivers/net/wireless/ath/ath11k/core.c
1549 ++++ b/drivers/net/wireless/ath/ath11k/core.c
1550 +@@ -1275,6 +1275,7 @@ static void ath11k_core_restart(struct work_struct *work)
1551 +
1552 + ieee80211_stop_queues(ar->hw);
1553 + ath11k_mac_drain_tx(ar);
1554 ++ complete(&ar->completed_11d_scan);
1555 + complete(&ar->scan.started);
1556 + complete(&ar->scan.completed);
1557 + complete(&ar->peer_assoc_done);
1558 +diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
1559 +index 9e88ccca5ca75..09a2c1744a54c 100644
1560 +--- a/drivers/net/wireless/ath/ath11k/core.h
1561 ++++ b/drivers/net/wireless/ath/ath11k/core.h
1562 +@@ -38,6 +38,8 @@
1563 +
1564 + extern unsigned int ath11k_frame_mode;
1565 +
1566 ++#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
1567 ++
1568 + #define ATH11K_MON_TIMER_INTERVAL 10
1569 +
1570 + enum ath11k_supported_bw {
1571 +@@ -189,6 +191,12 @@ enum ath11k_scan_state {
1572 + ATH11K_SCAN_ABORTING,
1573 + };
1574 +
1575 ++enum ath11k_11d_state {
1576 ++ ATH11K_11D_IDLE,
1577 ++ ATH11K_11D_PREPARING,
1578 ++ ATH11K_11D_RUNNING,
1579 ++};
1580 ++
1581 + enum ath11k_dev_flags {
1582 + ATH11K_CAC_RUNNING,
1583 + ATH11K_FLAG_CORE_REGISTERED,
1584 +@@ -599,9 +607,8 @@ struct ath11k {
1585 + bool dfs_block_radar_events;
1586 + struct ath11k_thermal thermal;
1587 + u32 vdev_id_11d_scan;
1588 +- struct completion finish_11d_scan;
1589 +- struct completion finish_11d_ch_list;
1590 +- bool pending_11d;
1591 ++ struct completion completed_11d_scan;
1592 ++ enum ath11k_11d_state state_11d;
1593 + bool regdom_set_by_user;
1594 + };
1595 +
1596 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
1597 +index f54d5819477a4..b2dac859dfe16 100644
1598 +--- a/drivers/net/wireless/ath/ath11k/mac.c
1599 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
1600 +@@ -3596,26 +3596,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
1601 + if (ret)
1602 + goto exit;
1603 +
1604 +- /* Currently the pending_11d=true only happened 1 time while
1605 +- * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
1606 +- * ath11k_mac_op_add_interface(), after wlan interface up,
1607 +- * pending_11d=false always.
1608 +- * If remove below wait, it always happened scan fail and lead connect
1609 +- * fail while wlan interface up, because it has a 11d scan which is running
1610 +- * in firmware, and lead this scan failed.
1611 +- */
1612 +- if (ar->pending_11d) {
1613 +- long time_left;
1614 +- unsigned long timeout = 5 * HZ;
1615 +-
1616 +- if (ar->supports_6ghz)
1617 +- timeout += 5 * HZ;
1618 +-
1619 +- time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
1620 +- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
1621 +- "mac wait 11d channel list time left %ld\n", time_left);
1622 +- }
1623 +-
1624 + memset(&arg, 0, sizeof(arg));
1625 + ath11k_wmi_start_scan_init(ar, &arg);
1626 + arg.vdev_id = arvif->vdev_id;
1627 +@@ -3681,6 +3661,10 @@ exit:
1628 + kfree(arg.extraie.ptr);
1629 +
1630 + mutex_unlock(&ar->conf_mutex);
1631 ++
1632 ++ if (ar->state_11d == ATH11K_11D_PREPARING)
1633 ++ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
1634 ++
1635 + return ret;
1636 + }
1637 +
1638 +@@ -5809,7 +5793,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
1639 +
1640 + /* TODO: Do we need to enable ANI? */
1641 +
1642 +- ath11k_reg_update_chan_list(ar);
1643 ++ ath11k_reg_update_chan_list(ar, false);
1644 +
1645 + ar->num_started_vdevs = 0;
1646 + ar->num_created_vdevs = 0;
1647 +@@ -5876,6 +5860,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
1648 + cancel_work_sync(&ar->ab->update_11d_work);
1649 + cancel_work_sync(&ar->ab->rfkill_work);
1650 +
1651 ++ if (ar->state_11d == ATH11K_11D_PREPARING) {
1652 ++ ar->state_11d = ATH11K_11D_IDLE;
1653 ++ complete(&ar->completed_11d_scan);
1654 ++ }
1655 ++
1656 + spin_lock_bh(&ar->data_lock);
1657 + list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
1658 + list_del(&ppdu_stats->list);
1659 +@@ -6046,7 +6035,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
1660 + return false;
1661 + }
1662 +
1663 +-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
1664 ++void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
1665 + {
1666 + struct wmi_11d_scan_start_params param;
1667 + int ret;
1668 +@@ -6074,28 +6063,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
1669 +
1670 + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
1671 +
1672 +- if (wait)
1673 +- reinit_completion(&ar->finish_11d_scan);
1674 +-
1675 + ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
1676 + if (ret) {
1677 + ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
1678 + vdev_id, ret);
1679 + } else {
1680 + ar->vdev_id_11d_scan = vdev_id;
1681 +- if (wait) {
1682 +- ar->pending_11d = true;
1683 +- ret = wait_for_completion_timeout(&ar->finish_11d_scan,
1684 +- 5 * HZ);
1685 +- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
1686 +- "mac 11d scan left time %d\n", ret);
1687 +-
1688 +- if (!ret)
1689 +- ar->pending_11d = false;
1690 +- }
1691 ++ if (ar->state_11d == ATH11K_11D_PREPARING)
1692 ++ ar->state_11d = ATH11K_11D_RUNNING;
1693 + }
1694 +
1695 + fin:
1696 ++ if (ar->state_11d == ATH11K_11D_PREPARING) {
1697 ++ ar->state_11d = ATH11K_11D_IDLE;
1698 ++ complete(&ar->completed_11d_scan);
1699 ++ }
1700 ++
1701 + mutex_unlock(&ar->ab->vdev_id_11d_lock);
1702 + }
1703 +
1704 +@@ -6118,12 +6101,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
1705 + vdev_id = ar->vdev_id_11d_scan;
1706 +
1707 + ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
1708 +- if (ret)
1709 ++ if (ret) {
1710 + ath11k_warn(ar->ab,
1711 + "failed to stopt 11d scan vdev %d ret: %d\n",
1712 + vdev_id, ret);
1713 +- else
1714 ++ } else {
1715 + ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
1716 ++ ar->state_11d = ATH11K_11D_IDLE;
1717 ++ complete(&ar->completed_11d_scan);
1718 ++ }
1719 + }
1720 + mutex_unlock(&ar->ab->vdev_id_11d_lock);
1721 + }
1722 +@@ -6319,8 +6305,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
1723 + goto err_peer_del;
1724 + }
1725 +
1726 +- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
1727 +-
1728 ++ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
1729 ++ reinit_completion(&ar->completed_11d_scan);
1730 ++ ar->state_11d = ATH11K_11D_PREPARING;
1731 ++ }
1732 + break;
1733 + case WMI_VDEV_TYPE_MONITOR:
1734 + set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
1735 +@@ -7144,7 +7132,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
1736 + }
1737 +
1738 + if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
1739 +- ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
1740 ++ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
1741 +
1742 + mutex_unlock(&ar->conf_mutex);
1743 + }
1744 +@@ -8625,8 +8613,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
1745 + ar->monitor_vdev_id = -1;
1746 + clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
1747 + ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
1748 +- init_completion(&ar->finish_11d_scan);
1749 +- init_completion(&ar->finish_11d_ch_list);
1750 ++ init_completion(&ar->completed_11d_scan);
1751 + }
1752 +
1753 + return 0;
1754 +diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
1755 +index 0e6c870b09c88..29b523af66dd2 100644
1756 +--- a/drivers/net/wireless/ath/ath11k/mac.h
1757 ++++ b/drivers/net/wireless/ath/ath11k/mac.h
1758 +@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
1759 + #define ATH11K_SCAN_11D_INTERVAL 600000
1760 + #define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
1761 +
1762 +-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
1763 ++void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
1764 + void ath11k_mac_11d_scan_stop(struct ath11k *ar);
1765 + void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
1766 +
1767 +diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
1768 +index d6575feca5a26..eca935f5a95e1 100644
1769 +--- a/drivers/net/wireless/ath/ath11k/reg.c
1770 ++++ b/drivers/net/wireless/ath/ath11k/reg.c
1771 +@@ -93,7 +93,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
1772 + ar->regdom_set_by_user = true;
1773 + }
1774 +
1775 +-int ath11k_reg_update_chan_list(struct ath11k *ar)
1776 ++int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
1777 + {
1778 + struct ieee80211_supported_band **bands;
1779 + struct scan_chan_list_params *params;
1780 +@@ -102,7 +102,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
1781 + struct channel_param *ch;
1782 + enum nl80211_band band;
1783 + int num_channels = 0;
1784 +- int i, ret;
1785 ++ int i, ret, left;
1786 ++
1787 ++ if (wait && ar->state_11d != ATH11K_11D_IDLE) {
1788 ++ left = wait_for_completion_timeout(&ar->completed_11d_scan,
1789 ++ ATH11K_SCAN_TIMEOUT_HZ);
1790 ++ if (!left) {
1791 ++ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
1792 ++ "failed to receive 11d scan complete: timed out\n");
1793 ++ ar->state_11d = ATH11K_11D_IDLE;
1794 ++ }
1795 ++ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
1796 ++ "reg 11d scan wait left time %d\n", left);
1797 ++ }
1798 ++
1799 ++ if (wait &&
1800 ++ (ar->scan.state == ATH11K_SCAN_STARTING ||
1801 ++ ar->scan.state == ATH11K_SCAN_RUNNING)) {
1802 ++ left = wait_for_completion_timeout(&ar->scan.completed,
1803 ++ ATH11K_SCAN_TIMEOUT_HZ);
1804 ++ if (!left)
1805 ++ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
1806 ++ "failed to receive hw scan complete: timed out\n");
1807 ++
1808 ++ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
1809 ++ "reg hw scan wait left time %d\n", left);
1810 ++ }
1811 +
1812 + bands = hw->wiphy->bands;
1813 + for (band = 0; band < NUM_NL80211_BANDS; band++) {
1814 +@@ -184,11 +209,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
1815 + ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
1816 + kfree(params);
1817 +
1818 +- if (ar->pending_11d) {
1819 +- complete(&ar->finish_11d_ch_list);
1820 +- ar->pending_11d = false;
1821 +- }
1822 +-
1823 + return ret;
1824 + }
1825 +
1826 +@@ -254,15 +274,8 @@ int ath11k_regd_update(struct ath11k *ar)
1827 + goto err;
1828 + }
1829 +
1830 +- if (ar->pending_11d)
1831 +- complete(&ar->finish_11d_scan);
1832 +-
1833 + rtnl_lock();
1834 + wiphy_lock(ar->hw->wiphy);
1835 +-
1836 +- if (ar->pending_11d)
1837 +- reinit_completion(&ar->finish_11d_ch_list);
1838 +-
1839 + ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
1840 + wiphy_unlock(ar->hw->wiphy);
1841 + rtnl_unlock();
1842 +@@ -273,7 +286,7 @@ int ath11k_regd_update(struct ath11k *ar)
1843 + goto err;
1844 +
1845 + if (ar->state == ATH11K_STATE_ON) {
1846 +- ret = ath11k_reg_update_chan_list(ar);
1847 ++ ret = ath11k_reg_update_chan_list(ar, true);
1848 + if (ret)
1849 + goto err;
1850 + }
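
The reworked ath11k flow above replaces the old pending_11d flag with a completion that is waited on, with a timeout, before the channel list is pushed to firmware. The core pattern with the driver specifics stripped away; the names here are illustrative, the driver keys this off ar->state_11d and ar->completed_11d_scan:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    #define SCAN_TIMEOUT_HZ (20 * HZ)

    struct scan_state {
        struct completion done;
        bool in_progress;
    };

    static int wait_for_prior_scan(struct scan_state *ss)
    {
        unsigned long left;

        if (!ss->in_progress)
            return 0;                      /* nothing in flight */

        left = wait_for_completion_timeout(&ss->done, SCAN_TIMEOUT_HZ);
        if (!left) {
            ss->in_progress = false;       /* timed out: fall back to idle */
            return -ETIMEDOUT;
        }
        return 0;
    }
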
1851 +diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
1852 +index 5fb9dc03a74e8..2f284f26378d1 100644
1853 +--- a/drivers/net/wireless/ath/ath11k/reg.h
1854 ++++ b/drivers/net/wireless/ath/ath11k/reg.h
1855 +@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
1856 + ath11k_reg_build_regd(struct ath11k_base *ab,
1857 + struct cur_regulatory_info *reg_info, bool intersect);
1858 + int ath11k_regd_update(struct ath11k *ar);
1859 +-int ath11k_reg_update_chan_list(struct ath11k *ar);
1860 ++int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
1861 + #endif
1862 +diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
1863 +index 6b68ccf65e390..22921673e956e 100644
1864 +--- a/drivers/net/wireless/ath/ath11k/wmi.c
1865 ++++ b/drivers/net/wireless/ath/ath11k/wmi.c
1866 +@@ -2013,7 +2013,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
1867 + {
1868 + /* setup commonly used values */
1869 + arg->scan_req_id = 1;
1870 +- arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
1871 ++ if (ar->state_11d == ATH11K_11D_PREPARING)
1872 ++ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
1873 ++ else
1874 ++ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
1875 + arg->dwell_time_active = 50;
1876 + arg->dwell_time_active_2g = 0;
1877 + arg->dwell_time_passive = 150;
1878 +@@ -6177,8 +6180,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
1879 + static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
1880 + {
1881 + const struct wmi_11d_new_cc_ev *ev;
1882 ++ struct ath11k *ar;
1883 ++ struct ath11k_pdev *pdev;
1884 + const void **tb;
1885 +- int ret;
1886 ++ int ret, i;
1887 +
1888 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
1889 + if (IS_ERR(tb)) {
1890 +@@ -6204,6 +6209,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
1891 +
1892 + kfree(tb);
1893 +
1894 ++ for (i = 0; i < ab->num_radios; i++) {
1895 ++ pdev = &ab->pdevs[i];
1896 ++ ar = pdev->ar;
1897 ++ ar->state_11d = ATH11K_11D_IDLE;
1898 ++ complete(&ar->completed_11d_scan);
1899 ++ }
1900 ++
1901 + queue_work(ab->workqueue, &ab->update_11d_work);
1902 +
1903 + return 0;
1904 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1905 +index 42f6f8bb83be9..901600ca6f0ec 100644
1906 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1907 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
1908 +@@ -362,7 +362,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
1909 + struct iwl_dbg_tlv_timer_node *node, *tmp;
1910 +
1911 + list_for_each_entry_safe(node, tmp, timer_list, list) {
1912 +- del_timer(&node->timer);
1913 ++ del_timer_sync(&node->timer);
1914 + list_del(&node->list);
1915 + kfree(node);
1916 + }
1917 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1918 +index fc5725f6daee6..4a91d5cb75c3e 100644
1919 +--- a/drivers/net/wireless/mac80211_hwsim.c
1920 ++++ b/drivers/net/wireless/mac80211_hwsim.c
1921 +@@ -2336,11 +2336,13 @@ static void hw_scan_work(struct work_struct *work)
1922 + if (req->ie_len)
1923 + skb_put_data(probe, req->ie, req->ie_len);
1924 +
1925 ++ rcu_read_lock();
1926 + if (!ieee80211_tx_prepare_skb(hwsim->hw,
1927 + hwsim->hw_scan_vif,
1928 + probe,
1929 + hwsim->tmp_chan->band,
1930 + NULL)) {
1931 ++ rcu_read_unlock();
1932 + kfree_skb(probe);
1933 + continue;
1934 + }
1935 +@@ -2348,6 +2350,7 @@ static void hw_scan_work(struct work_struct *work)
1936 + local_bh_disable();
1937 + mac80211_hwsim_tx_frame(hwsim->hw, probe,
1938 + hwsim->tmp_chan);
1939 ++ rcu_read_unlock();
1940 + local_bh_enable();
1941 + }
1942 + }
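
The mac80211_hwsim hunk holds the RCU read lock across both frame preparation and transmission, and is careful to drop it on the early bail-out path as well. The generic shape of that fix; ctx, prepare_frame() and transmit_frame() are placeholders, not mac80211 symbols:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct ctx;                              /* placeholder */
    bool prepare_frame(struct ctx *c);       /* may read RCU-protected state */
    void transmit_frame(struct ctx *c);

    static void tx_one(struct ctx *c)
    {
        rcu_read_lock();
        if (!prepare_frame(c)) {
            rcu_read_unlock();   /* the early exit must unlock too */
            return;
        }
        transmit_frame(c);       /* still inside the same read section */
        rcu_read_unlock();
    }
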
1943 +diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
1944 +index d384d36098c27..a62c5dfe42d64 100644
1945 +--- a/drivers/platform/surface/aggregator/core.c
1946 ++++ b/drivers/platform/surface/aggregator/core.c
1947 +@@ -817,7 +817,7 @@ err_cpkg:
1948 + err_bus:
1949 + return status;
1950 + }
1951 +-module_init(ssam_core_init);
1952 ++subsys_initcall(ssam_core_init);
1953 +
1954 + static void __exit ssam_core_exit(void)
1955 + {
1956 +diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
1957 +index 88abfb5e8045c..8ac213a551418 100644
1958 +--- a/drivers/s390/net/ctcm_mpc.c
1959 ++++ b/drivers/s390/net/ctcm_mpc.c
1960 +@@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
1961 + ctcm_clear_busy_do(dev);
1962 + }
1963 +
1964 +- kfree(mpcginfo);
1965 +-
1966 + return;
1967 +
1968 + }
1969 +@@ -1192,10 +1190,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
1970 + CTCM_FUNTAIL, dev->name);
1971 + priv->stats.rx_dropped++;
1972 + /* mpcginfo only used for non-data transfers */
1973 +- kfree(mpcginfo);
1974 + if (do_debug_data)
1975 + ctcmpc_dump_skb(pskb, -8);
1976 + }
1977 ++ kfree(mpcginfo);
1978 + }
1979 + done:
1980 +
1981 +@@ -1977,7 +1975,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
1982 + }
1983 + break;
1984 + }
1985 +- kfree(mpcginfo);
1986 +
1987 + CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
1988 + __func__, ch->id, grp->outstanding_xid2,
1989 +@@ -2038,7 +2035,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
1990 + mpc_validate_xid(mpcginfo);
1991 + break;
1992 + }
1993 +- kfree(mpcginfo);
1994 + return;
1995 + }
1996 +
1997 +diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
1998 +index ded1930a00b2d..e3813a7aa5e68 100644
1999 +--- a/drivers/s390/net/ctcm_sysfs.c
2000 ++++ b/drivers/s390/net/ctcm_sysfs.c
2001 +@@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
2002 + struct ctcm_priv *priv = dev_get_drvdata(dev);
2003 + int rc;
2004 +
2005 +- ndev = priv->channel[CTCM_READ]->netdev;
2006 +- if (!(priv && priv->channel[CTCM_READ] && ndev)) {
2007 ++ if (!(priv && priv->channel[CTCM_READ] &&
2008 ++ priv->channel[CTCM_READ]->netdev)) {
2009 + CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
2010 + return -ENODEV;
2011 + }
2012 ++ ndev = priv->channel[CTCM_READ]->netdev;
2013 +
2014 + rc = kstrtouint(buf, 0, &bs1);
2015 + if (rc)
2016 +diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
2017 +index a61d38a1b4ed1..66c2893badadf 100644
2018 +--- a/drivers/s390/net/lcs.c
2019 ++++ b/drivers/s390/net/lcs.c
2020 +@@ -1736,10 +1736,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
2021 + lcs_schedule_recovery(card);
2022 + break;
2023 + case LCS_CMD_STOPLAN:
2024 +- pr_warn("Stoplan for %s initiated by LGW\n",
2025 +- card->dev->name);
2026 +- if (card->dev)
2027 ++ if (card->dev) {
2028 ++ pr_warn("Stoplan for %s initiated by LGW\n",
2029 ++ card->dev->name);
2030 + netif_carrier_off(card->dev);
2031 ++ }
2032 + break;
2033 + default:
2034 + LCS_DBF_TEXT(5, trace, "noLGWcmd");
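
Both s390 hunks above cure the same class of bug: a pointer chain was dereferenced (priv->channel[CTCM_READ]->netdev in ctcm_sysfs, card->dev->name in lcs) before the very check that was meant to validate it. The fixed shape, reusing the ctcm names from the hunk purely for illustration:

    static struct net_device *get_read_netdev(struct ctcm_priv *priv)
    {
        /* Validate every link in the chain before following it. */
        if (!priv || !priv->channel[CTCM_READ] ||
            !priv->channel[CTCM_READ]->netdev)
            return NULL;

        return priv->channel[CTCM_READ]->netdev;
    }
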
2035 +diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
2036 +index f04b961b96cd4..ec58091fc948a 100644
2037 +--- a/drivers/slimbus/qcom-ctrl.c
2038 ++++ b/drivers/slimbus/qcom-ctrl.c
2039 +@@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev)
2040 + }
2041 +
2042 + ctrl->irq = platform_get_irq(pdev, 0);
2043 +- if (!ctrl->irq) {
2044 ++ if (ctrl->irq < 0) {
2045 + dev_err(&pdev->dev, "no slimbus IRQ\n");
2046 +- return -ENODEV;
2047 ++ return ctrl->irq;
2048 + }
2049 +
2050 + sctrl = &ctrl->ctrl;
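
The slimbus fix adopts the platform_get_irq() convention: the call returns a negative errno on failure (including -EPROBE_DEFER), so probe code should test for a negative value and propagate it rather than substituting -ENODEV. A minimal probe fragment showing the convention:

    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
            return irq;     /* propagate -EPROBE_DEFER, -ENXIO, ... */

        /* ... request 'irq' and finish setup ... */
        return 0;
    }
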
2051 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
2052 +index a38b922bcbc10..fd8b86dde5255 100644
2053 +--- a/drivers/tty/n_gsm.c
2054 ++++ b/drivers/tty/n_gsm.c
2055 +@@ -137,6 +137,7 @@ struct gsm_dlci {
2056 + int retries;
2057 + /* Uplink tty if active */
2058 + struct tty_port port; /* The tty bound to this DLCI if there is one */
2059 ++#define TX_SIZE 4096 /* Must be power of 2. */
2060 + struct kfifo fifo; /* Queue fifo for the DLCI */
2061 + int adaption; /* Adaption layer in use */
2062 + int prev_adaption;
2063 +@@ -1658,6 +1659,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
2064 + if (len == 0)
2065 + return;
2066 + }
2067 ++ len--;
2068 + slen++;
2069 + tty = tty_port_tty_get(port);
2070 + if (tty) {
2071 +@@ -1730,7 +1732,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
2072 + return NULL;
2073 + spin_lock_init(&dlci->lock);
2074 + mutex_init(&dlci->mutex);
2075 +- if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) {
2076 ++ if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
2077 + kfree(dlci);
2078 + return NULL;
2079 + }
2080 +@@ -2351,6 +2353,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,
2081 +
2082 + static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
2083 + {
2084 ++ int ret = 0;
2085 + int need_close = 0;
2086 + int need_restart = 0;
2087 +
2088 +@@ -2418,10 +2421,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
2089 + * FIXME: We need to separate activation/deactivation from adding
2090 + * and removing from the mux array
2091 + */
2092 +- if (need_restart)
2093 +- gsm_activate_mux(gsm);
2094 +- if (gsm->initiator && need_close)
2095 +- gsm_dlci_begin_open(gsm->dlci[0]);
2096 ++ if (gsm->dead) {
2097 ++ ret = gsm_activate_mux(gsm);
2098 ++ if (ret)
2099 ++ return ret;
2100 ++ if (gsm->initiator)
2101 ++ gsm_dlci_begin_open(gsm->dlci[0]);
2102 ++ }
2103 + return 0;
2104 + }
2105 +
2106 +@@ -2971,8 +2977,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
2107 + * Virtual tty side
2108 + */
2109 +
2110 +-#define TX_SIZE 512
2111 +-
2112 + /**
2113 + * gsm_modem_upd_via_data - send modem bits via convergence layer
2114 + * @dlci: channel
2115 +@@ -3212,7 +3216,7 @@ static unsigned int gsmtty_write_room(struct tty_struct *tty)
2116 + struct gsm_dlci *dlci = tty->driver_data;
2117 + if (dlci->state == DLCI_CLOSED)
2118 + return 0;
2119 +- return TX_SIZE - kfifo_len(&dlci->fifo);
2120 ++ return kfifo_avail(&dlci->fifo);
2121 + }
2122 +
2123 + static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
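
Two things happen in the n_gsm hunk: the DLCI fifo allocation and the write-room logic now share a single TX_SIZE (the stale 512-byte definition disagreed with the 4096-byte allocation), and the room is taken from kfifo_avail() instead of being recomputed from a constant. A toy model of why asking the fifo itself is safer:

    #include <assert.h>
    #include <stddef.h>

    /* Toy power-of-two fifo accounting, mirroring kfifo semantics. */
    struct fifo {
        size_t size;    /* total capacity, power of two */
        size_t in, out; /* free-running indices */
    };

    static size_t fifo_len(const struct fifo *f)   { return f->in - f->out; }
    static size_t fifo_avail(const struct fifo *f) { return f->size - fifo_len(f); }

    int main(void)
    {
        struct fifo f = { .size = 4096, .in = 100, .out = 0 };

        /* With the old 512-byte TX_SIZE, 512 - 100 = 412 would have
         * understated the real room of 4096 - 100 = 3996. */
        assert(fifo_avail(&f) == 3996);
        return 0;
    }
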
2124 +diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
2125 +index fb65dc601b237..de48a58460f47 100644
2126 +--- a/drivers/tty/serial/8250/8250_mtk.c
2127 ++++ b/drivers/tty/serial/8250/8250_mtk.c
2128 +@@ -37,6 +37,7 @@
2129 + #define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
2130 + #define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
2131 +
2132 ++#define MTK_UART_EFR 38 /* I/O: Extended Features Register */
2133 + #define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
2134 + #define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
2135 + #define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
2136 +@@ -53,6 +54,9 @@
2137 + #define MTK_UART_TX_TRIGGER 1
2138 + #define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
2139 +
2140 ++#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
2141 ++#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
2142 ++
2143 + #ifdef CONFIG_SERIAL_8250_DMA
2144 + enum dma_rx_status {
2145 + DMA_RX_START = 0,
2146 +@@ -169,7 +173,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up)
2147 + MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);
2148 +
2149 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
2150 +- serial_out(up, UART_EFR, UART_EFR_ECB);
2151 ++ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
2152 + serial_out(up, UART_LCR, lcr);
2153 +
2154 + if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
2155 +@@ -232,7 +236,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
2156 + int lcr = serial_in(up, UART_LCR);
2157 +
2158 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
2159 +- serial_out(up, UART_EFR, UART_EFR_ECB);
2160 ++ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
2161 + serial_out(up, UART_LCR, lcr);
2162 + lcr = serial_in(up, UART_LCR);
2163 +
2164 +@@ -241,7 +245,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
2165 + serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
2166 + serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
2167 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
2168 +- serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
2169 ++ serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
2170 + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
2171 + serial_out(up, UART_LCR, lcr);
2172 + mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
2173 +@@ -255,8 +259,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
2174 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
2175 +
2176 + /*enable hw flow control*/
2177 +- serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
2178 +- (serial_in(up, UART_EFR) &
2179 ++ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
2180 ++ (serial_in(up, MTK_UART_EFR) &
2181 + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
2182 +
2183 + serial_out(up, UART_LCR, lcr);
2184 +@@ -270,12 +274,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
2185 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
2186 +
2187 + /*enable sw flow control */
2188 +- serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
2189 +- (serial_in(up, UART_EFR) &
2190 ++ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
2191 ++ (serial_in(up, MTK_UART_EFR) &
2192 + (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
2193 +
2194 +- serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
2195 +- serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
2196 ++ serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
2197 ++ serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
2198 + serial_out(up, UART_LCR, lcr);
2199 + mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
2200 + mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
2201 +diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
2202 +index 13ac36e2da4f0..c7f81aa1ce912 100644
2203 +--- a/drivers/tty/serial/digicolor-usart.c
2204 ++++ b/drivers/tty/serial/digicolor-usart.c
2205 +@@ -471,11 +471,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
2206 + if (IS_ERR(uart_clk))
2207 + return PTR_ERR(uart_clk);
2208 +
2209 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2210 +- dp->port.mapbase = res->start;
2211 +- dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
2212 ++ dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2213 + if (IS_ERR(dp->port.membase))
2214 + return PTR_ERR(dp->port.membase);
2215 ++ dp->port.mapbase = res->start;
2216 +
2217 + irq = platform_get_irq(pdev, 0);
2218 + if (irq < 0)
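
The digicolor change fixes an ordering bug: res->start was read before anything verified that platform_get_resource() had succeeded. devm_platform_get_and_ioremap_resource() performs the lookup and mapping together, and res is known valid only once the returned mapping has been checked. A sketch of the fixed ordering:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int sketch_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;

        base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(base))
            return PTR_ERR(base);

        /* Only past this point is 'res' guaranteed non-NULL. */
        /* port->mapbase = res->start;  port->membase = base; */
        return 0;
    }
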
2219 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
2220 +index ce3e261446898..d32c25bc973b4 100644
2221 +--- a/drivers/tty/serial/fsl_lpuart.c
2222 ++++ b/drivers/tty/serial/fsl_lpuart.c
2223 +@@ -2658,6 +2658,7 @@ static int lpuart_probe(struct platform_device *pdev)
2224 + struct device_node *np = pdev->dev.of_node;
2225 + struct lpuart_port *sport;
2226 + struct resource *res;
2227 ++ irq_handler_t handler;
2228 + int ret;
2229 +
2230 + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
2231 +@@ -2735,17 +2736,11 @@ static int lpuart_probe(struct platform_device *pdev)
2232 +
2233 + if (lpuart_is_32(sport)) {
2234 + lpuart_reg.cons = LPUART32_CONSOLE;
2235 +- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
2236 +- DRIVER_NAME, sport);
2237 ++ handler = lpuart32_int;
2238 + } else {
2239 + lpuart_reg.cons = LPUART_CONSOLE;
2240 +- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
2241 +- DRIVER_NAME, sport);
2242 ++ handler = lpuart_int;
2243 + }
2244 +-
2245 +- if (ret)
2246 +- goto failed_irq_request;
2247 +-
2248 + ret = uart_add_one_port(&lpuart_reg, &sport->port);
2249 + if (ret)
2250 + goto failed_attach_port;
2251 +@@ -2767,13 +2762,18 @@ static int lpuart_probe(struct platform_device *pdev)
2252 +
2253 + sport->port.rs485_config(&sport->port, &sport->port.rs485);
2254 +
2255 ++ ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
2256 ++ DRIVER_NAME, sport);
2257 ++ if (ret)
2258 ++ goto failed_irq_request;
2259 ++
2260 + return 0;
2261 +
2262 ++failed_irq_request:
2263 + failed_get_rs485:
2264 + failed_reset:
2265 + uart_remove_one_port(&lpuart_reg, &sport->port);
2266 + failed_attach_port:
2267 +-failed_irq_request:
2268 + lpuart_disable_clks(sport);
2269 + failed_clock_enable:
2270 + failed_out_of_range:
2271 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2272 +index 7f2c83f299d32..eebe782380fb9 100644
2273 +--- a/drivers/usb/class/cdc-wdm.c
2274 ++++ b/drivers/usb/class/cdc-wdm.c
2275 +@@ -774,6 +774,7 @@ static int wdm_release(struct inode *inode, struct file *file)
2276 + poison_urbs(desc);
2277 + spin_lock_irq(&desc->iuspin);
2278 + desc->resp_count = 0;
2279 ++ clear_bit(WDM_RESPONDING, &desc->flags);
2280 + spin_unlock_irq(&desc->iuspin);
2281 + desc->manage_power(desc->intf, 0);
2282 + unpoison_urbs(desc);
2283 +diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
2284 +index 71bb5e477dbad..d37965867b230 100644
2285 +--- a/drivers/usb/gadget/function/f_uvc.c
2286 ++++ b/drivers/usb/gadget/function/f_uvc.c
2287 +@@ -890,13 +890,37 @@ static void uvc_function_unbind(struct usb_configuration *c,
2288 + {
2289 + struct usb_composite_dev *cdev = c->cdev;
2290 + struct uvc_device *uvc = to_uvc(f);
2291 ++ long wait_ret = 1;
2292 +
2293 + uvcg_info(f, "%s()\n", __func__);
2294 +
2295 ++ /* If we know we're connected via v4l2, then there should be a cleanup
2296 ++ * of the device from userspace either via UVC_EVENT_DISCONNECT or
2297 ++ * though the video device removal uevent. Allow some time for the
2298 ++ * application to close out before things get deleted.
2299 ++ */
2300 ++ if (uvc->func_connected) {
2301 ++ uvcg_dbg(f, "waiting for clean disconnect\n");
2302 ++ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
2303 ++ uvc->func_connected == false, msecs_to_jiffies(500));
2304 ++ uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
2305 ++ }
2306 ++
2307 + device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
2308 + video_unregister_device(&uvc->vdev);
2309 + v4l2_device_unregister(&uvc->v4l2_dev);
2310 +
2311 ++ if (uvc->func_connected) {
2312 ++ /* Wait for the release to occur to ensure there are no longer any
2313 ++ * pending operations that may cause panics when resources are cleaned
2314 ++ * up.
2315 ++ */
2316 ++ uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
2317 ++ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
2318 ++ uvc->func_connected == false, msecs_to_jiffies(1000));
2319 ++ uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
2320 ++ }
2321 ++
2322 + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
2323 + kfree(uvc->control_buf);
2324 +
2325 +@@ -915,6 +939,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
2326 +
2327 + mutex_init(&uvc->video.mutex);
2328 + uvc->state = UVC_STATE_DISCONNECTED;
2329 ++ init_waitqueue_head(&uvc->func_connected_queue);
2330 + opts = fi_to_f_uvc_opts(fi);
2331 +
2332 + mutex_lock(&opts->lock);
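
The f_uvc teardown above is built on a standard wait-queue handshake: unbind blocks in wait_event_interruptible_timeout() until func_connected goes false, and the disconnect path flips the flag and wakes the queue. The pattern in isolation, with illustrative names:

    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static bool connected = true;

    /* Teardown side: give the peer a bounded window to disconnect. */
    static void wait_for_disconnect(void)
    {
        long ret = wait_event_interruptible_timeout(wq, !connected,
                                                    msecs_to_jiffies(500));
        /* ret > 0: condition met; ret == 0: timed out; ret < 0: signal */
        (void)ret;
    }

    /* Disconnect side: update the condition, then wake the waiter. */
    static void mark_disconnected(void)
    {
        connected = false;
        wake_up_interruptible(&wq);
    }
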
2333 +diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
2334 +index c3607a32b9862..886103a1fe9b7 100644
2335 +--- a/drivers/usb/gadget/function/uvc.h
2336 ++++ b/drivers/usb/gadget/function/uvc.h
2337 +@@ -14,6 +14,7 @@
2338 + #include <linux/spinlock.h>
2339 + #include <linux/usb/composite.h>
2340 + #include <linux/videodev2.h>
2341 ++#include <linux/wait.h>
2342 +
2343 + #include <media/v4l2-device.h>
2344 + #include <media/v4l2-dev.h>
2345 +@@ -129,6 +130,7 @@ struct uvc_device {
2346 + struct usb_function func;
2347 + struct uvc_video video;
2348 + bool func_connected;
2349 ++ wait_queue_head_t func_connected_queue;
2350 +
2351 + /* Descriptors */
2352 + struct {
2353 +diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
2354 +index a2c78690c5c28..fd8f73bb726dd 100644
2355 +--- a/drivers/usb/gadget/function/uvc_v4l2.c
2356 ++++ b/drivers/usb/gadget/function/uvc_v4l2.c
2357 +@@ -253,10 +253,11 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
2358 +
2359 + static void uvc_v4l2_disable(struct uvc_device *uvc)
2360 + {
2361 +- uvc->func_connected = false;
2362 + uvc_function_disconnect(uvc);
2363 + uvcg_video_enable(&uvc->video, 0);
2364 + uvcg_free_buffers(&uvc->video.queue);
2365 ++ uvc->func_connected = false;
2366 ++ wake_up_interruptible(&uvc->func_connected_queue);
2367 + }
2368 +
2369 + static int
2370 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
2371 +index edbfa82c65659..74ce52b34317e 100644
2372 +--- a/drivers/usb/host/xhci-mtk-sch.c
2373 ++++ b/drivers/usb/host/xhci-mtk-sch.c
2374 +@@ -465,7 +465,7 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
2375 + */
2376 + for (j = 0; j < sch_ep->num_budget_microframes; j++) {
2377 + k = XHCI_MTK_BW_INDEX(base + j);
2378 +- tmp = tt->fs_bus_bw[k] + sch_ep->bw_budget_table[j];
2379 ++ tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
2380 + if (tmp > FS_PAYLOAD_MAX)
2381 + return -ESCH_BW_OVERFLOW;
2382 + }
2383 +@@ -539,19 +539,17 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
2384 + static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
2385 + {
2386 + struct mu3h_sch_tt *tt = sch_ep->sch_tt;
2387 ++ int bw_updated;
2388 + u32 base;
2389 +- int i, j, k;
2390 ++ int i, j;
2391 ++
2392 ++ bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
2393 +
2394 + for (i = 0; i < sch_ep->num_esit; i++) {
2395 + base = sch_ep->offset + i * sch_ep->esit;
2396 +
2397 +- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
2398 +- k = XHCI_MTK_BW_INDEX(base + j);
2399 +- if (used)
2400 +- tt->fs_bus_bw[k] += sch_ep->bw_budget_table[j];
2401 +- else
2402 +- tt->fs_bus_bw[k] -= sch_ep->bw_budget_table[j];
2403 +- }
2404 ++ for (j = 0; j < sch_ep->num_budget_microframes; j++)
2405 ++ tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
2406 + }
2407 +
2408 + if (used)
2409 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2410 +index 1364ce7f0abf0..152ad882657d7 100644
2411 +--- a/drivers/usb/serial/option.c
2412 ++++ b/drivers/usb/serial/option.c
2413 +@@ -2123,10 +2123,14 @@ static const struct usb_device_id option_ids[] = {
2414 + .driver_info = RSVD(3) },
2415 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
2416 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
2417 ++ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
2418 ++ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
2419 + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
2420 + .driver_info = RSVD(4) | RSVD(5) },
2421 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
2422 + .driver_info = RSVD(6) },
2423 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
2424 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
2425 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
2426 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
2427 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
2428 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
2429 +index 88b284d61681a..1d878d05a6584 100644
2430 +--- a/drivers/usb/serial/pl2303.c
2431 ++++ b/drivers/usb/serial/pl2303.c
2432 +@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
2433 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
2434 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
2435 + { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
2436 ++ { USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
2437 + { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
2438 + { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
2439 + { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
2440 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
2441 +index c5406452b774e..732f9b13ad5d5 100644
2442 +--- a/drivers/usb/serial/pl2303.h
2443 ++++ b/drivers/usb/serial/pl2303.h
2444 +@@ -135,6 +135,7 @@
2445 + #define HP_TD620_PRODUCT_ID 0x0956
2446 + #define HP_LD960_PRODUCT_ID 0x0b39
2447 + #define HP_LD381_PRODUCT_ID 0x0f7f
2448 ++#define HP_LM930_PRODUCT_ID 0x0f9b
2449 + #define HP_LCM220_PRODUCT_ID 0x3139
2450 + #define HP_LCM960_PRODUCT_ID 0x3239
2451 + #define HP_LD220_PRODUCT_ID 0x3524
2452 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2453 +index c18bf8164bc2e..586ef5551e76e 100644
2454 +--- a/drivers/usb/serial/qcserial.c
2455 ++++ b/drivers/usb/serial/qcserial.c
2456 +@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
2457 + {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
2458 + {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
2459 + {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
2460 ++ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
2461 ++ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
2462 + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
2463 + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
2464 + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
2465 +diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
2466 +index e07d26a3cd8e1..f33e08eb76709 100644
2467 +--- a/drivers/usb/typec/tcpm/tcpci.c
2468 ++++ b/drivers/usb/typec/tcpm/tcpci.c
2469 +@@ -877,7 +877,7 @@ static int tcpci_remove(struct i2c_client *client)
2470 + /* Disable chip interrupts before unregistering port */
2471 + err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
2472 + if (err < 0)
2473 +- return err;
2474 ++ dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
2475 +
2476 + tcpci_unregister_port(chip->tcpci);
2477 +
2478 +diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
2479 +index f1bd9e09bc87f..8a952eaf90163 100644
2480 +--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
2481 ++++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
2482 +@@ -15,6 +15,9 @@
2483 +
2484 + #include "tcpci.h"
2485 +
2486 ++#define MT6360_REG_PHYCTRL1 0x80
2487 ++#define MT6360_REG_PHYCTRL3 0x82
2488 ++#define MT6360_REG_PHYCTRL7 0x86
2489 + #define MT6360_REG_VCONNCTRL1 0x8C
2490 + #define MT6360_REG_MODECTRL2 0x8F
2491 + #define MT6360_REG_SWRESET 0xA0
2492 +@@ -22,6 +25,8 @@
2493 + #define MT6360_REG_DRPCTRL1 0xA2
2494 + #define MT6360_REG_DRPCTRL2 0xA3
2495 + #define MT6360_REG_I2CTORST 0xBF
2496 ++#define MT6360_REG_PHYCTRL11 0xCA
2497 ++#define MT6360_REG_RXCTRL1 0xCE
2498 + #define MT6360_REG_RXCTRL2 0xCF
2499 + #define MT6360_REG_CTDCTRL2 0xEC
2500 +
2501 +@@ -106,6 +111,27 @@ static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
2502 + if (ret)
2503 + return ret;
2504 +
2505 ++ /* BMC PHY */
2506 ++ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
2507 ++ if (ret)
2508 ++ return ret;
2509 ++
2510 ++ ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
2511 ++ if (ret)
2512 ++ return ret;
2513 ++
2514 ++ ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
2515 ++ if (ret)
2516 ++ return ret;
2517 ++
2518 ++ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
2519 ++ if (ret)
2520 ++ return ret;
2521 ++
2522 ++ ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
2523 ++ if (ret)
2524 ++ return ret;
2525 ++
2526 + /* Set shipping mode off, AUTOIDLE on */
2527 + return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
2528 + }
2529 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2530 +index ea42ba6445b2d..b3d5f884c5445 100644
2531 +--- a/drivers/video/fbdev/efifb.c
2532 ++++ b/drivers/video/fbdev/efifb.c
2533 +@@ -243,6 +243,10 @@ error:
2534 + static inline void efifb_show_boot_graphics(struct fb_info *info) {}
2535 + #endif
2536 +
2537 ++/*
2538 ++ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
2539 ++ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
2540 ++ */
2541 + static void efifb_destroy(struct fb_info *info)
2542 + {
2543 + if (efifb_pci_dev)
2544 +@@ -254,10 +258,13 @@ static void efifb_destroy(struct fb_info *info)
2545 + else
2546 + memunmap(info->screen_base);
2547 + }
2548 ++
2549 + if (request_mem_succeeded)
2550 + release_mem_region(info->apertures->ranges[0].base,
2551 + info->apertures->ranges[0].size);
2552 + fb_dealloc_cmap(&info->cmap);
2553 ++
2554 ++ framebuffer_release(info);
2555 + }
2556 +
2557 + static const struct fb_ops efifb_ops = {
2558 +@@ -620,9 +627,9 @@ static int efifb_remove(struct platform_device *pdev)
2559 + {
2560 + struct fb_info *info = platform_get_drvdata(pdev);
2561 +
2562 ++ /* efifb_destroy takes care of info cleanup */
2563 + unregister_framebuffer(info);
2564 + sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
2565 +- framebuffer_release(info);
2566 +
2567 + return 0;
2568 + }
2569 +diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
2570 +index 57541887188b1..efce6ef8532d2 100644
2571 +--- a/drivers/video/fbdev/simplefb.c
2572 ++++ b/drivers/video/fbdev/simplefb.c
2573 +@@ -70,12 +70,18 @@ struct simplefb_par;
2574 + static void simplefb_clocks_destroy(struct simplefb_par *par);
2575 + static void simplefb_regulators_destroy(struct simplefb_par *par);
2576 +
2577 ++/*
2578 ++ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
2579 ++ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
2580 ++ */
2581 + static void simplefb_destroy(struct fb_info *info)
2582 + {
2583 + simplefb_regulators_destroy(info->par);
2584 + simplefb_clocks_destroy(info->par);
2585 + if (info->screen_base)
2586 + iounmap(info->screen_base);
2587 ++
2588 ++ framebuffer_release(info);
2589 + }
2590 +
2591 + static const struct fb_ops simplefb_ops = {
2592 +@@ -520,8 +526,8 @@ static int simplefb_remove(struct platform_device *pdev)
2593 + {
2594 + struct fb_info *info = platform_get_drvdata(pdev);
2595 +
2596 ++ /* simplefb_destroy takes care of info cleanup */
2597 + unregister_framebuffer(info);
2598 +- framebuffer_release(info);
2599 +
2600 + return 0;
2601 + }
2602 +diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
2603 +index df6de5a9dd4cd..e25e8de5ff672 100644
2604 +--- a/drivers/video/fbdev/vesafb.c
2605 ++++ b/drivers/video/fbdev/vesafb.c
2606 +@@ -179,6 +179,10 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
2607 + return err;
2608 + }
2609 +
2610 ++/*
2611 ++ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
2612 ++ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
2613 ++ */
2614 + static void vesafb_destroy(struct fb_info *info)
2615 + {
2616 + struct vesafb_par *par = info->par;
2617 +@@ -188,6 +192,8 @@ static void vesafb_destroy(struct fb_info *info)
2618 + if (info->screen_base)
2619 + iounmap(info->screen_base);
2620 + release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
2621 ++
2622 ++ framebuffer_release(info);
2623 + }
2624 +
2625 + static struct fb_ops vesafb_ops = {
2626 +@@ -484,10 +490,10 @@ static int vesafb_remove(struct platform_device *pdev)
2627 + {
2628 + struct fb_info *info = platform_get_drvdata(pdev);
2629 +
2630 ++ /* vesafb_destroy takes care of info cleanup */
2631 + unregister_framebuffer(info);
2632 + if (((struct vesafb_par *)(info->par))->region)
2633 + release_region(0x3c0, 32);
2634 +- framebuffer_release(info);
2635 +
2636 + return 0;
2637 + }
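
All three fbdev hunks (efifb, simplefb, vesafb) move framebuffer_release() out of the platform remove path and into fb_ops.fb_destroy, because the last reference to the fb_info may be dropped by fb_release() after remove() has already returned. Schematically, assuming my_fb_destroy is wired up as fb_ops.fb_destroy:

    #include <linux/fb.h>
    #include <linux/platform_device.h>

    static void my_fb_destroy(struct fb_info *info)
    {
        /* Runs at the final put_fb_info(); undo probe-time setup here. */
        /* iounmap(info->screen_base); release_mem_region(...); */
        framebuffer_release(info);   /* frees info itself, so do it last */
    }

    static int my_fb_remove(struct platform_device *pdev)
    {
        struct fb_info *info = platform_get_drvdata(pdev);

        /* Drops one reference; my_fb_destroy() runs when the last goes. */
        unregister_framebuffer(info);
        return 0;   /* no framebuffer_release() here: info may still be open */
    }
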
2638 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2639 +index bbed3224ad689..52268cd6df167 100644
2640 +--- a/fs/ceph/file.c
2641 ++++ b/fs/ceph/file.c
2642 +@@ -598,9 +598,15 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
2643 + iinfo.change_attr = 1;
2644 + ceph_encode_timespec64(&iinfo.btime, &now);
2645 +
2646 +- iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
2647 +- iinfo.xattr_data = xattr_buf;
2648 +- memset(iinfo.xattr_data, 0, iinfo.xattr_len);
2649 ++ if (req->r_pagelist) {
2650 ++ iinfo.xattr_len = req->r_pagelist->length;
2651 ++ iinfo.xattr_data = req->r_pagelist->mapped_tail;
2652 ++ } else {
2653 ++ /* fake it */
2654 ++ iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
2655 ++ iinfo.xattr_data = xattr_buf;
2656 ++ memset(iinfo.xattr_data, 0, iinfo.xattr_len);
2657 ++ }
2658 +
2659 + in.ino = cpu_to_le64(vino.ino);
2660 + in.snapid = cpu_to_le64(CEPH_NOSNAP);
2661 +@@ -712,6 +718,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
2662 + err = ceph_security_init_secctx(dentry, mode, &as_ctx);
2663 + if (err < 0)
2664 + goto out_ctx;
2665 ++ /* Async create can't handle more than a page of xattrs */
2666 ++ if (as_ctx.pagelist &&
2667 ++ !list_is_singular(&as_ctx.pagelist->head))
2668 ++ try_async = false;
2669 + } else if (!d_in_lookup(dentry)) {
2670 + /* If it's not being looked up, it's negative */
2671 + return -ENOENT;
2672 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
2673 +index f8d7fe6db989e..ab5fa50664f66 100644
2674 +--- a/fs/fs-writeback.c
2675 ++++ b/fs/fs-writeback.c
2676 +@@ -1749,6 +1749,10 @@ static int writeback_single_inode(struct inode *inode,
2677 + */
2678 + if (!(inode->i_state & I_DIRTY_ALL))
2679 + inode_cgwb_move_to_attached(inode, wb);
2680 ++ else if (!(inode->i_state & I_SYNC_QUEUED) &&
2681 ++ (inode->i_state & I_DIRTY))
2682 ++ redirty_tail_locked(inode, wb);
2683 ++
2684 + spin_unlock(&wb->list_lock);
2685 + inode_sync_complete(inode);
2686 + out:
2687 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
2688 +index fbdb7a30470a3..f785af2aa23cf 100644
2689 +--- a/fs/gfs2/bmap.c
2690 ++++ b/fs/gfs2/bmap.c
2691 +@@ -1154,13 +1154,12 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
2692 +
2693 + if (length != written && (iomap->flags & IOMAP_F_NEW)) {
2694 + /* Deallocate blocks that were just allocated. */
2695 +- loff_t blockmask = i_blocksize(inode) - 1;
2696 +- loff_t end = (pos + length) & ~blockmask;
2697 ++ loff_t hstart = round_up(pos + written, i_blocksize(inode));
2698 ++ loff_t hend = iomap->offset + iomap->length;
2699 +
2700 +- pos = (pos + written + blockmask) & ~blockmask;
2701 +- if (pos < end) {
2702 +- truncate_pagecache_range(inode, pos, end - 1);
2703 +- punch_hole(ip, pos, end - pos);
2704 ++ if (hstart < hend) {
2705 ++ truncate_pagecache_range(inode, hstart, hend - 1);
2706 ++ punch_hole(ip, hstart, hend - hstart);
2707 + }
2708 + }
2709 +
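
The gfs2 fix keeps any block that actually received data and punches the hole from the next block boundary after the written bytes up to the end of the mapped range. The alignment arithmetic it relies on, in standalone form:

    #include <assert.h>
    #include <stdint.h>

    /* round_up for a power-of-two alignment, as the gfs2 hunk uses it */
    static uint64_t round_up_pow2(uint64_t x, uint64_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        /* 4 KiB blocks: a write ending mid-block keeps that whole block,
         * so the hole behind it starts at the next boundary. */
        assert(round_up_pow2(8192 + 100, 4096) == 12288);
        assert(round_up_pow2(8192, 4096) == 8192);
        return 0;
    }
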
2710 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2711 +index 87df379120551..a0680046ff3c7 100644
2712 +--- a/fs/io_uring.c
2713 ++++ b/fs/io_uring.c
2714 +@@ -6572,7 +6572,12 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2715 +
2716 + static int io_req_prep_async(struct io_kiocb *req)
2717 + {
2718 +- if (!io_op_defs[req->opcode].needs_async_setup)
2719 ++ const struct io_op_def *def = &io_op_defs[req->opcode];
2720 ++
2721 ++ /* assign early for deferred execution for non-fixed file */
2722 ++ if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
2723 ++ req->file = io_file_get_normal(req, req->fd);
2724 ++ if (!def->needs_async_setup)
2725 + return 0;
2726 + if (WARN_ON_ONCE(req_has_async_data(req)))
2727 + return -EFAULT;
2728 +diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
2729 +index ea17fa1f31ecb..d208911621451 100644
2730 +--- a/fs/nfs/fs_context.c
2731 ++++ b/fs/nfs/fs_context.c
2732 +@@ -515,7 +515,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
2733 + if (result.negated)
2734 + ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
2735 + else
2736 +- ctx->flags &= NFS_MOUNT_SOFTREVAL;
2737 ++ ctx->flags |= NFS_MOUNT_SOFTREVAL;
2738 + break;
2739 + case Opt_posix:
2740 + if (result.negated)
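
The NFS fix is a one-character bitmask bug: "&=" clears every other flag and cannot set the requested one, while "|=" sets it and preserves the rest. Demonstrated standalone; the flag value below is illustrative, not the real NFS_MOUNT_SOFTREVAL:

    #include <assert.h>

    #define NFS_MOUNT_SOFTREVAL 0x800   /* illustrative value */

    int main(void)
    {
        unsigned int flags = 0x00f;

        flags &= NFS_MOUNT_SOFTREVAL;   /* bug: wipes the other flags and
                                           never sets this one */
        assert(flags == 0);

        flags = 0x00f;
        flags |= NFS_MOUNT_SOFTREVAL;   /* fix: sets the bit, keeps the rest */
        assert(flags == (0x00f | NFS_MOUNT_SOFTREVAL));
        return 0;
    }
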
2741 +diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
2742 +index 2ff6bd85ba8f6..f2a1947ec5ee0 100644
2743 +--- a/fs/notify/fanotify/fanotify_user.c
2744 ++++ b/fs/notify/fanotify/fanotify_user.c
2745 +@@ -1638,6 +1638,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
2746 + else
2747 + mnt = path.mnt;
2748 +
2749 ++ /*
2750 ++ * FAN_RENAME is not allowed on non-dir (for now).
2751 ++ * We shouldn't have allowed setting any dirent events in mask of
2752 ++ * non-dir, but because we always allowed it, error only if group
2753 ++ * was initialized with the new flag FAN_REPORT_TARGET_FID.
2754 ++ */
2755 ++ ret = -ENOTDIR;
2756 ++ if (inode && !S_ISDIR(inode->i_mode) &&
2757 ++ ((mask & FAN_RENAME) ||
2758 ++ ((mask & FANOTIFY_DIRENT_EVENTS) &&
2759 ++ FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID))))
2760 ++ goto path_put_and_out;
2761 ++
2762 + /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
2763 + if (mnt || !S_ISDIR(inode->i_mode)) {
2764 + mask &= ~FAN_EVENT_ON_CHILD;
2765 +diff --git a/fs/proc/fd.c b/fs/proc/fd.c
2766 +index 172c86270b312..913bef0d2a36c 100644
2767 +--- a/fs/proc/fd.c
2768 ++++ b/fs/proc/fd.c
2769 +@@ -72,7 +72,7 @@ out:
2770 + return 0;
2771 + }
2772 +
2773 +-static int seq_fdinfo_open(struct inode *inode, struct file *file)
2774 ++static int proc_fdinfo_access_allowed(struct inode *inode)
2775 + {
2776 + bool allowed = false;
2777 + struct task_struct *task = get_proc_task(inode);
2778 +@@ -86,6 +86,16 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
2779 + if (!allowed)
2780 + return -EACCES;
2781 +
2782 ++ return 0;
2783 ++}
2784 ++
2785 ++static int seq_fdinfo_open(struct inode *inode, struct file *file)
2786 ++{
2787 ++ int ret = proc_fdinfo_access_allowed(inode);
2788 ++
2789 ++ if (ret)
2790 ++ return ret;
2791 ++
2792 + return single_open(file, seq_show, inode);
2793 + }
2794 +
2795 +@@ -348,12 +358,23 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
2796 + proc_fdinfo_instantiate);
2797 + }
2798 +
2799 ++static int proc_open_fdinfo(struct inode *inode, struct file *file)
2800 ++{
2801 ++ int ret = proc_fdinfo_access_allowed(inode);
2802 ++
2803 ++ if (ret)
2804 ++ return ret;
2805 ++
2806 ++ return 0;
2807 ++}
2808 ++
2809 + const struct inode_operations proc_fdinfo_inode_operations = {
2810 + .lookup = proc_lookupfdinfo,
2811 + .setattr = proc_setattr,
2812 + };
2813 +
2814 + const struct file_operations proc_fdinfo_operations = {
2815 ++ .open = proc_open_fdinfo,
2816 + .read = generic_read_dir,
2817 + .iterate_shared = proc_readfdinfo,
2818 + .llseek = generic_file_llseek,
2819 +diff --git a/include/linux/bio.h b/include/linux/bio.h
2820 +index 117d7f248ac96..2ca54c084d5ad 100644
2821 +--- a/include/linux/bio.h
2822 ++++ b/include/linux/bio.h
2823 +@@ -272,6 +272,7 @@ struct folio_iter {
2824 + size_t offset;
2825 + size_t length;
2826 + /* private: for use by the iterator */
2827 ++ struct folio *_next;
2828 + size_t _seg_count;
2829 + int _i;
2830 + };
2831 +@@ -286,6 +287,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
2832 + PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
2833 + fi->_seg_count = bvec->bv_len;
2834 + fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
2835 ++ fi->_next = folio_next(fi->folio);
2836 + fi->_i = i;
2837 + }
2838 +
2839 +@@ -293,9 +295,10 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
2840 + {
2841 + fi->_seg_count -= fi->length;
2842 + if (fi->_seg_count) {
2843 +- fi->folio = folio_next(fi->folio);
2844 ++ fi->folio = fi->_next;
2845 + fi->offset = 0;
2846 + fi->length = min(folio_size(fi->folio), fi->_seg_count);
2847 ++ fi->_next = folio_next(fi->folio);
2848 + } else if (fi->_i + 1 < bio->bi_vcnt) {
2849 + bio_first_folio(fi, bio, fi->_i + 1);
2850 + } else {
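
The bio.h hunk caches folio_next(fi->folio) in the private _next field while the iterator still holds the current folio, so advancing stays safe even if the loop body releases that folio. It is the same idea as the classic "safe" list walk, shown here on a plain linked list:

    #include <stdlib.h>

    struct node { struct node *next; };

    static void free_all(struct node *head)
    {
        struct node *n = head, *next;

        while (n) {
            next = n->next;   /* cache before the element can disappear */
            free(n);          /* the body may invalidate 'n' ... */
            n = next;         /* ... but the walk stays valid */
        }
    }
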
2851 +diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
2852 +index 2c6b9e4162254..7c2d77d75a888 100644
2853 +--- a/include/linux/netdev_features.h
2854 ++++ b/include/linux/netdev_features.h
2855 +@@ -169,7 +169,7 @@ enum {
2856 + #define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
2857 + #define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
2858 +
2859 +-/* Finds the next feature with the highest number of the range of start till 0.
2860 ++/* Finds the next feature with the highest number of the range of start-1 till 0.
2861 + */
2862 + static inline int find_next_netdev_feature(u64 feature, unsigned long start)
2863 + {
2864 +@@ -188,7 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
2865 + for ((bit) = find_next_netdev_feature((mask_addr), \
2866 + NETDEV_FEATURE_COUNT); \
2867 + (bit) >= 0; \
2868 +- (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
2869 ++ (bit) = find_next_netdev_feature((mask_addr), (bit)))
2870 +
2871 + /* Features valid for ethtool to change */
2872 + /* = all defined minus driver/device-class-related */
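
The netdev_features.h change works because the helper now scans the range start-1 down to 0 itself, so the iteration macro passes "bit" instead of "bit - 1" and the walk terminates cleanly after bit 0 instead of underflowing. A standalone model of the corrected loop, with simplified names:

    #include <assert.h>
    #include <stdint.h>

    /* Highest set bit strictly below 'start', or -1 if none. */
    static int next_feature_below(uint64_t mask, int start)
    {
        for (int bit = start - 1; bit >= 0; bit--)
            if (mask & ((uint64_t)1 << bit))
                return bit;
        return -1;
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 0) | (1ULL << 5) | (1ULL << 63);
        int seen = 0;

        /* Visits bits 63, 5, 0; the loop passes 'bit', not 'bit - 1',
         * because the helper already steps below 'start'. */
        for (int bit = next_feature_below(mask, 64); bit >= 0;
             bit = next_feature_below(mask, bit))
            seen++;

        assert(seen == 3);
        return 0;
    }
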
2873 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
2874 +index 267b7aeaf1a69..90501404fa49f 100644
2875 +--- a/include/linux/sunrpc/clnt.h
2876 ++++ b/include/linux/sunrpc/clnt.h
2877 +@@ -160,6 +160,7 @@ struct rpc_add_xprt_test {
2878 + #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
2879 + #define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
2880 + #define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
2881 ++#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
2882 +
2883 + struct rpc_clnt *rpc_create(struct rpc_create_args *args);
2884 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
2885 +diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
2886 +index f72ec113ae568..98e1ec1a14f03 100644
2887 +--- a/include/net/inet_hashtables.h
2888 ++++ b/include/net/inet_hashtables.h
2889 +@@ -425,7 +425,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
2890 + }
2891 +
2892 + int __inet_hash_connect(struct inet_timewait_death_row *death_row,
2893 +- struct sock *sk, u32 port_offset,
2894 ++ struct sock *sk, u64 port_offset,
2895 + int (*check_established)(struct inet_timewait_death_row *,
2896 + struct sock *, __u16,
2897 + struct inet_timewait_sock **));
2898 +diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
2899 +index d7d2495f83c27..dac91aa38c5af 100644
2900 +--- a/include/net/secure_seq.h
2901 ++++ b/include/net/secure_seq.h
2902 +@@ -4,8 +4,8 @@
2903 +
2904 + #include <linux/types.h>
2905 +
2906 +-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
2907 +-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
2908 ++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
2909 ++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
2910 + __be16 dport);
2911 + u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
2912 + __be16 sport, __be16 dport);
2913 +diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
2914 +index 748cf87a4d7ea..3e02709a1df65 100644
2915 +--- a/include/net/tc_act/tc_pedit.h
2916 ++++ b/include/net/tc_act/tc_pedit.h
2917 +@@ -14,6 +14,7 @@ struct tcf_pedit {
2918 + struct tc_action common;
2919 + unsigned char tcfp_nkeys;
2920 + unsigned char tcfp_flags;
2921 ++ u32 tcfp_off_max_hint;
2922 + struct tc_pedit_key *tcfp_keys;
2923 + struct tcf_pedit_key_ex *tcfp_keys_ex;
2924 + };
2925 +diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
2926 +index 80d76b75bccd9..7aa2eb7662050 100644
2927 +--- a/include/uapi/linux/virtio_ids.h
2928 ++++ b/include/uapi/linux/virtio_ids.h
2929 +@@ -73,12 +73,12 @@
2930 + * Virtio Transitional IDs
2931 + */
2932 +
2933 +-#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */
2934 +-#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */
2935 +-#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */
2936 +-#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */
2937 +-#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */
2938 +-#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */
2939 +-#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p console */
2940 ++#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
2941 ++#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
2942 ++#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
2943 ++#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
2944 ++#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
2945 ++#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
2946 ++#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
2947 +
2948 + #endif /* _LINUX_VIRTIO_IDS_H */
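
The transitional virtio PCI device IDs sit in the 0x1000 range, so the constants above were always meant to be hexadecimal; written in decimal, 1000 is 0x3e8 and names the wrong device entirely. A two-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* decimal 1000 is 0x3e8, not the PCI device ID 0x1000 (4096) */
	printf("1000 = 0x%x, 0x1000 = %d\n", 1000, 0x1000);
	return 0;
}
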
2949 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
2950 +index 5de18448016cd..f9dd3aaa8486d 100644
2951 +--- a/kernel/cgroup/cpuset.c
2952 ++++ b/kernel/cgroup/cpuset.c
2953 +@@ -3390,8 +3390,11 @@ static struct notifier_block cpuset_track_online_nodes_nb = {
2954 + */
2955 + void __init cpuset_init_smp(void)
2956 + {
2957 +- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2958 +- top_cpuset.mems_allowed = node_states[N_MEMORY];
2959 ++ /*
2960 ++ * cpus_allowed/mems_allowed set to v2 values in the initial
2961 ++ * cpuset_bind() call will be reset to v1 values in another
2962 ++ * cpuset_bind() call when v1 cpuset is mounted.
2963 ++ */
2964 + top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
2965 +
2966 + cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
2967 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
2968 +index a4426a00b9edf..db3e91ba58cd4 100644
2969 +--- a/kernel/irq/irqdesc.c
2970 ++++ b/kernel/irq/irqdesc.c
2971 +@@ -678,7 +678,6 @@ EXPORT_SYMBOL_GPL(generic_handle_irq);
2972 + */
2973 + int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
2974 + {
2975 +- WARN_ON_ONCE(!in_irq());
2976 + return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
2977 + }
2978 + EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
2979 +diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
2980 +index 06811d866775c..53f6b9c6e9366 100644
2981 +--- a/lib/dim/net_dim.c
2982 ++++ b/lib/dim/net_dim.c
2983 +@@ -12,41 +12,41 @@
2984 + * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
2985 + */
2986 + #define NET_DIM_PARAMS_NUM_PROFILES 5
2987 +-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
2988 +-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
2989 ++#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
2990 ++#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
2991 + #define NET_DIM_DEF_PROFILE_CQE 1
2992 + #define NET_DIM_DEF_PROFILE_EQE 1
2993 +
2994 + #define NET_DIM_RX_EQE_PROFILES { \
2995 +- {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
2996 +- {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
2997 +- {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
2998 +- {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
2999 +- {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
3000 ++ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
3001 ++ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
3002 ++ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
3003 ++ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
3004 ++ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
3005 + }
3006 +
3007 + #define NET_DIM_RX_CQE_PROFILES { \
3008 +- {2, 256}, \
3009 +- {8, 128}, \
3010 +- {16, 64}, \
3011 +- {32, 64}, \
3012 +- {64, 64} \
3013 ++ {.usec = 2, .pkts = 256,}, \
3014 ++ {.usec = 8, .pkts = 128,}, \
3015 ++ {.usec = 16, .pkts = 64,}, \
3016 ++ {.usec = 32, .pkts = 64,}, \
3017 ++ {.usec = 64, .pkts = 64,} \
3018 + }
3019 +
3020 + #define NET_DIM_TX_EQE_PROFILES { \
3021 +- {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
3022 +- {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
3023 +- {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
3024 +- {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
3025 +- {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
3026 ++ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
3027 ++ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
3028 ++ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
3029 ++ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
3030 ++ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
3031 + }
3032 +
3033 + #define NET_DIM_TX_CQE_PROFILES { \
3034 +- {5, 128}, \
3035 +- {8, 64}, \
3036 +- {16, 32}, \
3037 +- {32, 32}, \
3038 +- {64, 32} \
3039 ++ {.usec = 5, .pkts = 128,}, \
3040 ++ {.usec = 8, .pkts = 64,}, \
3041 ++ {.usec = 16, .pkts = 32,}, \
3042 ++ {.usec = 32, .pkts = 32,}, \
3043 ++ {.usec = 64, .pkts = 32,} \
3044 + }
3045 +
3046 + static const struct dim_cq_moder
3047 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3048 +index 406a3c28c0266..fb91636917057 100644
3049 +--- a/mm/huge_memory.c
3050 ++++ b/mm/huge_memory.c
3051 +@@ -2609,11 +2609,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
3052 + struct address_space *mapping = NULL;
3053 + int extra_pins, ret;
3054 + pgoff_t end;
3055 ++ bool is_hzp;
3056 +
3057 +- VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
3058 + VM_BUG_ON_PAGE(!PageLocked(head), head);
3059 + VM_BUG_ON_PAGE(!PageCompound(head), head);
3060 +
3061 ++ is_hzp = is_huge_zero_page(head);
3062 ++ VM_WARN_ON_ONCE_PAGE(is_hzp, head);
3063 ++ if (is_hzp)
3064 ++ return -EBUSY;
3065 ++
3066 + if (PageWriteback(head))
3067 + return -EBUSY;
3068 +
3069 +diff --git a/mm/kfence/core.c b/mm/kfence/core.c
3070 +index af82c6f7d7239..a527f2769ea82 100644
3071 +--- a/mm/kfence/core.c
3072 ++++ b/mm/kfence/core.c
3073 +@@ -515,6 +515,7 @@ static bool __init kfence_init_pool(void)
3074 + {
3075 + unsigned long addr = (unsigned long)__kfence_pool;
3076 + struct page *pages;
3077 ++ char *p;
3078 + int i;
3079 +
3080 + if (!__kfence_pool)
3081 +@@ -598,6 +599,16 @@ err:
3082 + * fails for the first page, and therefore expect addr==__kfence_pool in
3083 + * most failure cases.
3084 + */
3085 ++ for (p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
3086 ++ struct slab *slab = virt_to_slab(p);
3087 ++
3088 ++ if (!slab)
3089 ++ continue;
3090 ++#ifdef CONFIG_MEMCG
3091 ++ slab->memcg_data = 0;
3092 ++#endif
3093 ++ __folio_clear_slab(slab_folio(slab));
3094 ++ }
3095 + memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
3096 + __kfence_pool = NULL;
3097 + return false;
3098 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3099 +index 682eedb5ea75b..7889d2612384d 100644
3100 +--- a/mm/memory-failure.c
3101 ++++ b/mm/memory-failure.c
3102 +@@ -1275,7 +1275,7 @@ try_again:
3103 + }
3104 + out:
3105 + if (ret == -EIO)
3106 +- dump_page(p, "hwpoison: unhandlable page");
3107 ++ pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
3108 +
3109 + return ret;
3110 + }
3111 +@@ -1781,19 +1781,6 @@ try_again:
3112 + }
3113 +
3114 + if (PageTransHuge(hpage)) {
3115 +- /*
3116 +- * Bail out before SetPageHasHWPoisoned() if hpage is
3117 +- * huge_zero_page, although PG_has_hwpoisoned is not
3118 +- * checked in set_huge_zero_page().
3119 +- *
3120 +- * TODO: Handle memory failure of huge_zero_page thoroughly.
3121 +- */
3122 +- if (is_huge_zero_page(hpage)) {
3123 +- action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
3124 +- res = -EBUSY;
3125 +- goto unlock_mutex;
3126 +- }
3127 +-
3128 + /*
3129 + * The flag must be set after the refcount is bumped
3130 + * otherwise it may race with THP split.
3131 +diff --git a/mm/mremap.c b/mm/mremap.c
3132 +index 0e175aef536e1..bbcf48fd56a0d 100644
3133 +--- a/mm/mremap.c
3134 ++++ b/mm/mremap.c
3135 +@@ -947,7 +947,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
3136 + return -EINTR;
3137 + vma = find_vma(mm, addr);
3138 + if (!vma || vma->vm_start > addr) {
3139 +- ret = EFAULT;
3140 ++ ret = -EFAULT;
3141 + goto out;
3142 + }
3143 +
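
The one-character mremap fix above restores the kernel's error convention: syscalls report failure as a negative errno, and since mremap() returns the new mapping address on success, a positive EFAULT would have been handed back to userspace as if it were a valid address. A short illustration:

#include <errno.h>
#include <stdio.h>

/* The kernel reports syscall failure as a negative errno; the C
 * library recognizes returns in [-4095, -1] as errors. A positive
 * EFAULT (14) from mremap() would instead look like a successfully
 * remapped address. */
int main(void)
{
	long buggy = EFAULT;  /* 14: indistinguishable from success */
	long fixed = -EFAULT; /* -14: correctly flagged as an error */

	printf("buggy return: %ld, fixed return: %ld\n", buggy, fixed);
	return 0;
}
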
3144 +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
3145 +index 0899a729a23f4..c120c7c6d25fc 100644
3146 +--- a/net/batman-adv/fragmentation.c
3147 ++++ b/net/batman-adv/fragmentation.c
3148 +@@ -475,6 +475,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
3149 + goto free_skb;
3150 + }
3151 +
3152 ++ /* GRO might have added fragments to the fragment list instead of
3153 ++ * frags[]. But this is not handled by skb_split and must be
3154 ++ * linearized to avoid incorrect length information after all
3155 ++ * batman-adv fragments were created and submitted to the
3156 ++ * hard-interface
3157 ++ */
3158 ++ if (skb_has_frag_list(skb) && __skb_linearize(skb)) {
3159 ++ ret = -ENOMEM;
3160 ++ goto free_skb;
3161 ++ }
3162 ++
3163 + /* Create one header to be copied to all fragments */
3164 + frag_header.packet_type = BATADV_UNICAST_FRAG;
3165 + frag_header.version = BATADV_COMPAT_VERSION;
3166 +diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
3167 +index 9b8443774449f..5f85e01d4093b 100644
3168 +--- a/net/core/secure_seq.c
3169 ++++ b/net/core/secure_seq.c
3170 +@@ -22,6 +22,8 @@
3171 + static siphash_aligned_key_t net_secret;
3172 + static siphash_aligned_key_t ts_secret;
3173 +
3174 ++#define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ)
3175 ++
3176 + static __always_inline void net_secret_init(void)
3177 + {
3178 + net_get_random_once(&net_secret, sizeof(net_secret));
3179 +@@ -94,17 +96,19 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
3180 + }
3181 + EXPORT_SYMBOL(secure_tcpv6_seq);
3182 +
3183 +-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
3184 ++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
3185 + __be16 dport)
3186 + {
3187 + const struct {
3188 + struct in6_addr saddr;
3189 + struct in6_addr daddr;
3190 ++ unsigned int timeseed;
3191 + __be16 dport;
3192 + } __aligned(SIPHASH_ALIGNMENT) combined = {
3193 + .saddr = *(struct in6_addr *)saddr,
3194 + .daddr = *(struct in6_addr *)daddr,
3195 +- .dport = dport
3196 ++ .timeseed = jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD,
3197 ++ .dport = dport,
3198 + };
3199 + net_secret_init();
3200 + return siphash(&combined, offsetofend(typeof(combined), dport),
3201 +@@ -142,11 +146,13 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
3202 + }
3203 + EXPORT_SYMBOL_GPL(secure_tcp_seq);
3204 +
3205 +-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
3206 ++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
3207 + {
3208 + net_secret_init();
3209 +- return siphash_3u32((__force u32)saddr, (__force u32)daddr,
3210 +- (__force u16)dport, &net_secret);
3211 ++ return siphash_4u32((__force u32)saddr, (__force u32)daddr,
3212 ++ (__force u16)dport,
3213 ++ jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD,
3214 ++ &net_secret);
3215 + }
3216 + EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
3217 + #endif
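
Both secure_seq helpers now return 64 bits and mix a coarse time value, jiffies divided by a 10-second period, into the siphash input, so the ephemeral port sequence toward a given destination reshuffles every 10 seconds instead of staying fixed for the whole uptime. A trivial userspace model of the time bucket (wall-clock seconds standing in for jiffies/HZ):

#include <stdio.h>
#include <time.h>

#define SHUFFLE_PERIOD 10 /* seconds, mirroring 10 * HZ in jiffies */

int main(void)
{
	/* all hashes computed within one bucket share a seed; the seed,
	 * and thus the derived port sequence, changes each period */
	unsigned int timeseed = (unsigned int)(time(NULL) / SHUFFLE_PERIOD);

	printf("time bucket: %u\n", timeseed);
	return 0;
}
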
3218 +diff --git a/net/dsa/port.c b/net/dsa/port.c
3219 +index 4368fd32c4a50..f4bd063f83151 100644
3220 +--- a/net/dsa/port.c
3221 ++++ b/net/dsa/port.c
3222 +@@ -367,6 +367,7 @@ out_rollback_unoffload:
3223 + switchdev_bridge_port_unoffload(brport_dev, dp,
3224 + &dsa_slave_switchdev_notifier,
3225 + &dsa_slave_switchdev_blocking_notifier);
3226 ++ dsa_flush_workqueue();
3227 + out_rollback_unbridge:
3228 + dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
3229 + out_rollback:
3230 +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
3231 +index 17440840a7914..a5d57fa679caa 100644
3232 +--- a/net/ipv4/inet_hashtables.c
3233 ++++ b/net/ipv4/inet_hashtables.c
3234 +@@ -504,7 +504,7 @@ not_unique:
3235 + return -EADDRNOTAVAIL;
3236 + }
3237 +
3238 +-static u32 inet_sk_port_offset(const struct sock *sk)
3239 ++static u64 inet_sk_port_offset(const struct sock *sk)
3240 + {
3241 + const struct inet_sock *inet = inet_sk(sk);
3242 +
3243 +@@ -726,15 +726,17 @@ EXPORT_SYMBOL_GPL(inet_unhash);
3244 + * Note that we use 32bit integers (vs RFC 'short integers')
3245 + * because 2^16 is not a multiple of num_ephemeral and this
3246 + * property might be used by clever attacker.
3247 +- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
3248 +- * we use 256 instead to really give more isolation and
3249 +- * privacy, this only consumes 1 KB of kernel memory.
3250 ++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
3251 ++ * attacks were since demonstrated, thus we use 65536 instead to really
3252 ++ * give more isolation and privacy, at the expense of 256kB of kernel
3253 ++ * memory.
3254 + */
3255 +-#define INET_TABLE_PERTURB_SHIFT 8
3256 +-static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
3257 ++#define INET_TABLE_PERTURB_SHIFT 16
3258 ++#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
3259 ++static u32 *table_perturb;
3260 +
3261 + int __inet_hash_connect(struct inet_timewait_death_row *death_row,
3262 +- struct sock *sk, u32 port_offset,
3263 ++ struct sock *sk, u64 port_offset,
3264 + int (*check_established)(struct inet_timewait_death_row *,
3265 + struct sock *, __u16, struct inet_timewait_sock **))
3266 + {
3267 +@@ -774,10 +776,13 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
3268 + if (likely(remaining > 1))
3269 + remaining &= ~1U;
3270 +
3271 +- net_get_random_once(table_perturb, sizeof(table_perturb));
3272 +- index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
3273 ++ net_get_random_once(table_perturb,
3274 ++ INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
3275 ++ index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
3276 ++
3277 ++ offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
3278 ++ offset %= remaining;
3279 +
3280 +- offset = (READ_ONCE(table_perturb[index]) + port_offset) % remaining;
3281 + /* In first pass we try ports of @low parity.
3282 + * inet_csk_get_port() does the opposite choice.
3283 + */
3284 +@@ -831,11 +836,12 @@ next_port:
3285 + return -EADDRNOTAVAIL;
3286 +
3287 + ok:
3288 +- /* If our first attempt found a candidate, skip next candidate
3289 +- * in 1/16 of cases to add some noise.
3290 ++ /* Here we want to add a little bit of randomness to the next source
3291 ++ * port that will be chosen. We use a max() with a random here so that
3292 ++ * on low contention the randomness is maximal and on high contention
3293 ++ * it may be nonexistent.
3294 + */
3295 +- if (!i && !(prandom_u32() % 16))
3296 +- i = 2;
3297 ++ i = max_t(int, i, (prandom_u32() & 7) * 2);
3298 + WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
3299 +
3300 + /* Head lock still held and bh's disabled */
3301 +@@ -859,7 +865,7 @@ ok:
3302 + int inet_hash_connect(struct inet_timewait_death_row *death_row,
3303 + struct sock *sk)
3304 + {
3305 +- u32 port_offset = 0;
3306 ++ u64 port_offset = 0;
3307 +
3308 + if (!inet_sk(sk)->inet_num)
3309 + port_offset = inet_sk_port_offset(sk);
3310 +@@ -909,6 +915,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
3311 + low_limit,
3312 + high_limit);
3313 + init_hashinfo_lhash2(h);
3314 ++
3315 ++ /* this one is used for source ports of outgoing connections */
3316 ++ table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
3317 ++ sizeof(*table_perturb), GFP_KERNEL);
3318 ++ if (!table_perturb)
3319 ++ panic("TCP: failed to alloc table_perturb");
3320 + }
3321 +
3322 + int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
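
To see how the widened 64-bit port_offset is consumed in __inet_hash_connect() above: the low 16 bits pick one of the 65536 table_perturb slots and the high 32 bits are folded into the starting offset, so observing one connection's source port no longer reveals which slot it perturbed. A compact userspace model of that split (table contents illustrative; the kernel fills them with random data):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SHIFT 16
#define TABLE_SIZE  (1u << TABLE_SHIFT)

static uint32_t table_perturb[TABLE_SIZE]; /* randomized in the kernel */

/* Model of the index/offset split: low bits pick the perturbation
 * slot, high 32 bits randomize the start point within the 'remaining'
 * ephemeral ports. */
static uint32_t pick_start(uint64_t port_offset, uint32_t remaining)
{
	uint32_t index = port_offset & (TABLE_SIZE - 1);
	uint64_t offset = table_perturb[index] + (port_offset >> 32);

	return (uint32_t)(offset % remaining);
}

int main(void)
{
	printf("start = %u\n", pick_start(0x0123456789abcdefULL, 28232));
	return 0;
}
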
3323 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
3324 +index 3ee947557b883..aa9a11b20d18e 100644
3325 +--- a/net/ipv4/ping.c
3326 ++++ b/net/ipv4/ping.c
3327 +@@ -305,6 +305,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
3328 + struct net *net = sock_net(sk);
3329 + if (sk->sk_family == AF_INET) {
3330 + struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
3331 ++ u32 tb_id = RT_TABLE_LOCAL;
3332 + int chk_addr_ret;
3333 +
3334 + if (addr_len < sizeof(*addr))
3335 +@@ -318,7 +319,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
3336 + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
3337 + sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
3338 +
3339 +- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
3340 ++ tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
3341 ++ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
3342 +
3343 + if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
3344 + addr->sin_addr.s_addr,
3345 +@@ -355,6 +357,14 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
3346 + return -ENODEV;
3347 + }
3348 + }
3349 ++
3350 ++ if (!dev && sk->sk_bound_dev_if) {
3351 ++ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
3352 ++ if (!dev) {
3353 ++ rcu_read_unlock();
3354 ++ return -ENODEV;
3355 ++ }
3356 ++ }
3357 + has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
3358 + scoped);
3359 + rcu_read_unlock();
3360 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3361 +index d5d058de36646..eef07b62b2d88 100644
3362 +--- a/net/ipv4/route.c
3363 ++++ b/net/ipv4/route.c
3364 +@@ -1748,6 +1748,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
3365 + #endif
3366 + RT_CACHE_STAT_INC(in_slow_mc);
3367 +
3368 ++ skb_dst_drop(skb);
3369 + skb_dst_set(skb, &rth->dst);
3370 + return 0;
3371 + }
3372 +diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
3373 +index 4740afecf7c62..32ccac10bd625 100644
3374 +--- a/net/ipv6/inet6_hashtables.c
3375 ++++ b/net/ipv6/inet6_hashtables.c
3376 +@@ -308,7 +308,7 @@ not_unique:
3377 + return -EADDRNOTAVAIL;
3378 + }
3379 +
3380 +-static u32 inet6_sk_port_offset(const struct sock *sk)
3381 ++static u64 inet6_sk_port_offset(const struct sock *sk)
3382 + {
3383 + const struct inet_sock *inet = inet_sk(sk);
3384 +
3385 +@@ -320,7 +320,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
3386 + int inet6_hash_connect(struct inet_timewait_death_row *death_row,
3387 + struct sock *sk)
3388 + {
3389 +- u32 port_offset = 0;
3390 ++ u64 port_offset = 0;
3391 +
3392 + if (!inet_sk(sk)->inet_num)
3393 + port_offset = inet6_sk_port_offset(sk);
3394 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3395 +index c4d3e2da73f23..b5ac06b96329a 100644
3396 +--- a/net/mac80211/mlme.c
3397 ++++ b/net/mac80211/mlme.c
3398 +@@ -3574,6 +3574,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
3399 + cbss->transmitted_bss->bssid);
3400 + bss_conf->bssid_indicator = cbss->max_bssid_indicator;
3401 + bss_conf->bssid_index = cbss->bssid_index;
3402 ++ } else {
3403 ++ bss_conf->nontransmitted = false;
3404 ++ memset(bss_conf->transmitter_bssid, 0,
3405 ++ sizeof(bss_conf->transmitter_bssid));
3406 ++ bss_conf->bssid_indicator = 0;
3407 ++ bss_conf->bssid_index = 0;
3408 + }
3409 +
3410 + /*
3411 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3412 +index 05a3795eac8e9..73e9c0a9c1876 100644
3413 +--- a/net/netlink/af_netlink.c
3414 ++++ b/net/netlink/af_netlink.c
3415 +@@ -1975,7 +1975,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3416 + copied = len;
3417 + }
3418 +
3419 +- skb_reset_transport_header(data_skb);
3420 + err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
3421 +
3422 + if (msg->msg_name) {
3423 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3424 +index 2f638f8b7b1e7..73ee2771093d6 100644
3425 +--- a/net/rds/tcp.c
3426 ++++ b/net/rds/tcp.c
3427 +@@ -487,11 +487,11 @@ struct rds_tcp_net {
3428 + /* All module specific customizations to the RDS-TCP socket should be done in
3429 + * rds_tcp_tune() and applied after socket creation.
3430 + */
3431 +-void rds_tcp_tune(struct socket *sock)
3432 ++bool rds_tcp_tune(struct socket *sock)
3433 + {
3434 + struct sock *sk = sock->sk;
3435 + struct net *net = sock_net(sk);
3436 +- struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
3437 ++ struct rds_tcp_net *rtn;
3438 +
3439 + tcp_sock_set_nodelay(sock->sk);
3440 + lock_sock(sk);
3441 +@@ -499,10 +499,15 @@ void rds_tcp_tune(struct socket *sock)
3442 + * a process which created this net namespace terminated.
3443 + */
3444 + if (!sk->sk_net_refcnt) {
3445 ++ if (!maybe_get_net(net)) {
3446 ++ release_sock(sk);
3447 ++ return false;
3448 ++ }
3449 + sk->sk_net_refcnt = 1;
3450 +- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
3451 ++ netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
3452 + sock_inuse_add(net, 1);
3453 + }
3454 ++ rtn = net_generic(net, rds_tcp_netid);
3455 + if (rtn->sndbuf_size > 0) {
3456 + sk->sk_sndbuf = rtn->sndbuf_size;
3457 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
3458 +@@ -512,6 +517,7 @@ void rds_tcp_tune(struct socket *sock)
3459 + sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
3460 + }
3461 + release_sock(sk);
3462 ++ return true;
3463 + }
3464 +
3465 + static void rds_tcp_accept_worker(struct work_struct *work)
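
rds_tcp_tune() now returns bool because the socket may belong to a namespace that is already being torn down: maybe_get_net() only takes a reference while the netns refcount is non-zero, and on failure the callers below abort the connection attempt with -EINVAL instead of reviving a dead namespace. A non-atomic userspace model of that try-get pattern:

#include <stdbool.h>
#include <stdio.h>

/* Model of maybe_get_net(): take a reference only while the object is
 * still alive; never revive a count that already hit zero. The kernel
 * version is atomic, this sketch is not. */
static bool ref_get_not_zero(unsigned int *refcount)
{
	if (*refcount == 0)
		return false;
	(*refcount)++;
	return true;
}

int main(void)
{
	unsigned int live = 1, dying = 0;

	printf("live: %s\n", ref_get_not_zero(&live) ? "got ref" : "refused");
	printf("dying: %s\n", ref_get_not_zero(&dying) ? "got ref" : "refused");
	return 0;
}
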
3466 +diff --git a/net/rds/tcp.h b/net/rds/tcp.h
3467 +index dc8d745d68575..f8b5930d7b343 100644
3468 +--- a/net/rds/tcp.h
3469 ++++ b/net/rds/tcp.h
3470 +@@ -49,7 +49,7 @@ struct rds_tcp_statistics {
3471 + };
3472 +
3473 + /* tcp.c */
3474 +-void rds_tcp_tune(struct socket *sock);
3475 ++bool rds_tcp_tune(struct socket *sock);
3476 + void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
3477 + void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
3478 + void rds_tcp_restore_callbacks(struct socket *sock,
3479 +diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
3480 +index 5461d77fff4f4..f0c477c5d1db4 100644
3481 +--- a/net/rds/tcp_connect.c
3482 ++++ b/net/rds/tcp_connect.c
3483 +@@ -124,7 +124,10 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
3484 + if (ret < 0)
3485 + goto out;
3486 +
3487 +- rds_tcp_tune(sock);
3488 ++ if (!rds_tcp_tune(sock)) {
3489 ++ ret = -EINVAL;
3490 ++ goto out;
3491 ++ }
3492 +
3493 + if (isv6) {
3494 + sin6.sin6_family = AF_INET6;
3495 +diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
3496 +index 09cadd556d1e1..7edf2e69d3fed 100644
3497 +--- a/net/rds/tcp_listen.c
3498 ++++ b/net/rds/tcp_listen.c
3499 +@@ -133,7 +133,10 @@ int rds_tcp_accept_one(struct socket *sock)
3500 + __module_get(new_sock->ops->owner);
3501 +
3502 + rds_tcp_keepalive(new_sock);
3503 +- rds_tcp_tune(new_sock);
3504 ++ if (!rds_tcp_tune(new_sock)) {
3505 ++ ret = -EINVAL;
3506 ++ goto out;
3507 ++ }
3508 +
3509 + inet = inet_sk(new_sock->sk);
3510 +
3511 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
3512 +index 31fcd279c1776..0eaaf1f45de17 100644
3513 +--- a/net/sched/act_pedit.c
3514 ++++ b/net/sched/act_pedit.c
3515 +@@ -149,7 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
3516 + struct nlattr *pattr;
3517 + struct tcf_pedit *p;
3518 + int ret = 0, err;
3519 +- int ksize;
3520 ++ int i, ksize;
3521 + u32 index;
3522 +
3523 + if (!nla) {
3524 +@@ -228,6 +228,18 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
3525 + p->tcfp_nkeys = parm->nkeys;
3526 + }
3527 + memcpy(p->tcfp_keys, parm->keys, ksize);
3528 ++ p->tcfp_off_max_hint = 0;
3529 ++ for (i = 0; i < p->tcfp_nkeys; ++i) {
3530 ++ u32 cur = p->tcfp_keys[i].off;
3531 ++
3532 ++ /* The AT option can read a single byte, we can bound the actual
3533 ++ * value with uchar max.
3534 ++ */
3535 ++ cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
3536 ++
3537 ++ /* Each key touches 4 bytes starting from the computed offset */
3538 ++ p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
3539 ++ }
3540 +
3541 + p->tcfp_flags = parm->flags;
3542 + goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
3543 +@@ -308,13 +320,18 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
3544 + struct tcf_result *res)
3545 + {
3546 + struct tcf_pedit *p = to_pedit(a);
3547 ++ u32 max_offset;
3548 + int i;
3549 +
3550 +- if (skb_unclone(skb, GFP_ATOMIC))
3551 +- return p->tcf_action;
3552 +-
3553 + spin_lock(&p->tcf_lock);
3554 +
3555 ++ max_offset = (skb_transport_header_was_set(skb) ?
3556 ++ skb_transport_offset(skb) :
3557 ++ skb_network_offset(skb)) +
3558 ++ p->tcfp_off_max_hint;
3559 ++ if (skb_ensure_writable(skb, min(skb->len, max_offset)))
3560 ++ goto unlock;
3561 ++
3562 + tcf_lastuse_update(&p->tcf_tm);
3563 +
3564 + if (p->tcfp_nkeys > 0) {
3565 +@@ -403,6 +420,7 @@ bad:
3566 + p->tcf_qstats.overlimits++;
3567 + done:
3568 + bstats_update(&p->tcf_bstats, skb);
3569 ++unlock:
3570 + spin_unlock(&p->tcf_lock);
3571 + return p->tcf_action;
3572 + }
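
Taken together, the two act_pedit hunks replace the unconditional skb_unclone() with a bounded skb_ensure_writable(): tcf_pedit_init() precomputes the furthest byte any key can touch, where (0xff & offmask) >> shift bounds the dynamic AT-option offset (a single-byte read) and each key writes 4 bytes past its offset. A standalone sketch of the bound computation:

#include <stdint.h>
#include <stdio.h>

struct pedit_key {
	uint32_t off;
	uint32_t offmask;
	uint32_t shift;
};

/* Mirror of the bound computed in tcf_pedit_init() above. */
static uint32_t off_max_hint(const struct pedit_key *keys, int nkeys)
{
	uint32_t hint = 0;

	for (int i = 0; i < nkeys; i++) {
		uint32_t cur = keys[i].off;

		cur += (0xff & keys[i].offmask) >> keys[i].shift;
		if (cur + 4 > hint)
			hint = cur + 4;
	}
	return hint;
}

int main(void)
{
	struct pedit_key keys[] = { { 12, 0xff, 4 }, { 20, 0, 0 } };

	printf("max hint: %u\n", off_max_hint(keys, 2)); /* 12 + 15 + 4 = 31 */
	return 0;
}
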
3573 +diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
3574 +index 51e8eb2933ff4..338b9ef806e82 100644
3575 +--- a/net/smc/smc_rx.c
3576 ++++ b/net/smc/smc_rx.c
3577 +@@ -355,12 +355,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
3578 + }
3579 + break;
3580 + }
3581 ++ if (!timeo)
3582 ++ return -EAGAIN;
3583 + if (signal_pending(current)) {
3584 + read_done = sock_intr_errno(timeo);
3585 + break;
3586 + }
3587 +- if (!timeo)
3588 +- return -EAGAIN;
3589 + }
3590 +
3591 + if (!smc_rx_data_available(conn)) {
3592 +diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
3593 +index 61c276bddaf25..f549e4c05defc 100644
3594 +--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
3595 ++++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
3596 +@@ -98,6 +98,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
3597 + * done without the correct namespace:
3598 + */
3599 + .flags = RPC_CLNT_CREATE_NOPING |
3600 ++ RPC_CLNT_CREATE_CONNECTED |
3601 + RPC_CLNT_CREATE_NO_IDLE_TIMEOUT
3602 + };
3603 + struct rpc_clnt *clnt;
3604 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3605 +index 258ebc194ee2b..1dacd669faadd 100644
3606 +--- a/net/sunrpc/clnt.c
3607 ++++ b/net/sunrpc/clnt.c
3608 +@@ -76,6 +76,7 @@ static int rpc_encode_header(struct rpc_task *task,
3609 + static int rpc_decode_header(struct rpc_task *task,
3610 + struct xdr_stream *xdr);
3611 + static int rpc_ping(struct rpc_clnt *clnt);
3612 ++static int rpc_ping_noreply(struct rpc_clnt *clnt);
3613 + static void rpc_check_timeout(struct rpc_task *task);
3614 +
3615 + static void rpc_register_client(struct rpc_clnt *clnt)
3616 +@@ -483,6 +484,12 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
3617 + rpc_shutdown_client(clnt);
3618 + return ERR_PTR(err);
3619 + }
3620 ++ } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
3621 ++ int err = rpc_ping_noreply(clnt);
3622 ++ if (err != 0) {
3623 ++ rpc_shutdown_client(clnt);
3624 ++ return ERR_PTR(err);
3625 ++ }
3626 + }
3627 +
3628 + clnt->cl_softrtry = 1;
3629 +@@ -2699,6 +2706,10 @@ static const struct rpc_procinfo rpcproc_null = {
3630 + .p_decode = rpcproc_decode_null,
3631 + };
3632 +
3633 ++static const struct rpc_procinfo rpcproc_null_noreply = {
3634 ++ .p_encode = rpcproc_encode_null,
3635 ++};
3636 ++
3637 + static void
3638 + rpc_null_call_prepare(struct rpc_task *task, void *data)
3639 + {
3640 +@@ -2752,6 +2763,28 @@ static int rpc_ping(struct rpc_clnt *clnt)
3641 + return status;
3642 + }
3643 +
3644 ++static int rpc_ping_noreply(struct rpc_clnt *clnt)
3645 ++{
3646 ++ struct rpc_message msg = {
3647 ++ .rpc_proc = &rpcproc_null_noreply,
3648 ++ };
3649 ++ struct rpc_task_setup task_setup_data = {
3650 ++ .rpc_client = clnt,
3651 ++ .rpc_message = &msg,
3652 ++ .callback_ops = &rpc_null_ops,
3653 ++ .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
3654 ++ };
3655 ++ struct rpc_task *task;
3656 ++ int status;
3657 ++
3658 ++ task = rpc_run_task(&task_setup_data);
3659 ++ if (IS_ERR(task))
3660 ++ return PTR_ERR(task);
3661 ++ status = task->tk_status;
3662 ++ rpc_put_task(task);
3663 ++ return status;
3664 ++}
3665 ++
3666 + struct rpc_cb_add_xprt_calldata {
3667 + struct rpc_xprt_switch *xps;
3668 + struct rpc_xprt *xprt;
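
rpc_ping_noreply() registers an encoder but no decoder, so the NULL procedure is transmitted without waiting for a server reply, and RPC_TASK_SOFTCONN makes the task fail as soon as the transport cannot connect. That is what RPC_CLNT_CREATE_CONNECTED buys the gssproxy client earlier in this patch: creation fails fast when the daemon is absent. A loose userspace analogue of such a connect-only probe (address and port are illustrative):

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Connect-only probe: succeeds once the transport is up, exchanges no
 * payload and waits for no reply. */
static int probe_connect(const char *ip, unsigned short port)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
	};
	int fd, err;

	if (inet_pton(AF_INET, ip, &sa.sin_addr) != 1)
		return -1;
	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	err = connect(fd, (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return err; /* 0 means reachable */
}

int main(void)
{
	printf("probe: %d\n", probe_connect("127.0.0.1", 2049));
	return 0;
}
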
3669 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
3670 +index a40553e83f8b2..f3e3d009cf1cf 100644
3671 +--- a/net/tls/tls_device.c
3672 ++++ b/net/tls/tls_device.c
3673 +@@ -1347,7 +1347,10 @@ static int tls_device_down(struct net_device *netdev)
3674 +
3675 + /* Device contexts for RX and TX will be freed on sk_destruct
3676 + * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
3677 ++ * Now release the ref taken above.
3678 + */
3679 ++ if (refcount_dec_and_test(&ctx->refcount))
3680 ++ tls_device_free_ctx(ctx);
3681 + }
3682 +
3683 + up_write(&device_offload_lock);
3684 +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
3685 +index b45ec35cd63c3..62b41ca050a20 100644
3686 +--- a/sound/soc/codecs/max98090.c
3687 ++++ b/sound/soc/codecs/max98090.c
3688 +@@ -413,6 +413,9 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol,
3689 +
3690 + val = (val >> mc->shift) & mask;
3691 +
3692 ++ if (sel < 0 || sel > mc->max)
3693 ++ return -EINVAL;
3694 ++
3695 + *select = sel;
3696 +
3697 + /* Setting a volume is only valid if it is already On */
3698 +@@ -427,7 +430,7 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol,
3699 + mask << mc->shift,
3700 + sel << mc->shift);
3701 +
3702 +- return 0;
3703 ++ return *select != val;
3704 + }
3705 +
3706 + static const char *max98090_perf_pwr_text[] =
3707 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
3708 +index 58347eadd219b..e693070f51fe8 100644
3709 +--- a/sound/soc/soc-ops.c
3710 ++++ b/sound/soc/soc-ops.c
3711 +@@ -519,7 +519,15 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
3712 + unsigned int mask = (1 << fls(max)) - 1;
3713 + unsigned int invert = mc->invert;
3714 + unsigned int val, val_mask;
3715 +- int err, ret;
3716 ++ int err, ret, tmp;
3717 ++
3718 ++ tmp = ucontrol->value.integer.value[0];
3719 ++ if (tmp < 0)
3720 ++ return -EINVAL;
3721 ++ if (mc->platform_max && tmp > mc->platform_max)
3722 ++ return -EINVAL;
3723 ++ if (tmp > mc->max - mc->min + 1)
3724 ++ return -EINVAL;
3725 +
3726 + if (invert)
3727 + val = (max - ucontrol->value.integer.value[0]) & mask;
3728 +@@ -534,6 +542,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
3729 + ret = err;
3730 +
3731 + if (snd_soc_volsw_is_stereo(mc)) {
3732 ++ tmp = ucontrol->value.integer.value[1];
3733 ++ if (tmp < 0)
3734 ++ return -EINVAL;
3735 ++ if (mc->platform_max && tmp > mc->platform_max)
3736 ++ return -EINVAL;
3737 ++ if (tmp > mc->max - mc->min + 1)
3738 ++ return -EINVAL;
3739 ++
3740 + if (invert)
3741 + val = (max - ucontrol->value.integer.value[1]) & mask;
3742 + else
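
snd_soc_put_volsw_range() consumes values straight from userspace, so both channels are now validated before anything is written to hardware: negative values, values above a configured platform_max, and values beyond the control's span all return -EINVAL instead of being silently masked into range. A sketch of the same checks:

#include <stdio.h>

/* Sketch of the checks added above: reject a control value from
 * userspace unless it fits the control's range. A 'platform_max' of 0
 * means no platform-imposed cap, matching the kernel structure. */
static int validate_ctl(long val, int min, int max, int platform_max)
{
	if (val < 0)
		return -1;
	if (platform_max && val > platform_max)
		return -1;
	if (val > max - min + 1)
		return -1;
	return 0;
}

int main(void)
{
	printf("in range: %d, out of range: %d\n",
	       validate_ctl(5, 0, 7, 0), validate_ctl(9, 0, 7, 0));
	return 0;
}
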
3743 +diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
3744 +index 20c6ca37dbc44..53e97abbe6e3b 100644
3745 +--- a/sound/soc/sof/sof-pci-dev.c
3746 ++++ b/sound/soc/sof/sof-pci-dev.c
3747 +@@ -130,6 +130,11 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
3748 +
3749 + dev_dbg(&pci->dev, "PCI DSP detected");
3750 +
3751 ++ if (!desc) {
3752 ++ dev_err(dev, "error: no matching PCI descriptor\n");
3753 ++ return -ENODEV;
3754 ++ }
3755 ++
3756 + if (!desc->ops) {
3757 + dev_err(dev, "error: no matching PCI descriptor ops\n");
3758 + return -ENODEV;
3759 +diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
3760 +index 6de53b7ef5ffd..e4cb4f1806ffa 100755
3761 +--- a/tools/perf/tests/shell/test_arm_coresight.sh
3762 ++++ b/tools/perf/tests/shell/test_arm_coresight.sh
3763 +@@ -29,7 +29,6 @@ cleanup_files()
3764 + rm -f ${file}
3765 + rm -f "${perfdata}.old"
3766 + trap - exit term int
3767 +- kill -2 $$
3768 + exit $glb_err
3769 + }
3770 +
3771 +diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
3772 +index 1530c3e0242ef..259df83ecd2ee 100644
3773 +--- a/tools/testing/selftests/vm/Makefile
3774 ++++ b/tools/testing/selftests/vm/Makefile
3775 +@@ -55,9 +55,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog
3776 + CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
3777 + CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
3778 +
3779 +-TARGETS := protection_keys
3780 +-BINARIES_32 := $(TARGETS:%=%_32)
3781 +-BINARIES_64 := $(TARGETS:%=%_64)
3782 ++VMTARGETS := protection_keys
3783 ++BINARIES_32 := $(VMTARGETS:%=%_32)
3784 ++BINARIES_64 := $(VMTARGETS:%=%_64)
3785 +
3786 + ifeq ($(CAN_BUILD_WITH_NOPIE),1)
3787 + CFLAGS += -no-pie
3788 +@@ -110,7 +110,7 @@ $(BINARIES_32): CFLAGS += -m32 -mxsave
3789 + $(BINARIES_32): LDLIBS += -lrt -ldl -lm
3790 + $(BINARIES_32): $(OUTPUT)/%_32: %.c
3791 + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
3792 +-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
3793 ++$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t))))
3794 + endif
3795 +
3796 + ifeq ($(CAN_BUILD_X86_64),1)
3797 +@@ -118,7 +118,7 @@ $(BINARIES_64): CFLAGS += -m64 -mxsave
3798 + $(BINARIES_64): LDLIBS += -lrt -ldl
3799 + $(BINARIES_64): $(OUTPUT)/%_64: %.c
3800 + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
3801 +-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
3802 ++$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t))))
3803 + endif
3804 +
3805 + # x86_64 users should be encouraged to install 32-bit libraries