From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 02 Mar 2022 13:06:00
Message-Id: 1646226346.d57ebc2bda9fc50ca58f83d4b1982751edfc53b7.mpagano@gentoo
1 commit: d57ebc2bda9fc50ca58f83d4b1982751edfc53b7
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Mar 2 13:05:46 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Mar 2 13:05:46 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d57ebc2b
7
8 Linux patch 5.15.26
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1025_linux-5.15.26.patch | 5191 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5195 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index d129a8c2..d4bd67e2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -143,6 +143,10 @@ Patch: 1024_linux-5.15.25.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.25
23
24 +Patch: 1025_linux-5.15.26.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.26
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1025_linux-5.15.26.patch b/1025_linux-5.15.26.patch
33 new file mode 100644
34 index 00000000..81808f4b
35 --- /dev/null
36 +++ b/1025_linux-5.15.26.patch
37 @@ -0,0 +1,5191 @@
38 +diff --git a/Makefile b/Makefile
39 +index c50d4ec83be8d..9479b440d708f 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 15
46 +-SUBLEVEL = 25
47 ++SUBLEVEL = 26
48 + EXTRAVERSION =
49 + NAME = Trick or Treat
50 +
51 +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
52 +index 237d20dd5622d..286cec4d86d7b 100644
53 +--- a/arch/parisc/kernel/unaligned.c
54 ++++ b/arch/parisc/kernel/unaligned.c
55 +@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
56 + : "r" (val), "r" (regs->ior), "r" (regs->isr)
57 + : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
58 +
59 +- return 0;
60 ++ return ret;
61 + }
62 + static int emulate_std(struct pt_regs *regs, int frreg, int flop)
63 + {
64 +@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
65 + __asm__ __volatile__ (
66 + " mtsp %4, %%sr1\n"
67 + " zdep %2, 29, 2, %%r19\n"
68 +-" dep %%r0, 31, 2, %2\n"
69 ++" dep %%r0, 31, 2, %3\n"
70 + " mtsar %%r19\n"
71 + " zvdepi -2, 32, %%r19\n"
72 + "1: ldw 0(%%sr1,%3),%%r20\n"
73 +@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
74 + " andcm %%r21, %%r19, %%r21\n"
75 + " or %1, %%r20, %1\n"
76 + " or %2, %%r21, %2\n"
77 +-"3: stw %1,0(%%sr1,%1)\n"
78 ++"3: stw %1,0(%%sr1,%3)\n"
79 + "4: stw %%r1,4(%%sr1,%3)\n"
80 + "5: stw %2,8(%%sr1,%3)\n"
81 + " copy %%r0, %0\n"
82 +@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
83 + ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
84 + break;
85 + }
86 +-#ifdef CONFIG_PA20
87 + switch (regs->iir & OPCODE2_MASK)
88 + {
89 + case OPCODE_FLDD_L:
90 +@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
91 + flop=1;
92 + ret = emulate_std(regs, R2(regs->iir),1);
93 + break;
94 ++#ifdef CONFIG_PA20
95 + case OPCODE_LDD_L:
96 + ret = emulate_ldd(regs, R2(regs->iir),0);
97 + break;
98 + case OPCODE_STD_L:
99 + ret = emulate_std(regs, R2(regs->iir),0);
100 + break;
101 +- }
102 + #endif
103 ++ }
104 + switch (regs->iir & OPCODE3_MASK)
105 + {
106 + case OPCODE_FLDW_L:
107 + flop=1;
108 +- ret = emulate_ldw(regs, R2(regs->iir),0);
109 ++ ret = emulate_ldw(regs, R2(regs->iir), 1);
110 + break;
111 + case OPCODE_LDW_M:
112 +- ret = emulate_ldw(regs, R2(regs->iir),1);
113 ++ ret = emulate_ldw(regs, R2(regs->iir), 0);
114 + break;
115 +
116 + case OPCODE_FSTW_L:
117 +diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
118 +index d68b743d580f8..15d1fd0a70184 100644
119 +--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
120 ++++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
121 +@@ -23,7 +23,7 @@ CONFIG_SLOB=y
122 + CONFIG_SOC_CANAAN=y
123 + CONFIG_SMP=y
124 + CONFIG_NR_CPUS=2
125 +-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
126 ++CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
127 + CONFIG_CMDLINE_FORCE=y
128 + # CONFIG_SECCOMP is not set
129 + # CONFIG_STACKPROTECTOR is not set
130 +diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
131 +index 3397ddac1a30c..16308ef1e5787 100644
132 +--- a/arch/riscv/kernel/Makefile
133 ++++ b/arch/riscv/kernel/Makefile
134 +@@ -50,6 +50,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
135 + obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
136 + obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
137 +
138 ++obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
139 ++
140 + obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
141 + obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
142 + obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
143 +diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
144 +index 98f502654edd3..7e52ad5d61adb 100644
145 +--- a/arch/riscv/kernel/entry.S
146 ++++ b/arch/riscv/kernel/entry.S
147 +@@ -108,7 +108,7 @@ _save_context:
148 + .option pop
149 +
150 + #ifdef CONFIG_TRACE_IRQFLAGS
151 +- call trace_hardirqs_off
152 ++ call __trace_hardirqs_off
153 + #endif
154 +
155 + #ifdef CONFIG_CONTEXT_TRACKING
156 +@@ -144,7 +144,7 @@ skip_context_tracking:
157 + li t0, EXC_BREAKPOINT
158 + beq s4, t0, 1f
159 + #ifdef CONFIG_TRACE_IRQFLAGS
160 +- call trace_hardirqs_on
161 ++ call __trace_hardirqs_on
162 + #endif
163 + csrs CSR_STATUS, SR_IE
164 +
165 +@@ -235,7 +235,7 @@ ret_from_exception:
166 + REG_L s0, PT_STATUS(sp)
167 + csrc CSR_STATUS, SR_IE
168 + #ifdef CONFIG_TRACE_IRQFLAGS
169 +- call trace_hardirqs_off
170 ++ call __trace_hardirqs_off
171 + #endif
172 + #ifdef CONFIG_RISCV_M_MODE
173 + /* the MPP value is too large to be used as an immediate arg for addi */
174 +@@ -271,10 +271,10 @@ restore_all:
175 + REG_L s1, PT_STATUS(sp)
176 + andi t0, s1, SR_PIE
177 + beqz t0, 1f
178 +- call trace_hardirqs_on
179 ++ call __trace_hardirqs_on
180 + j 2f
181 + 1:
182 +- call trace_hardirqs_off
183 ++ call __trace_hardirqs_off
184 + 2:
185 + #endif
186 + REG_L a0, PT_STATUS(sp)
187 +diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
188 +new file mode 100644
189 +index 0000000000000..095ac976d7da1
190 +--- /dev/null
191 ++++ b/arch/riscv/kernel/trace_irq.c
192 +@@ -0,0 +1,27 @@
193 ++// SPDX-License-Identifier: GPL-2.0
194 ++/*
195 ++ * Copyright (C) 2022 Changbin Du <changbin.du@×××××.com>
196 ++ */
197 ++
198 ++#include <linux/irqflags.h>
199 ++#include <linux/kprobes.h>
200 ++#include "trace_irq.h"
201 ++
202 ++/*
203 ++ * trace_hardirqs_on/off require the caller to set up the frame pointer
204 ++ * properly. Otherwise, CALLER_ADDR1 might trigger a paging exception in
205 ++ * the kernel. Here we add one extra call level so they can be safely
206 ++ * called by low-level entry code where $fp is used for other purposes.
207 ++ */
208 ++
209 ++void __trace_hardirqs_on(void)
210 ++{
211 ++ trace_hardirqs_on();
212 ++}
213 ++NOKPROBE_SYMBOL(__trace_hardirqs_on);
214 ++
215 ++void __trace_hardirqs_off(void)
216 ++{
217 ++ trace_hardirqs_off();
218 ++}
219 ++NOKPROBE_SYMBOL(__trace_hardirqs_off);
220 +diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
221 +new file mode 100644
222 +index 0000000000000..99fe67377e5ed
223 +--- /dev/null
224 ++++ b/arch/riscv/kernel/trace_irq.h
225 +@@ -0,0 +1,11 @@
226 ++/* SPDX-License-Identifier: GPL-2.0 */
227 ++/*
228 ++ * Copyright (C) 2022 Changbin Du <changbin.du@×××××.com>
229 ++ */
230 ++#ifndef __TRACE_IRQ_H
231 ++#define __TRACE_IRQ_H
232 ++
233 ++void __trace_hardirqs_on(void);
234 ++void __trace_hardirqs_off(void);
235 ++
236 ++#endif /* __TRACE_IRQ_H */
237 +diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
238 +index 66ed317ebc0d3..125cbbe10fefa 100644
239 +--- a/arch/x86/kernel/fpu/regset.c
240 ++++ b/arch/x86/kernel/fpu/regset.c
241 +@@ -87,11 +87,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
242 + const void *kbuf, const void __user *ubuf)
243 + {
244 + struct fpu *fpu = &target->thread.fpu;
245 +- struct user32_fxsr_struct newstate;
246 ++ struct fxregs_state newstate;
247 + int ret;
248 +
249 +- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
250 +-
251 + if (!cpu_feature_enabled(X86_FEATURE_FXSR))
252 + return -ENODEV;
253 +
254 +@@ -112,9 +110,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
255 + /* Copy the state */
256 + memcpy(&fpu->state.fxsave, &newstate, sizeof(newstate));
257 +
258 +- /* Clear xmm8..15 */
259 ++ /* Clear xmm8..15 for 32-bit callers */
260 + BUILD_BUG_ON(sizeof(fpu->state.fxsave.xmm_space) != 16 * 16);
261 +- memset(&fpu->state.fxsave.xmm_space[8], 0, 8 * 16);
262 ++ if (in_ia32_syscall())
263 ++ memset(&fpu->state.fxsave.xmm_space[8*4], 0, 8 * 16);
264 +
265 + /* Mark FP and SSE as in use when XSAVE is enabled */
266 + if (use_xsave())
267 +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
268 +index 4c208ea3bd9f3..033d9c6a94689 100644
269 +--- a/arch/x86/kernel/ptrace.c
270 ++++ b/arch/x86/kernel/ptrace.c
271 +@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
272 + },
273 + [REGSET_FP] = {
274 + .core_note_type = NT_PRFPREG,
275 +- .n = sizeof(struct user_i387_struct) / sizeof(long),
276 ++ .n = sizeof(struct fxregs_state) / sizeof(long),
277 + .size = sizeof(long), .align = sizeof(long),
278 + .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
279 + },
280 +@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
281 + },
282 + [REGSET_XFP] = {
283 + .core_note_type = NT_PRXFPREG,
284 +- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
285 ++ .n = sizeof(struct fxregs_state) / sizeof(u32),
286 + .size = sizeof(u32), .align = sizeof(u32),
287 + .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
288 + },
289 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
290 +index 0a88cb4f731f4..ccb9aa571b033 100644
291 +--- a/arch/x86/kvm/mmu/mmu.c
292 ++++ b/arch/x86/kvm/mmu/mmu.c
293 +@@ -3889,12 +3889,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
294 + walk_shadow_page_lockless_end(vcpu);
295 + }
296 +
297 ++static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
298 ++{
299 ++ /* make sure the token value is not 0 */
300 ++ u32 id = vcpu->arch.apf.id;
301 ++
302 ++ if (id << 12 == 0)
303 ++ vcpu->arch.apf.id = 1;
304 ++
305 ++ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
306 ++}
307 ++
308 + static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
309 + gfn_t gfn)
310 + {
311 + struct kvm_arch_async_pf arch;
312 +
313 +- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
314 ++ arch.token = alloc_apf_token(vcpu);
315 + arch.gfn = gfn;
316 + arch.direct_map = vcpu->arch.mmu->direct_map;
317 + arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
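The alloc_apf_token() hunk above fixes a wraparound: the async page-fault token is packed as (id << 12) | vcpu_id, and once the low 20 bits of id wrap to zero the shifted part vanishes, so for vcpu 0 the token itself becomes 0, which the hunk's comment says must never happen. A minimal stand-alone sketch of exactly that guard (struct vcpu_stub and its fields are illustrative stand-ins, not KVM types):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the two struct kvm_vcpu fields the hunk uses. */
    struct vcpu_stub {
            uint32_t apf_id;
            uint32_t vcpu_id;
    };

    static uint32_t alloc_apf_token(struct vcpu_stub *v)
    {
            /* (id << 12) wraps to 0 once the low 20 bits of id are all zero;
             * skipping those ids keeps the token non-zero even for vcpu 0. */
            if ((v->apf_id << 12) == 0)
                    v->apf_id = 1;

            return (v->apf_id++ << 12) | v->vcpu_id;
    }

    int main(void)
    {
            struct vcpu_stub v = { .apf_id = 0, .vcpu_id = 0 };

            printf("%u\n", alloc_apf_token(&v));    /* prints 4096, never 0 */
            return 0;
    }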
318 +diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
319 +index f242157bc81bb..ae8375e9d2681 100644
320 +--- a/drivers/ata/pata_hpt37x.c
321 ++++ b/drivers/ata/pata_hpt37x.c
322 +@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
323 + irqmask &= ~0x10;
324 + pci_write_config_byte(dev, 0x5a, irqmask);
325 +
326 ++ /*
327 ++ * HPT371 chips physically have only one channel, the secondary one,
328 ++ * but the primary channel registers do exist! Go figure...
329 ++ * So, we manually disable the non-existing channel here
330 ++ * (if the BIOS hasn't done this already).
331 ++ */
332 ++ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
333 ++ u8 mcr1;
334 ++
335 ++ pci_read_config_byte(dev, 0x50, &mcr1);
336 ++ mcr1 &= ~0x04;
337 ++ pci_write_config_byte(dev, 0x50, mcr1);
338 ++ }
339 ++
340 + /*
341 + * default to pci clock. make sure MA15/16 are set to output
342 + * to prevent drives having problems with 40-pin cables. Needed
343 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
344 +index 68ea1f949daa9..6b66306932016 100644
345 +--- a/drivers/base/dd.c
346 ++++ b/drivers/base/dd.c
347 +@@ -629,6 +629,9 @@ re_probe:
348 + drv->remove(dev);
349 +
350 + devres_release_all(dev);
351 ++ arch_teardown_dma_ops(dev);
352 ++ kfree(dev->dma_range_map);
353 ++ dev->dma_range_map = NULL;
354 + driver_sysfs_remove(dev);
355 + dev->driver = NULL;
356 + dev_set_drvdata(dev, NULL);
357 +@@ -1208,6 +1211,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
358 +
359 + devres_release_all(dev);
360 + arch_teardown_dma_ops(dev);
361 ++ kfree(dev->dma_range_map);
362 ++ dev->dma_range_map = NULL;
363 + dev->driver = NULL;
364 + dev_set_drvdata(dev, NULL);
365 + if (dev->pm_domain && dev->pm_domain->dismiss)
366 +diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
367 +index d2656581a6085..4a446259a184e 100644
368 +--- a/drivers/base/regmap/regmap-irq.c
369 ++++ b/drivers/base/regmap/regmap-irq.c
370 +@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
371 + ret = regmap_write(map, reg, d->mask_buf[i]);
372 + if (d->chip->clear_ack) {
373 + if (d->chip->ack_invert && !ret)
374 +- ret = regmap_write(map, reg,
375 +- d->mask_buf[i]);
376 ++ ret = regmap_write(map, reg, UINT_MAX);
377 + else if (!ret)
378 +- ret = regmap_write(map, reg,
379 +- ~d->mask_buf[i]);
380 ++ ret = regmap_write(map, reg, 0);
381 + }
382 + if (ret != 0)
383 + dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
384 +@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
385 + data->status_buf[i]);
386 + if (chip->clear_ack) {
387 + if (chip->ack_invert && !ret)
388 +- ret = regmap_write(map, reg,
389 +- data->status_buf[i]);
390 ++ ret = regmap_write(map, reg, UINT_MAX);
391 + else if (!ret)
392 +- ret = regmap_write(map, reg,
393 +- ~data->status_buf[i]);
394 ++ ret = regmap_write(map, reg, 0);
395 + }
396 + if (ret != 0)
397 + dev_err(map->dev, "Failed to ack 0x%x: %d\n",
398 +@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
399 + d->status_buf[i] & d->mask_buf[i]);
400 + if (chip->clear_ack) {
401 + if (chip->ack_invert && !ret)
402 +- ret = regmap_write(map, reg,
403 +- (d->status_buf[i] &
404 +- d->mask_buf[i]));
405 ++ ret = regmap_write(map, reg, UINT_MAX);
406 + else if (!ret)
407 +- ret = regmap_write(map, reg,
408 +- ~(d->status_buf[i] &
409 +- d->mask_buf[i]));
410 ++ ret = regmap_write(map, reg, 0);
411 + }
412 + if (ret != 0) {
413 + dev_err(map->dev, "Failed to ack 0x%x: %d\n",
414 +diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
415 +index 5154b0cf8ad6c..66ff141da0a42 100644
416 +--- a/drivers/clk/ingenic/jz4725b-cgu.c
417 ++++ b/drivers/clk/ingenic/jz4725b-cgu.c
418 +@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
419 + },
420 +
421 + [JZ4725B_CLK_I2S] = {
422 +- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
423 ++ "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
424 + .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
425 + .mux = { CGU_REG_CPCCR, 31, 1 },
426 + .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
427 +- .gate = { CGU_REG_CLKGR, 6 },
428 + },
429 +
430 + [JZ4725B_CLK_SPI] = {
431 +diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
432 +index ce63cbd14d69a..24155c038f6d0 100644
433 +--- a/drivers/gpio/gpio-rockchip.c
434 ++++ b/drivers/gpio/gpio-rockchip.c
435 +@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
436 + level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
437 + polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
438 +
439 +- switch (type) {
440 +- case IRQ_TYPE_EDGE_BOTH:
441 ++ if (type == IRQ_TYPE_EDGE_BOTH) {
442 + if (bank->gpio_type == GPIO_TYPE_V2) {
443 +- bank->toggle_edge_mode &= ~mask;
444 + rockchip_gpio_writel_bit(bank, d->hwirq, 1,
445 + bank->gpio_regs->int_bothedge);
446 + goto out;
447 +@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
448 + else
449 + polarity |= mask;
450 + }
451 +- break;
452 +- case IRQ_TYPE_EDGE_RISING:
453 +- bank->toggle_edge_mode &= ~mask;
454 +- level |= mask;
455 +- polarity |= mask;
456 +- break;
457 +- case IRQ_TYPE_EDGE_FALLING:
458 +- bank->toggle_edge_mode &= ~mask;
459 +- level |= mask;
460 +- polarity &= ~mask;
461 +- break;
462 +- case IRQ_TYPE_LEVEL_HIGH:
463 +- bank->toggle_edge_mode &= ~mask;
464 +- level &= ~mask;
465 +- polarity |= mask;
466 +- break;
467 +- case IRQ_TYPE_LEVEL_LOW:
468 +- bank->toggle_edge_mode &= ~mask;
469 +- level &= ~mask;
470 +- polarity &= ~mask;
471 +- break;
472 +- default:
473 +- ret = -EINVAL;
474 +- goto out;
475 ++ } else {
476 ++ if (bank->gpio_type == GPIO_TYPE_V2) {
477 ++ rockchip_gpio_writel_bit(bank, d->hwirq, 0,
478 ++ bank->gpio_regs->int_bothedge);
479 ++ } else {
480 ++ bank->toggle_edge_mode &= ~mask;
481 ++ }
482 ++ switch (type) {
483 ++ case IRQ_TYPE_EDGE_RISING:
484 ++ level |= mask;
485 ++ polarity |= mask;
486 ++ break;
487 ++ case IRQ_TYPE_EDGE_FALLING:
488 ++ level |= mask;
489 ++ polarity &= ~mask;
490 ++ break;
491 ++ case IRQ_TYPE_LEVEL_HIGH:
492 ++ level &= ~mask;
493 ++ polarity |= mask;
494 ++ break;
495 ++ case IRQ_TYPE_LEVEL_LOW:
496 ++ level &= ~mask;
497 ++ polarity &= ~mask;
498 ++ break;
499 ++ default:
500 ++ ret = -EINVAL;
501 ++ goto out;
502 ++ }
503 + }
504 +
505 + rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
506 +diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
507 +index c99858f40a27e..00762de3d4096 100644
508 +--- a/drivers/gpio/gpio-tegra186.c
509 ++++ b/drivers/gpio/gpio-tegra186.c
510 +@@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
511 + return offset + pin;
512 + }
513 +
514 ++#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
515 ++
516 + static void tegra186_irq_ack(struct irq_data *data)
517 + {
518 +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
519 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
520 ++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
521 + void __iomem *base;
522 +
523 + base = tegra186_gpio_get_base(gpio, data->hwirq);
524 +@@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data)
525 +
526 + static void tegra186_irq_mask(struct irq_data *data)
527 + {
528 +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
529 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
530 ++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
531 + void __iomem *base;
532 + u32 value;
533 +
534 +@@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data)
535 +
536 + static void tegra186_irq_unmask(struct irq_data *data)
537 + {
538 +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
539 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
540 ++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
541 + void __iomem *base;
542 + u32 value;
543 +
544 +@@ -381,7 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
545 +
546 + static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
547 + {
548 +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
549 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
550 ++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
551 + void __iomem *base;
552 + u32 value;
553 +
554 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
555 +index b7509d3f7c1c7..a8465e3195a67 100644
556 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
557 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
558 +@@ -1278,6 +1278,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
559 + bool is_fw_fb;
560 + resource_size_t base, size;
561 +
562 ++ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
563 ++ amdgpu_aspm = 0;
564 ++
565 + if (amdgpu_virtual_display ||
566 + amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
567 + supports_atomic = true;
568 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
569 +index 0fc97c364fd76..6439d5c3d8d8b 100644
570 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
571 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
572 +@@ -607,8 +607,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
573 + static int soc15_asic_reset(struct amdgpu_device *adev)
574 + {
575 + /* original raven doesn't have full asic reset */
576 +- if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
577 +- !(adev->apu_flags & AMD_APU_IS_RAVEN2))
578 ++ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
579 ++ (adev->apu_flags & AMD_APU_IS_RAVEN2))
580 + return 0;
581 +
582 + switch (soc15_asic_reset_method(adev)) {
583 +@@ -1273,8 +1273,11 @@ static int soc15_common_early_init(void *handle)
584 + AMD_CG_SUPPORT_SDMA_LS |
585 + AMD_CG_SUPPORT_VCN_MGCG;
586 +
587 ++ /*
588 ++ * MMHUB PG needs to be disabled for Picasso for
589 ++ * stability reasons.
590 ++ */
591 + adev->pg_flags = AMD_PG_SUPPORT_SDMA |
592 +- AMD_PG_SUPPORT_MMHUB |
593 + AMD_PG_SUPPORT_VCN;
594 + } else {
595 + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
596 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
597 +index 1861a147a7fa1..5c5cbeb59c4d9 100644
598 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
599 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
600 +@@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
601 + clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
602 +
603 + /* Refresh bounding box */
604 ++ DC_FP_START();
605 + clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
606 + clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
607 ++ DC_FP_END();
608 + }
609 +
610 + static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
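Both DC_FP hunks in this patch (here and in dc.c just below) enforce the same kernel rule: floating-point code must run inside an explicit begin/end section, because the kernel does not preserve FP/SIMD registers across its own context switches. On x86 the DC_FP_START()/DC_FP_END() macros reduce, roughly, to kernel_fpu_begin()/kernel_fpu_end(); a minimal sketch of the pattern, with a placeholder computation rather than real amdgpu math:

    #include <asm/fpu/api.h>        /* kernel_fpu_begin()/kernel_fpu_end() on x86 */

    static int scale_clock_khz(int clock_khz)
    {
            int scaled;

            kernel_fpu_begin();                     /* FP registers now safe to touch */
            scaled = (int)(clock_khz * 0.64);       /* placeholder float math */
            kernel_fpu_end();                       /* must pair on every return path */

            return scaled;
    }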
611 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
612 +index 4fae73478840c..b37c4d2e7a1e0 100644
613 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
614 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
615 +@@ -891,10 +891,13 @@ static bool dc_construct(struct dc *dc,
616 + goto fail;
617 + #ifdef CONFIG_DRM_AMD_DC_DCN
618 + dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
619 +-#endif
620 +
621 +- if (dc->res_pool->funcs->update_bw_bounding_box)
622 ++ if (dc->res_pool->funcs->update_bw_bounding_box) {
623 ++ DC_FP_START();
624 + dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
625 ++ DC_FP_END();
626 ++ }
627 ++#endif
628 +
629 + /* Creation of current_state must occur after dc->dml
630 + * is initialized in dc_create_resource_pool because
631 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
632 +index f89bf49965fcd..b8896882b6f01 100644
633 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
634 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
635 +@@ -418,6 +418,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
636 + return 0;
637 + }
638 +
639 ++static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
640 ++{
641 ++ struct amdgpu_device *adev = smu->adev;
642 ++ uint32_t *board_reserved;
643 ++ uint16_t *freq_table_gfx;
644 ++ uint32_t i;
645 ++
646 ++ /* Fix some OEM SKU specific stability issues */
647 ++ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
648 ++ if ((adev->pdev->device == 0x73DF) &&
649 ++ (adev->pdev->revision == 0XC3) &&
650 ++ (adev->pdev->subsystem_device == 0x16C2) &&
651 ++ (adev->pdev->subsystem_vendor == 0x1043))
652 ++ board_reserved[0] = 1387;
653 ++
654 ++ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
655 ++ if ((adev->pdev->device == 0x73DF) &&
656 ++ (adev->pdev->revision == 0XC3) &&
657 ++ ((adev->pdev->subsystem_device == 0x16C2) ||
658 ++ (adev->pdev->subsystem_device == 0x133C)) &&
659 ++ (adev->pdev->subsystem_vendor == 0x1043)) {
660 ++ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
661 ++ if (freq_table_gfx[i] > 2500)
662 ++ freq_table_gfx[i] = 2500;
663 ++ }
664 ++ }
665 ++
666 ++ return 0;
667 ++}
668 ++
669 + static int sienna_cichlid_setup_pptable(struct smu_context *smu)
670 + {
671 + int ret = 0;
672 +@@ -438,7 +468,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
673 + if (ret)
674 + return ret;
675 +
676 +- return ret;
677 ++ return sienna_cichlid_patch_pptable_quirk(smu);
678 + }
679 +
680 + static int sienna_cichlid_tables_init(struct smu_context *smu)
681 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
682 +index ea9a79bc95839..6ad4361a5cbc7 100644
683 +--- a/drivers/gpu/drm/drm_edid.c
684 ++++ b/drivers/gpu/drm/drm_edid.c
685 +@@ -5205,6 +5205,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
686 + if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
687 + return quirks;
688 +
689 ++ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
690 + drm_parse_cea_ext(connector, edid);
691 +
692 + /*
693 +@@ -5253,7 +5254,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
694 + DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
695 + connector->name, info->bpc);
696 +
697 +- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
698 + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
699 + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
700 + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
701 +diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
702 +index 4b94256d73197..7144c76ac9701 100644
703 +--- a/drivers/gpu/drm/i915/display/intel_bw.c
704 ++++ b/drivers/gpu/drm/i915/display/intel_bw.c
705 +@@ -681,6 +681,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
706 + unsigned int max_bw_point = 0, max_bw = 0;
707 + unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
708 + unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
709 ++ bool changed = false;
710 + u32 mask = 0;
711 +
712 + /* FIXME earlier gens need some checks too */
713 +@@ -724,6 +725,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
714 + new_bw_state->data_rate[crtc->pipe] = new_data_rate;
715 + new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
716 +
717 ++ changed = true;
718 ++
719 + drm_dbg_kms(&dev_priv->drm,
720 + "pipe %c data rate %u num active planes %u\n",
721 + pipe_name(crtc->pipe),
722 +@@ -731,7 +734,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
723 + new_bw_state->num_active_planes[crtc->pipe]);
724 + }
725 +
726 +- if (!new_bw_state)
727 ++ old_bw_state = intel_atomic_get_old_bw_state(state);
728 ++ new_bw_state = intel_atomic_get_new_bw_state(state);
729 ++
730 ++ if (new_bw_state &&
731 ++ intel_can_enable_sagv(dev_priv, old_bw_state) !=
732 ++ intel_can_enable_sagv(dev_priv, new_bw_state))
733 ++ changed = true;
734 ++
735 ++ /*
736 ++ * If none of our inputs (data rates, number of active
737 ++ * planes, SAGV yes/no) changed then nothing to do here.
738 ++ */
739 ++ if (!changed)
740 + return 0;
741 +
742 + ret = intel_atomic_lock_global_state(&new_bw_state->base);
743 +@@ -814,7 +829,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
744 + */
745 + new_bw_state->qgv_points_mask = ~allowed_points & mask;
746 +
747 +- old_bw_state = intel_atomic_get_old_bw_state(state);
748 + /*
749 + * If the actual mask had changed we need to make sure that
750 + * the commits are serialized(in case this is a nomodeset, nonblocking)
751 +diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
752 +index 46c6eecbd9175..0ceaed1c96562 100644
753 +--- a/drivers/gpu/drm/i915/display/intel_bw.h
754 ++++ b/drivers/gpu/drm/i915/display/intel_bw.h
755 +@@ -30,19 +30,19 @@ struct intel_bw_state {
756 + */
757 + u8 pipe_sagv_reject;
758 +
759 ++ /* bitmask of active pipes */
760 ++ u8 active_pipes;
761 ++
762 + /*
763 + * Current QGV points mask, which restricts
764 + * some particular SAGV states, not to confuse
765 + * with pipe_sagv_mask.
766 + */
767 +- u8 qgv_points_mask;
768 ++ u16 qgv_points_mask;
769 +
770 + unsigned int data_rate[I915_MAX_PIPES];
771 + u8 num_active_planes[I915_MAX_PIPES];
772 +
773 +- /* bitmask of active pipes */
774 +- u8 active_pipes;
775 +-
776 + int min_cdclk;
777 + };
778 +
779 +diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
780 +index 18b52b64af955..536b319ffe5ba 100644
781 +--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
782 ++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
783 +@@ -32,7 +32,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
784 + if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
785 + DG2_PHY_DP_TX_ACK_MASK, 25))
786 + DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
787 +- phy);
788 ++ phy_name(phy));
789 + }
790 + }
791 +
792 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
793 +index 8937bc8985d6d..9c5e4758947b6 100644
794 +--- a/drivers/gpu/drm/i915/intel_pm.c
795 ++++ b/drivers/gpu/drm/i915/intel_pm.c
796 +@@ -4020,6 +4020,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
797 + return ret;
798 + }
799 +
800 ++ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
801 ++ intel_can_enable_sagv(dev_priv, old_bw_state)) {
802 ++ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
803 ++ if (ret)
804 ++ return ret;
805 ++ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
806 ++ ret = intel_atomic_lock_global_state(&new_bw_state->base);
807 ++ if (ret)
808 ++ return ret;
809 ++ }
810 ++
811 + for_each_new_intel_crtc_in_state(state, crtc,
812 + new_crtc_state, i) {
813 + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
814 +@@ -4035,17 +4046,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
815 + intel_can_enable_sagv(dev_priv, new_bw_state);
816 + }
817 +
818 +- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
819 +- intel_can_enable_sagv(dev_priv, old_bw_state)) {
820 +- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
821 +- if (ret)
822 +- return ret;
823 +- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
824 +- ret = intel_atomic_lock_global_state(&new_bw_state->base);
825 +- if (ret)
826 +- return ret;
827 +- }
828 +-
829 + return 0;
830 + }
831 +
832 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
833 +index e3ed52d96f423..3e61184e194c9 100644
834 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
835 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
836 +@@ -538,9 +538,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
837 + if (ret)
838 + return ret;
839 +
840 +- ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
841 +- if (ret)
842 +- return ret;
843 ++ /*
844 ++ * post_crtc_powerdown will have called pm_runtime_put, so we
845 ++ * don't need it here otherwise we'll get the reference counting
846 ++ * wrong.
847 ++ */
848 +
849 + return 0;
850 + }
851 +diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
852 +index d198a10848c6b..a89a408182e60 100644
853 +--- a/drivers/gpu/host1x/syncpt.c
854 ++++ b/drivers/gpu/host1x/syncpt.c
855 +@@ -225,27 +225,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
856 + void *ref;
857 + struct host1x_waitlist *waiter;
858 + int err = 0, check_count = 0;
859 +- u32 val;
860 +
861 + if (value)
862 +- *value = 0;
863 +-
864 +- /* first check cache */
865 +- if (host1x_syncpt_is_expired(sp, thresh)) {
866 +- if (value)
867 +- *value = host1x_syncpt_load(sp);
868 ++ *value = host1x_syncpt_load(sp);
869 +
870 ++ if (host1x_syncpt_is_expired(sp, thresh))
871 + return 0;
872 +- }
873 +-
874 +- /* try to read from register */
875 +- val = host1x_hw_syncpt_load(sp->host, sp);
876 +- if (host1x_syncpt_is_expired(sp, thresh)) {
877 +- if (value)
878 +- *value = val;
879 +-
880 +- goto done;
881 +- }
882 +
883 + if (!timeout) {
884 + err = -EAGAIN;
885 +diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
886 +index 3501a3ead4ba6..3ae961986fc31 100644
887 +--- a/drivers/hwmon/hwmon.c
888 ++++ b/drivers/hwmon/hwmon.c
889 +@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
890 +
891 + tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
892 + &hwmon_thermal_ops);
893 +- /*
894 +- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
895 +- * so ignore that error but forward any other error.
896 +- */
897 +- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
898 +- return PTR_ERR(tzd);
899 ++ if (IS_ERR(tzd)) {
900 ++ if (PTR_ERR(tzd) != -ENODEV)
901 ++ return PTR_ERR(tzd);
902 ++ dev_info(dev, "temp%d_input not attached to any thermal zone\n",
903 ++ index + 1);
904 ++ devm_kfree(dev, tdata);
905 ++ return 0;
906 ++ }
907 +
908 + err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
909 + if (err)
910 +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
911 +index e8693a42ad464..3af763b4a9737 100644
912 +--- a/drivers/iio/accel/bmc150-accel-core.c
913 ++++ b/drivers/iio/accel/bmc150-accel-core.c
914 +@@ -1782,11 +1782,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
915 + ret = iio_device_register(indio_dev);
916 + if (ret < 0) {
917 + dev_err(dev, "Unable to register iio device\n");
918 +- goto err_trigger_unregister;
919 ++ goto err_pm_cleanup;
920 + }
921 +
922 + return 0;
923 +
924 ++err_pm_cleanup:
925 ++ pm_runtime_dont_use_autosuspend(dev);
926 ++ pm_runtime_disable(dev);
927 + err_trigger_unregister:
928 + bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
929 + err_buffer_cleanup:
930 +diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
931 +index f41db9e0249a7..a2d29cabb3892 100644
932 +--- a/drivers/iio/accel/fxls8962af-core.c
933 ++++ b/drivers/iio/accel/fxls8962af-core.c
934 +@@ -154,12 +154,20 @@ struct fxls8962af_data {
935 + u8 watermark;
936 + };
937 +
938 +-const struct regmap_config fxls8962af_regmap_conf = {
939 ++const struct regmap_config fxls8962af_i2c_regmap_conf = {
940 + .reg_bits = 8,
941 + .val_bits = 8,
942 + .max_register = FXLS8962AF_MAX_REG,
943 + };
944 +-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
945 ++EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
946 ++
947 ++const struct regmap_config fxls8962af_spi_regmap_conf = {
948 ++ .reg_bits = 8,
949 ++ .pad_bits = 8,
950 ++ .val_bits = 8,
951 ++ .max_register = FXLS8962AF_MAX_REG,
952 ++};
953 ++EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
954 +
955 + enum {
956 + fxls8962af_idx_x,
957 +diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
958 +index cfb004b204559..6bde9891effbf 100644
959 +--- a/drivers/iio/accel/fxls8962af-i2c.c
960 ++++ b/drivers/iio/accel/fxls8962af-i2c.c
961 +@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
962 + {
963 + struct regmap *regmap;
964 +
965 +- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
966 ++ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
967 + if (IS_ERR(regmap)) {
968 + dev_err(&client->dev, "Failed to initialize i2c regmap\n");
969 + return PTR_ERR(regmap);
970 +diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
971 +index 57108d3d480b6..6f4dff3238d3c 100644
972 +--- a/drivers/iio/accel/fxls8962af-spi.c
973 ++++ b/drivers/iio/accel/fxls8962af-spi.c
974 +@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
975 + {
976 + struct regmap *regmap;
977 +
978 +- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
979 ++ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
980 + if (IS_ERR(regmap)) {
981 + dev_err(&spi->dev, "Failed to initialize spi regmap\n");
982 + return PTR_ERR(regmap);
983 +diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
984 +index b67572c3ef069..9cbe98c3ba9a2 100644
985 +--- a/drivers/iio/accel/fxls8962af.h
986 ++++ b/drivers/iio/accel/fxls8962af.h
987 +@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
988 + int fxls8962af_core_remove(struct device *dev);
989 +
990 + extern const struct dev_pm_ops fxls8962af_pm_ops;
991 +-extern const struct regmap_config fxls8962af_regmap_conf;
992 ++extern const struct regmap_config fxls8962af_i2c_regmap_conf;
993 ++extern const struct regmap_config fxls8962af_spi_regmap_conf;
994 +
995 + #endif /* _FXLS8962AF_H_ */
996 +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
997 +index 24c9387c29687..ba6c8ca488b1a 100644
998 +--- a/drivers/iio/accel/kxcjk-1013.c
999 ++++ b/drivers/iio/accel/kxcjk-1013.c
1000 +@@ -1589,11 +1589,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
1001 + ret = iio_device_register(indio_dev);
1002 + if (ret < 0) {
1003 + dev_err(&client->dev, "unable to register iio device\n");
1004 +- goto err_buffer_cleanup;
1005 ++ goto err_pm_cleanup;
1006 + }
1007 +
1008 + return 0;
1009 +
1010 ++err_pm_cleanup:
1011 ++ pm_runtime_dont_use_autosuspend(&client->dev);
1012 ++ pm_runtime_disable(&client->dev);
1013 + err_buffer_cleanup:
1014 + iio_triggered_buffer_cleanup(indio_dev);
1015 + err_trigger_unregister:
1016 +diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
1017 +index 4c359fb054801..c53a3398b14c4 100644
1018 +--- a/drivers/iio/accel/mma9551.c
1019 ++++ b/drivers/iio/accel/mma9551.c
1020 +@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
1021 + ret = iio_device_register(indio_dev);
1022 + if (ret < 0) {
1023 + dev_err(&client->dev, "unable to register iio device\n");
1024 +- goto out_poweroff;
1025 ++ goto err_pm_cleanup;
1026 + }
1027 +
1028 + return 0;
1029 +
1030 ++err_pm_cleanup:
1031 ++ pm_runtime_dont_use_autosuspend(&client->dev);
1032 ++ pm_runtime_disable(&client->dev);
1033 + out_poweroff:
1034 + mma9551_set_device_state(client, false);
1035 +
1036 +diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
1037 +index ba3ecb3b57dcd..1599b75724d4f 100644
1038 +--- a/drivers/iio/accel/mma9553.c
1039 ++++ b/drivers/iio/accel/mma9553.c
1040 +@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
1041 + ret = iio_device_register(indio_dev);
1042 + if (ret < 0) {
1043 + dev_err(&client->dev, "unable to register iio device\n");
1044 +- goto out_poweroff;
1045 ++ goto err_pm_cleanup;
1046 + }
1047 +
1048 + dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
1049 + return 0;
1050 +
1051 ++err_pm_cleanup:
1052 ++ pm_runtime_dont_use_autosuspend(&client->dev);
1053 ++ pm_runtime_disable(&client->dev);
1054 + out_poweroff:
1055 + mma9551_set_device_state(client, false);
1056 + return ret;
1057 +diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
1058 +index e45c600fccc0b..18c154afbd7ac 100644
1059 +--- a/drivers/iio/adc/ad7124.c
1060 ++++ b/drivers/iio/adc/ad7124.c
1061 +@@ -76,7 +76,7 @@
1062 + #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
1063 + #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
1064 + #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
1065 +-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6)
1066 ++#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
1067 + #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
1068 +
1069 + /* AD7124_FILTER_X */
1070 +diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
1071 +index 42ea8bc7e7805..adc5ceaef8c93 100644
1072 +--- a/drivers/iio/adc/men_z188_adc.c
1073 ++++ b/drivers/iio/adc/men_z188_adc.c
1074 +@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
1075 + struct z188_adc *adc;
1076 + struct iio_dev *indio_dev;
1077 + struct resource *mem;
1078 ++ int ret;
1079 +
1080 + indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
1081 + if (!indio_dev)
1082 +@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
1083 + adc->mem = mem;
1084 + mcb_set_drvdata(dev, indio_dev);
1085 +
1086 +- return iio_device_register(indio_dev);
1087 ++ ret = iio_device_register(indio_dev);
1088 ++ if (ret)
1089 ++ goto err_unmap;
1090 ++
1091 ++ return 0;
1092 +
1093 ++err_unmap:
1094 ++ iounmap(adc->base);
1095 + err:
1096 + mcb_release_mem(mem);
1097 + return -ENXIO;
1098 +diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
1099 +index d84ae6b008c1b..e8fc4d01f30b6 100644
1100 +--- a/drivers/iio/adc/ti-tsc2046.c
1101 ++++ b/drivers/iio/adc/ti-tsc2046.c
1102 +@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
1103 + mutex_lock(&priv->slock);
1104 +
1105 + size = 0;
1106 +- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
1107 ++ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
1108 + size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
1109 + tsc2046_adc_group_set_cmd(priv, group, ch_idx);
1110 + group++;
1111 +@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
1112 + * enabled.
1113 + */
1114 + size = 0;
1115 +- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
1116 ++ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
1117 + size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
1118 +
1119 + priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
1120 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
1121 +index 17b939a367ad0..81a6d09788bd7 100644
1122 +--- a/drivers/iio/gyro/bmg160_core.c
1123 ++++ b/drivers/iio/gyro/bmg160_core.c
1124 +@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
1125 + ret = iio_device_register(indio_dev);
1126 + if (ret < 0) {
1127 + dev_err(dev, "unable to register iio device\n");
1128 +- goto err_buffer_cleanup;
1129 ++ goto err_pm_cleanup;
1130 + }
1131 +
1132 + return 0;
1133 +
1134 ++err_pm_cleanup:
1135 ++ pm_runtime_dont_use_autosuspend(dev);
1136 ++ pm_runtime_disable(dev);
1137 + err_buffer_cleanup:
1138 + iio_triggered_buffer_cleanup(indio_dev);
1139 + err_trigger_unregister:
1140 +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
1141 +index ed129321a14da..f9b4540db1f43 100644
1142 +--- a/drivers/iio/imu/adis16480.c
1143 ++++ b/drivers/iio/imu/adis16480.c
1144 +@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
1145 + {
1146 + const struct spi_device_id *id = spi_get_device_id(spi);
1147 + const struct adis_data *adis16480_data;
1148 ++ irq_handler_t trigger_handler = NULL;
1149 + struct iio_dev *indio_dev;
1150 + struct adis16480 *st;
1151 + int ret;
1152 +@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
1153 + st->clk_freq = st->chip_info->int_clk;
1154 + }
1155 +
1156 ++ /* Only use our trigger handler if burst mode is supported */
1157 ++ if (adis16480_data->burst_len)
1158 ++ trigger_handler = adis16480_trigger_handler;
1159 ++
1160 + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
1161 +- adis16480_trigger_handler);
1162 ++ trigger_handler);
1163 + if (ret)
1164 + return ret;
1165 +
1166 +diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
1167 +index 1dabfd615dabf..f89724481df93 100644
1168 +--- a/drivers/iio/imu/kmx61.c
1169 ++++ b/drivers/iio/imu/kmx61.c
1170 +@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
1171 + ret = iio_device_register(data->acc_indio_dev);
1172 + if (ret < 0) {
1173 + dev_err(&client->dev, "Failed to register acc iio device\n");
1174 +- goto err_buffer_cleanup_mag;
1175 ++ goto err_pm_cleanup;
1176 + }
1177 +
1178 + ret = iio_device_register(data->mag_indio_dev);
1179 +@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
1180 +
1181 + err_iio_unregister_acc:
1182 + iio_device_unregister(data->acc_indio_dev);
1183 ++err_pm_cleanup:
1184 ++ pm_runtime_dont_use_autosuspend(&client->dev);
1185 ++ pm_runtime_disable(&client->dev);
1186 + err_buffer_cleanup_mag:
1187 + if (client->irq > 0)
1188 + iio_triggered_buffer_cleanup(data->mag_indio_dev);
1189 +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1190 +index 8dbf744c5651f..a778aceba3b10 100644
1191 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1192 ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1193 +@@ -1372,8 +1372,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
1194 + if (err < 0)
1195 + return err;
1196 +
1197 ++ /*
1198 ++ * we need to wait for sensor settling time before
1199 ++ * reading data in order to avoid corrupted samples
1200 ++ */
1201 + delay = 1000000000 / sensor->odr;
1202 +- usleep_range(delay, 2 * delay);
1203 ++ usleep_range(3 * delay, 4 * delay);
1204 +
1205 + err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
1206 + if (err < 0)
1207 +diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
1208 +index f96f531753495..3d4d21f979fab 100644
1209 +--- a/drivers/iio/magnetometer/bmc150_magn.c
1210 ++++ b/drivers/iio/magnetometer/bmc150_magn.c
1211 +@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
1212 + ret = iio_device_register(indio_dev);
1213 + if (ret < 0) {
1214 + dev_err(dev, "unable to register iio device\n");
1215 +- goto err_disable_runtime_pm;
1216 ++ goto err_pm_cleanup;
1217 + }
1218 +
1219 + dev_dbg(dev, "Registered device %s\n", name);
1220 + return 0;
1221 +
1222 +-err_disable_runtime_pm:
1223 ++err_pm_cleanup:
1224 ++ pm_runtime_dont_use_autosuspend(dev);
1225 + pm_runtime_disable(dev);
1226 + err_buffer_cleanup:
1227 + iio_triggered_buffer_cleanup(indio_dev);
1228 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1229 +index 13679c7b65774..db7b5de3bc76b 100644
1230 +--- a/drivers/infiniband/core/cma.c
1231 ++++ b/drivers/infiniband/core/cma.c
1232 +@@ -3368,22 +3368,30 @@ err:
1233 + static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1234 + const struct sockaddr *dst_addr)
1235 + {
1236 +- if (!src_addr || !src_addr->sa_family) {
1237 +- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
1238 +- src_addr->sa_family = dst_addr->sa_family;
1239 +- if (IS_ENABLED(CONFIG_IPV6) &&
1240 +- dst_addr->sa_family == AF_INET6) {
1241 +- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
1242 +- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
1243 +- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
1244 +- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1245 +- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
1246 +- } else if (dst_addr->sa_family == AF_IB) {
1247 +- ((struct sockaddr_ib *) src_addr)->sib_pkey =
1248 +- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
1249 +- }
1250 +- }
1251 +- return rdma_bind_addr(id, src_addr);
1252 ++ struct sockaddr_storage zero_sock = {};
1253 ++
1254 ++ if (src_addr && src_addr->sa_family)
1255 ++ return rdma_bind_addr(id, src_addr);
1256 ++
1257 ++ /*
1258 ++ * When the src_addr is not specified, automatically supply an any addr
1259 ++ */
1260 ++ zero_sock.ss_family = dst_addr->sa_family;
1261 ++ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
1262 ++ struct sockaddr_in6 *src_addr6 =
1263 ++ (struct sockaddr_in6 *)&zero_sock;
1264 ++ struct sockaddr_in6 *dst_addr6 =
1265 ++ (struct sockaddr_in6 *)dst_addr;
1266 ++
1267 ++ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
1268 ++ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1269 ++ id->route.addr.dev_addr.bound_dev_if =
1270 ++ dst_addr6->sin6_scope_id;
1271 ++ } else if (dst_addr->sa_family == AF_IB) {
1272 ++ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
1273 ++ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
1274 ++ }
1275 ++ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
1276 + }
1277 +
1278 + /*
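The rewritten cma_bind_addr() above builds the wildcard source address in a stack-local sockaddr_storage and hands that to rdma_bind_addr(), instead of writing into id->route.addr.src_addr before the bind has been validated; the intent (the upstream changelog describes the precise race) is that a failed or concurrent bind can no longer leave a half-initialized source address inside the cm_id. A user-space sketch of the same wildcard construction; make_any_addr() is a hypothetical helper, not part of the RDMA CM API:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Fill 'any' with the wildcard address of the same family as dst,
     * the same idea as zero_sock in the hunk above. */
    static void make_any_addr(struct sockaddr_storage *any,
                              const struct sockaddr *dst)
    {
            memset(any, 0, sizeof(*any));
            any->ss_family = dst->sa_family;  /* address and port stay zeroed */
            if (dst->sa_family == AF_INET6) {
                    /* keep link-local destinations bound to the right interface */
                    ((struct sockaddr_in6 *)any)->sin6_scope_id =
                            ((const struct sockaddr_in6 *)dst)->sin6_scope_id;
            }
    }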
1279 +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
1280 +index 0a3b28142c05b..41c272980f91c 100644
1281 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c
1282 ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
1283 +@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
1284 + };
1285 +
1286 + static const struct attribute_group port_diagc_group = {
1287 +- .name = "linkcontrol",
1288 ++ .name = "diag_counters",
1289 + .attrs = port_diagc_attributes,
1290 + };
1291 +
1292 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1293 +index 55ebe01ec9951..a23438bacf12c 100644
1294 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1295 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1296 +@@ -2664,6 +2664,8 @@ static void rtrs_clt_dev_release(struct device *dev)
1297 + {
1298 + struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
1299 +
1300 ++ mutex_destroy(&clt->paths_ev_mutex);
1301 ++ mutex_destroy(&clt->paths_mutex);
1302 + kfree(clt);
1303 + }
1304 +
1305 +@@ -2693,6 +2695,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
1306 + return ERR_PTR(-ENOMEM);
1307 + }
1308 +
1309 ++ clt->dev.class = rtrs_clt_dev_class;
1310 ++ clt->dev.release = rtrs_clt_dev_release;
1311 + uuid_gen(&clt->paths_uuid);
1312 + INIT_LIST_HEAD_RCU(&clt->paths_list);
1313 + clt->paths_num = paths_num;
1314 +@@ -2709,53 +2713,51 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
1315 + init_waitqueue_head(&clt->permits_wait);
1316 + mutex_init(&clt->paths_ev_mutex);
1317 + mutex_init(&clt->paths_mutex);
1318 ++ device_initialize(&clt->dev);
1319 +
1320 +- clt->dev.class = rtrs_clt_dev_class;
1321 +- clt->dev.release = rtrs_clt_dev_release;
1322 + err = dev_set_name(&clt->dev, "%s", sessname);
1323 + if (err)
1324 +- goto err;
1325 ++ goto err_put;
1326 ++
1327 + /*
1328 + * Suppress user space notification until
1329 + * sysfs files are created
1330 + */
1331 + dev_set_uevent_suppress(&clt->dev, true);
1332 +- err = device_register(&clt->dev);
1333 +- if (err) {
1334 +- put_device(&clt->dev);
1335 +- goto err;
1336 +- }
1337 ++ err = device_add(&clt->dev);
1338 ++ if (err)
1339 ++ goto err_put;
1340 +
1341 + clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
1342 + if (!clt->kobj_paths) {
1343 + err = -ENOMEM;
1344 +- goto err_dev;
1345 ++ goto err_del;
1346 + }
1347 + err = rtrs_clt_create_sysfs_root_files(clt);
1348 + if (err) {
1349 + kobject_del(clt->kobj_paths);
1350 + kobject_put(clt->kobj_paths);
1351 +- goto err_dev;
1352 ++ goto err_del;
1353 + }
1354 + dev_set_uevent_suppress(&clt->dev, false);
1355 + kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
1356 +
1357 + return clt;
1358 +-err_dev:
1359 +- device_unregister(&clt->dev);
1360 +-err:
1361 ++err_del:
1362 ++ device_del(&clt->dev);
1363 ++err_put:
1364 + free_percpu(clt->pcpu_path);
1365 +- kfree(clt);
1366 ++ put_device(&clt->dev);
1367 + return ERR_PTR(err);
1368 + }
1369 +
1370 + static void free_clt(struct rtrs_clt *clt)
1371 + {
1372 +- free_permits(clt);
1373 + free_percpu(clt->pcpu_path);
1374 +- mutex_destroy(&clt->paths_ev_mutex);
1375 +- mutex_destroy(&clt->paths_mutex);
1376 +- /* release callback will free clt in last put */
1377 ++
1378 ++ /*
1379 ++ * release callback will free clt and destroy mutexes in last put
1380 ++ */
1381 + device_unregister(&clt->dev);
1382 + }
1383 +
1384 +@@ -2866,6 +2868,7 @@ void rtrs_clt_close(struct rtrs_clt *clt)
1385 + rtrs_clt_destroy_sess_files(sess, NULL);
1386 + kobject_put(&sess->kobj);
1387 + }
1388 ++ free_permits(clt);
1389 + free_clt(clt);
1390 + }
1391 + EXPORT_SYMBOL(rtrs_clt_close);
1392 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1393 +index 71eda91e810cf..5d416ec228717 100644
1394 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1395 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1396 +@@ -4038,9 +4038,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
1397 + spin_unlock(&host->target_lock);
1398 +
1399 + /*
1400 +- * Wait for tl_err and target port removal tasks.
1401 ++ * srp_queue_remove_work() queues a call to
1402 ++ * srp_remove_target(). The latter function cancels
1403 ++ * target->tl_err_work so waiting for the remove works to
1404 ++ * finish is sufficient.
1405 + */
1406 +- flush_workqueue(system_long_wq);
1407 + flush_workqueue(srp_remove_wq);
1408 +
1409 + kfree(host);
1410 +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
1411 +index 54df9cfd588ea..61f236e0378a6 100644
1412 +--- a/drivers/mtd/mtdcore.c
1413 ++++ b/drivers/mtd/mtdcore.c
1414 +@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
1415 + config.stride = 1;
1416 + config.read_only = true;
1417 + config.root_only = true;
1418 ++ config.ignore_wp = true;
1419 + config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
1420 + config.priv = mtd;
1421 +
1422 +@@ -830,6 +831,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
1423 + config.owner = THIS_MODULE;
1424 + config.type = NVMEM_TYPE_OTP;
1425 + config.root_only = true;
1426 ++ config.ignore_wp = true;
1427 + config.reg_read = reg_read;
1428 + config.size = size;
1429 + config.of_node = np;
1430 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1431 +index 10a5b43976d20..dc70f6f96d024 100644
1432 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1433 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1434 +@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
1435 + MODULE_FIRMWARE(FW_FILE_NAME_E1);
1436 + MODULE_FIRMWARE(FW_FILE_NAME_E1H);
1437 + MODULE_FIRMWARE(FW_FILE_NAME_E2);
1438 ++MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
1439 ++MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
1440 ++MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
1441 +
1442 + int bnx2x_num_queues;
1443 + module_param_named(num_queues, bnx2x_num_queues, int, 0444);
1444 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1445 +index a8855a200a3c5..f92bea4faa019 100644
1446 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1447 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1448 +@@ -4757,8 +4757,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
1449 + return rc;
1450 +
1451 + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
1452 +- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
1453 +- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
1454 ++ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
1455 ++ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
1456 ++ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
1457 ++ }
1458 + req->mask = cpu_to_le32(vnic->rx_mask);
1459 + return hwrm_req_send_silent(bp, req);
1460 + }
1461 +@@ -8624,7 +8626,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
1462 + if (bp->dev->flags & IFF_ALLMULTI) {
1463 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1464 + vnic->mc_list_count = 0;
1465 +- } else {
1466 ++ } else if (bp->dev->flags & IFF_MULTICAST) {
1467 + u32 mask = 0;
1468 +
1469 + bnxt_mc_list_updated(bp, &mask);
1470 +@@ -10295,12 +10297,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
1471 + goto half_open_err;
1472 + }
1473 +
1474 +- rc = bnxt_alloc_mem(bp, false);
1475 ++ rc = bnxt_alloc_mem(bp, true);
1476 + if (rc) {
1477 + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
1478 + goto half_open_err;
1479 + }
1480 +- rc = bnxt_init_nic(bp, false);
1481 ++ rc = bnxt_init_nic(bp, true);
1482 + if (rc) {
1483 + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
1484 + goto half_open_err;
1485 +@@ -10309,7 +10311,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
1486 +
1487 + half_open_err:
1488 + bnxt_free_skbs(bp);
1489 +- bnxt_free_mem(bp, false);
1490 ++ bnxt_free_mem(bp, true);
1491 + dev_close(bp->dev);
1492 + return rc;
1493 + }
1494 +@@ -10319,9 +10321,9 @@ half_open_err:
1495 + */
1496 + void bnxt_half_close_nic(struct bnxt *bp)
1497 + {
1498 +- bnxt_hwrm_resource_free(bp, false, false);
1499 ++ bnxt_hwrm_resource_free(bp, false, true);
1500 + bnxt_free_skbs(bp);
1501 +- bnxt_free_mem(bp, false);
1502 ++ bnxt_free_mem(bp, true);
1503 + }
1504 +
1505 + static void bnxt_reenable_sriov(struct bnxt *bp)
1506 +@@ -10737,7 +10739,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
1507 + if (dev->flags & IFF_ALLMULTI) {
1508 + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1509 + vnic->mc_list_count = 0;
1510 +- } else {
1511 ++ } else if (dev->flags & IFF_MULTICAST) {
1512 + mc_update = bnxt_mc_list_updated(bp, &mask);
1513 + }
1514 +
1515 +@@ -10805,9 +10807,10 @@ skip_uc:
1516 + !bnxt_promisc_ok(bp))
1517 + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
1518 + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
1519 +- if (rc && vnic->mc_list_count) {
1520 ++ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
1521 + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
1522 + rc);
1523 ++ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
1524 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1525 + vnic->mc_list_count = 0;
1526 + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
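
The bnxt.c hunks above are one logical fix: the multicast table address and
count are only written into the HWRM request when the MCAST bit is actually
set in the mask, the MC list is only gathered when IFF_MULTICAST is enabled,
and a firmware rejection of a request that carried a multicast list is
retried in ALL_MCAST mode. A minimal sketch of that fallback, with
fw_set_rx_mask() and the flag values as illustrative stand-ins for the HWRM
call and the CFA_L2_SET_RX_MASK bits:

#define MASK_MCAST      0x1
#define MASK_ALL_MCAST  0x2

struct vnic {
        unsigned int rx_mask;
        unsigned int mc_list_count;
};

extern int fw_set_rx_mask(struct vnic *v);      /* 0 on success */

static int set_rx_mask_with_fallback(struct vnic *v)
{
        int rc = fw_set_rx_mask(v);

        if (rc && (v->rx_mask & MASK_MCAST)) {
                v->rx_mask &= ~MASK_MCAST;      /* drop the exact list */
                v->rx_mask |= MASK_ALL_MCAST;   /* accept all multicast */
                v->mc_list_count = 0;
                rc = fw_set_rx_mask(v);         /* retry once */
        }
        return rc;
}

Clearing MASK_MCAST before the retry is the important part: it is what lets
the new conditional in bnxt_hwrm_cfa_l2_set_rx_mask() skip the stale list
fields on the second attempt.
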
1527 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1528 +index 2497925105215..da3ee22e8a16f 100644
1529 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1530 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1531 +@@ -25,6 +25,7 @@
1532 + #include "bnxt_hsi.h"
1533 + #include "bnxt.h"
1534 + #include "bnxt_hwrm.h"
1535 ++#include "bnxt_ulp.h"
1536 + #include "bnxt_xdp.h"
1537 + #include "bnxt_ptp.h"
1538 + #include "bnxt_ethtool.h"
1539 +@@ -1942,6 +1943,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
1540 + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
1541 + fec->active_fec |= ETHTOOL_FEC_LLRS;
1542 + break;
1543 ++ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
1544 ++ fec->active_fec |= ETHTOOL_FEC_OFF;
1545 ++ break;
1546 + }
1547 + return 0;
1548 + }
1549 +@@ -3499,9 +3503,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
1550 + if (!offline) {
1551 + bnxt_run_fw_tests(bp, test_mask, &test_results);
1552 + } else {
1553 +- rc = bnxt_close_nic(bp, false, false);
1554 +- if (rc)
1555 ++ bnxt_ulp_stop(bp);
1556 ++ rc = bnxt_close_nic(bp, true, false);
1557 ++ if (rc) {
1558 ++ bnxt_ulp_start(bp, rc);
1559 + return;
1560 ++ }
1561 + bnxt_run_fw_tests(bp, test_mask, &test_results);
1562 +
1563 + buf[BNXT_MACLPBK_TEST_IDX] = 1;
1564 +@@ -3511,6 +3518,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
1565 + if (rc) {
1566 + bnxt_hwrm_mac_loopback(bp, false);
1567 + etest->flags |= ETH_TEST_FL_FAILED;
1568 ++ bnxt_ulp_start(bp, rc);
1569 + return;
1570 + }
1571 + if (bnxt_run_loopback(bp))
1572 +@@ -3536,7 +3544,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
1573 + }
1574 + bnxt_hwrm_phy_loopback(bp, false, false);
1575 + bnxt_half_close_nic(bp);
1576 +- rc = bnxt_open_nic(bp, false, true);
1577 ++ rc = bnxt_open_nic(bp, true, true);
1578 ++ bnxt_ulp_start(bp, rc);
1579 + }
1580 + if (rc || bnxt_test_irq(bp)) {
1581 + buf[BNXT_IRQ_TEST_IDX] = 1;
1582 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
1583 +index 8171f4912fa01..3a0eeb3737767 100644
1584 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
1585 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
1586 +@@ -595,18 +595,24 @@ timeout_abort:
1587 +
1588 + /* Last byte of resp contains valid bit */
1589 + valid = ((u8 *)ctx->resp) + len - 1;
1590 +- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
1591 ++ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
1592 + /* make sure we read from updated DMA memory */
1593 + dma_rmb();
1594 + if (*valid)
1595 + break;
1596 +- usleep_range(1, 5);
1597 ++ if (j < 10) {
1598 ++ udelay(1);
1599 ++ j++;
1600 ++ } else {
1601 ++ usleep_range(20, 30);
1602 ++ j += 20;
1603 ++ }
1604 + }
1605 +
1606 + if (j >= HWRM_VALID_BIT_DELAY_USEC) {
1607 + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
1608 + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
1609 +- hwrm_total_timeout(i),
1610 ++ hwrm_total_timeout(i) + j,
1611 + le16_to_cpu(ctx->req->req_type),
1612 + le16_to_cpu(ctx->req->seq_id), len,
1613 + *valid);
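
The reworked wait loop above polls the HWRM valid bit in two tiers: 1 us
busy-wait steps for the first 10 us, so fast firmware responses are caught
with no scheduling latency, then 20-30 us sleeps that each charge only 20 us
against the budget (which the next hunk raises to 50 ms). A compilable
userspace sketch of the same pattern, where wait_for_flag() and BUDGET_USEC
are illustrative rather than driver API:

#include <stdbool.h>
#include <time.h>

#define BUDGET_USEC 50000

static void sleep_usec(long usec)
{
        struct timespec ts = { 0, usec * 1000L };
        nanosleep(&ts, NULL);
}

static bool wait_for_flag(volatile bool *flag)
{
        unsigned int elapsed = 0;

        while (elapsed < BUDGET_USEC) {
                if (*flag)
                        return true;
                if (elapsed < 10) {
                        sleep_usec(1);          /* tier 1: fine-grained */
                        elapsed += 1;
                } else {
                        sleep_usec(25);         /* tier 2: coarse, cheap */
                        elapsed += 20;
                }
        }
        return false;                           /* budget exhausted */
}
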
1614 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
1615 +index 9a9fc4e8041b6..380ef69afb51b 100644
1616 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
1617 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
1618 +@@ -94,7 +94,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
1619 + }
1620 +
1621 +
1622 +-#define HWRM_VALID_BIT_DELAY_USEC 150
1623 ++#define HWRM_VALID_BIT_DELAY_USEC 50000
1624 +
1625 + static inline bool bnxt_cfa_hwrm_message(u16 req_type)
1626 + {
1627 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1628 +index 5c7371dc83848..14a729ba737a8 100644
1629 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
1630 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
1631 +@@ -5733,10 +5733,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
1632 + be64_to_cpu(session_token));
1633 + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1634 + H_SESSION_ERR_DETECTED, session_token, 0, 0);
1635 +- if (rc)
1636 ++ if (rc) {
1637 + netdev_err(netdev,
1638 + "H_VIOCTL initiated failover failed, rc %ld\n",
1639 + rc);
1640 ++ goto last_resort;
1641 ++ }
1642 ++
1643 ++ return count;
1644 +
1645 + last_resort:
1646 + netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
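
The ibmvnic fix above is pure control flow: previously a successful H_VIOCTL
fell through into the CRQ "last resort" path as well, so the failover was
requested twice. The corrected shape, with try_hcall() and send_crq() as
stand-ins:

extern int try_hcall(void);     /* preferred mechanism */
extern int send_crq(void);      /* fallback mechanism */

static int request_failover(void)
{
        if (try_hcall() != 0)
                goto last_resort;       /* preferred path failed */
        return 0;                       /* success: skip the fallback */

last_resort:
        return send_crq();
}
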
1647 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1648 +index 063ded36b902e..ad73dd2540e71 100644
1649 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1650 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1651 +@@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
1652 + /* There is no need to reset BW when mqprio mode is on. */
1653 + if (pf->flags & I40E_FLAG_TC_MQPRIO)
1654 + return 0;
1655 +-
1656 +- if (!vsi->mqprio_qopt.qopt.hw) {
1657 +- if (pf->flags & I40E_FLAG_DCB_ENABLED)
1658 +- goto skip_reset;
1659 +-
1660 +- if (IS_ENABLED(CONFIG_I40E_DCB) &&
1661 +- i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
1662 +- goto skip_reset;
1663 +-
1664 ++ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
1665 + ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
1666 + if (ret)
1667 + dev_info(&pf->pdev->dev,
1668 +@@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
1669 + vsi->seid);
1670 + return ret;
1671 + }
1672 +-
1673 +-skip_reset:
1674 + memset(&bw_data, 0, sizeof(bw_data));
1675 + bw_data.tc_valid_bits = enabled_tc;
1676 + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
1677 +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
1678 +index d119812755b7a..387322615e080 100644
1679 +--- a/drivers/net/ethernet/intel/ice/ice.h
1680 ++++ b/drivers/net/ethernet/intel/ice/ice.h
1681 +@@ -231,7 +231,6 @@ enum ice_pf_state {
1682 + ICE_VFLR_EVENT_PENDING,
1683 + ICE_FLTR_OVERFLOW_PROMISC,
1684 + ICE_VF_DIS,
1685 +- ICE_VF_DEINIT_IN_PROGRESS,
1686 + ICE_CFG_BUSY,
1687 + ICE_SERVICE_SCHED,
1688 + ICE_SERVICE_DIS,
1689 +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
1690 +index f4463e962d524..3de6f16f985ab 100644
1691 +--- a/drivers/net/ethernet/intel/ice/ice_common.c
1692 ++++ b/drivers/net/ethernet/intel/ice/ice_common.c
1693 +@@ -3270,7 +3270,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
1694 +
1695 + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
1696 + !ice_fw_supports_report_dflt_cfg(hw)) {
1697 +- struct ice_link_default_override_tlv tlv;
1698 ++ struct ice_link_default_override_tlv tlv = { 0 };
1699 +
1700 + status = ice_get_link_default_override(&tlv, pi);
1701 + if (status)
1702 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1703 +index ab2dea0d2c1ae..8a0c928853e6a 100644
1704 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
1705 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
1706 +@@ -1679,7 +1679,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
1707 + * reset, so print the event prior to reset.
1708 + */
1709 + ice_print_vf_rx_mdd_event(vf);
1710 ++ mutex_lock(&pf->vf[i].cfg_lock);
1711 + ice_reset_vf(&pf->vf[i], false);
1712 ++ mutex_unlock(&pf->vf[i].cfg_lock);
1713 + }
1714 + }
1715 + }
1716 +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
1717 +index ac27a4fe8b94c..eb91936825795 100644
1718 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
1719 ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
1720 +@@ -846,9 +846,12 @@ exit:
1721 + static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1722 + {
1723 + struct timespec64 now, then;
1724 ++ int ret;
1725 +
1726 + then = ns_to_timespec64(delta);
1727 +- ice_ptp_gettimex64(info, &now, NULL);
1728 ++ ret = ice_ptp_gettimex64(info, &now, NULL);
1729 ++ if (ret)
1730 ++ return ret;
1731 + now = timespec64_add(now, then);
1732 +
1733 + return ice_ptp_settime64(info, (const struct timespec64 *)&now);
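
A non-atomic adjtime is a read-modify-write of the clock, so the ice_ptp
hunk above makes a failed read abort the whole sequence instead of adding
the delta to an uninitialized timespec. A sketch of that shape, with
clock_get()/clock_set() as hypothetical backends:

struct ts64 { long long sec; long nsec; };

extern int clock_get(struct ts64 *now);
extern int clock_set(const struct ts64 *t);

static int clock_adj(long long delta_ns)
{
        struct ts64 now;
        int ret;

        ret = clock_get(&now);  /* return value was ignored pre-fix */
        if (ret)
                return ret;     /* don't write back garbage */

        now.sec  += delta_ns / 1000000000LL;
        now.nsec += delta_ns % 1000000000LL;
        if (now.nsec >= 1000000000L) {          /* normalize */
                now.sec++;
                now.nsec -= 1000000000L;
        } else if (now.nsec < 0) {
                now.sec--;
                now.nsec += 1000000000L;
        }
        return clock_set(&now);
}
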
1734 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1735 +index a78e8f00cf71b..4054adb5279c3 100644
1736 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1737 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1738 +@@ -615,8 +615,6 @@ void ice_free_vfs(struct ice_pf *pf)
1739 + struct ice_hw *hw = &pf->hw;
1740 + unsigned int tmp, i;
1741 +
1742 +- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
1743 +-
1744 + if (!pf->vf)
1745 + return;
1746 +
1747 +@@ -632,20 +630,26 @@ void ice_free_vfs(struct ice_pf *pf)
1748 + else
1749 + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
1750 +
1751 +- /* Avoid wait time by stopping all VFs at the same time */
1752 +- ice_for_each_vf(pf, i)
1753 +- ice_dis_vf_qs(&pf->vf[i]);
1754 +-
1755 + tmp = pf->num_alloc_vfs;
1756 + pf->num_qps_per_vf = 0;
1757 + pf->num_alloc_vfs = 0;
1758 + for (i = 0; i < tmp; i++) {
1759 +- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
1760 ++ struct ice_vf *vf = &pf->vf[i];
1761 ++
1762 ++ mutex_lock(&vf->cfg_lock);
1763 ++
1764 ++ ice_dis_vf_qs(vf);
1765 ++
1766 ++ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1767 + /* disable VF qp mappings and set VF disable state */
1768 +- ice_dis_vf_mappings(&pf->vf[i]);
1769 +- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
1770 +- ice_free_vf_res(&pf->vf[i]);
1771 ++ ice_dis_vf_mappings(vf);
1772 ++ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1773 ++ ice_free_vf_res(vf);
1774 + }
1775 ++
1776 ++ mutex_unlock(&vf->cfg_lock);
1777 ++
1778 ++ mutex_destroy(&vf->cfg_lock);
1779 + }
1780 +
1781 + if (ice_sriov_free_msix_res(pf))
1782 +@@ -681,7 +685,6 @@ void ice_free_vfs(struct ice_pf *pf)
1783 + i);
1784 +
1785 + clear_bit(ICE_VF_DIS, pf->state);
1786 +- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
1787 + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1788 + }
1789 +
1790 +@@ -1565,6 +1568,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1791 + ice_for_each_vf(pf, v) {
1792 + vf = &pf->vf[v];
1793 +
1794 ++ mutex_lock(&vf->cfg_lock);
1795 ++
1796 + vf->driver_caps = 0;
1797 + ice_vc_set_default_allowlist(vf);
1798 +
1799 +@@ -1579,6 +1584,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1800 + ice_vf_pre_vsi_rebuild(vf);
1801 + ice_vf_rebuild_vsi(vf);
1802 + ice_vf_post_vsi_rebuild(vf);
1803 ++
1804 ++ mutex_unlock(&vf->cfg_lock);
1805 + }
1806 +
1807 + ice_flush(hw);
1808 +@@ -1625,6 +1632,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1809 + u32 reg;
1810 + int i;
1811 +
1812 ++ lockdep_assert_held(&vf->cfg_lock);
1813 ++
1814 + dev = ice_pf_to_dev(pf);
1815 +
1816 + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1817 +@@ -1894,6 +1903,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1818 + */
1819 + ice_vf_ctrl_invalidate_vsi(vf);
1820 + ice_vf_fdir_init(vf);
1821 ++
1822 ++ mutex_init(&vf->cfg_lock);
1823 + }
1824 + }
1825 +
1826 +@@ -2109,9 +2120,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
1827 + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1828 + /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1829 + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1830 +- if (reg & BIT(bit_idx))
1831 ++ if (reg & BIT(bit_idx)) {
1832 + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1833 ++ mutex_lock(&vf->cfg_lock);
1834 + ice_reset_vf(vf, true);
1835 ++ mutex_unlock(&vf->cfg_lock);
1836 ++ }
1837 + }
1838 + }
1839 +
1840 +@@ -2188,7 +2202,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1841 + if (!vf)
1842 + return;
1843 +
1844 ++ mutex_lock(&vf->cfg_lock);
1845 + ice_vc_reset_vf(vf);
1846 ++ mutex_unlock(&vf->cfg_lock);
1847 + }
1848 +
1849 + /**
1850 +@@ -4082,6 +4098,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
1851 + return 0;
1852 + }
1853 +
1854 ++ mutex_lock(&vf->cfg_lock);
1855 ++
1856 + vf->port_vlan_info = vlanprio;
1857 +
1858 + if (vf->port_vlan_info)
1859 +@@ -4091,6 +4109,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
1860 + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
1861 +
1862 + ice_vc_reset_vf(vf);
1863 ++ mutex_unlock(&vf->cfg_lock);
1864 +
1865 + return 0;
1866 + }
1867 +@@ -4422,10 +4441,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
1868 + struct device *dev;
1869 + int err = 0;
1870 +
1871 +- /* if de-init is underway, don't process messages from VF */
1872 +- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
1873 +- return;
1874 +-
1875 + dev = ice_pf_to_dev(pf);
1876 + if (ice_validate_vf_id(pf, vf_id)) {
1877 + err = -EINVAL;
1878 +@@ -4465,6 +4480,15 @@ error_handler:
1879 + return;
1880 + }
1881 +
1882 ++ /* VF is being configured in another context that triggers a VFR, so no
1883 ++ * need to process this message
1884 ++ */
1885 ++ if (!mutex_trylock(&vf->cfg_lock)) {
1886 ++ dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
1887 ++ vf->vf_id);
1888 ++ return;
1889 ++ }
1890 ++
1891 + switch (v_opcode) {
1892 + case VIRTCHNL_OP_VERSION:
1893 + err = ice_vc_get_ver_msg(vf, msg);
1894 +@@ -4553,6 +4577,8 @@ error_handler:
1895 + dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
1896 + vf_id, v_opcode, err);
1897 + }
1898 ++
1899 ++ mutex_unlock(&vf->cfg_lock);
1900 + }
1901 +
1902 + /**
1903 +@@ -4668,6 +4694,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1904 + return -EINVAL;
1905 + }
1906 +
1907 ++ mutex_lock(&vf->cfg_lock);
1908 ++
1909 + /* VF is notified of its new MAC via the PF's response to the
1910 + * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
1911 + */
1912 +@@ -4686,6 +4714,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1913 + }
1914 +
1915 + ice_vc_reset_vf(vf);
1916 ++ mutex_unlock(&vf->cfg_lock);
1917 + return 0;
1918 + }
1919 +
1920 +@@ -4715,11 +4744,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
1921 + if (trusted == vf->trusted)
1922 + return 0;
1923 +
1924 ++ mutex_lock(&vf->cfg_lock);
1925 ++
1926 + vf->trusted = trusted;
1927 + ice_vc_reset_vf(vf);
1928 + dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
1929 + vf_id, trusted ? "" : "un");
1930 +
1931 ++ mutex_unlock(&vf->cfg_lock);
1932 ++
1933 + return 0;
1934 + }
1935 +
1936 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
1937 +index 38b4dc82c5c18..a750e9a9d7125 100644
1938 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
1939 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
1940 +@@ -74,6 +74,11 @@ struct ice_mdd_vf_events {
1941 + struct ice_vf {
1942 + struct ice_pf *pf;
1943 +
1944 ++ /* Used during virtchnl message handling and NDO ops against the VF
1945 ++ * that will trigger a VFR
1946 ++ */
1947 ++ struct mutex cfg_lock;
1948 ++
1949 + u16 vf_id; /* VF ID in the PF space */
1950 + u16 lan_vsi_idx; /* index into PF struct */
1951 + u16 ctrl_vsi_idx;
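
All of the ice_virtchnl_pf.c hunks above hang off the new per-VF cfg_lock:
every path that reconfigures a VF and triggers a VF reset takes the mutex,
while the virtchnl message handler only trylocks and drops the message when
it loses the race, since the pending reset invalidates the message anyway.
A pthreads sketch of that pattern (struct vf, vf_reconfigure() and
handle_msg() are illustrative stand-ins):

#include <pthread.h>
#include <stdio.h>

struct vf {
        pthread_mutex_t cfg_lock;       /* assumed initialized at VF setup,
                                         * as in ice_set_dflt_settings_vfs() */
        int id;
};

static void vf_reconfigure(struct vf *vf)       /* trust/MAC/VLAN change */
{
        pthread_mutex_lock(&vf->cfg_lock);
        /* ... apply the change and reset the VF ... */
        pthread_mutex_unlock(&vf->cfg_lock);
}

static void handle_msg(struct vf *vf)
{
        if (pthread_mutex_trylock(&vf->cfg_lock) != 0) {
                printf("VF %d busy, dropping message\n", vf->id);
                return;                 /* a reset is in flight */
        }
        /* ... process the virtchnl message ... */
        pthread_mutex_unlock(&vf->cfg_lock);
}
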
1952 +diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
1953 +index 28d5ad296646a..1b61fe2e9b4d5 100644
1954 +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
1955 ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
1956 +@@ -2700,6 +2700,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
1957 +
1958 + static struct platform_device *port_platdev[3];
1959 +
1960 ++static void mv643xx_eth_shared_of_remove(void)
1961 ++{
1962 ++ int n;
1963 ++
1964 ++ for (n = 0; n < 3; n++) {
1965 ++ platform_device_del(port_platdev[n]);
1966 ++ port_platdev[n] = NULL;
1967 ++ }
1968 ++}
1969 ++
1970 + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
1971 + struct device_node *pnp)
1972 + {
1973 +@@ -2736,7 +2746,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
1974 + return -EINVAL;
1975 + }
1976 +
1977 +- of_get_mac_address(pnp, ppd.mac_addr);
1978 ++ ret = of_get_mac_address(pnp, ppd.mac_addr);
1979 ++ if (ret)
1980 ++ return ret;
1981 +
1982 + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
1983 + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
1984 +@@ -2800,21 +2812,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
1985 + ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
1986 + if (ret) {
1987 + of_node_put(pnp);
1988 ++ mv643xx_eth_shared_of_remove();
1989 + return ret;
1990 + }
1991 + }
1992 + return 0;
1993 + }
1994 +
1995 +-static void mv643xx_eth_shared_of_remove(void)
1996 +-{
1997 +- int n;
1998 +-
1999 +- for (n = 0; n < 3; n++) {
2000 +- platform_device_del(port_platdev[n]);
2001 +- port_platdev[n] = NULL;
2002 +- }
2003 +-}
2004 + #else
2005 + static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2006 + {
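
The mv643xx_eth change is mostly code motion: the remove helper is moved
above the probe loop so that a failure while adding port N can tear down
ports 0..N-1 instead of leaking them. A minimal sketch of that unwind, with
add_port()/del_port() standing in for the platform-device helpers
(del_port(), like platform_device_del(), is assumed to tolerate NULL):

#include <stddef.h>

#define NPORTS 3

static void *ports[NPORTS];

extern void *add_port(int n);           /* NULL on failure */
extern void del_port(void *p);          /* NULL-tolerant */

static void remove_all(void)
{
        int n;

        for (n = 0; n < NPORTS; n++) {
                del_port(ports[n]);
                ports[n] = NULL;
        }
}

static int probe_all(void)
{
        int n;

        for (n = 0; n < NPORTS; n++) {
                ports[n] = add_port(n);
                if (!ports[n]) {
                        remove_all();   /* undo partial registration */
                        return -1;
                }
        }
        return 0;
}
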
2007 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2008 +index 60952b33b5688..d2333310b56fe 100644
2009 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2010 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2011 +@@ -60,37 +60,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
2012 + void *headers_v)
2013 + {
2014 + struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2015 +- struct flow_match_enc_keyid enc_keyid;
2016 + struct flow_match_mpls match;
2017 + void *misc2_c;
2018 + void *misc2_v;
2019 +
2020 +- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2021 +- misc_parameters_2);
2022 +- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2023 +- misc_parameters_2);
2024 +-
2025 +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
2026 +- return 0;
2027 +-
2028 +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
2029 +- return 0;
2030 +-
2031 +- flow_rule_match_enc_keyid(rule, &enc_keyid);
2032 +-
2033 +- if (!enc_keyid.mask->keyid)
2034 +- return 0;
2035 +-
2036 + if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
2037 + !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
2038 + return -EOPNOTSUPP;
2039 +
2040 ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
2041 ++ return -EOPNOTSUPP;
2042 ++
2043 ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
2044 ++ return 0;
2045 ++
2046 + flow_rule_match_mpls(rule, &match);
2047 +
2048 + /* Only support matching the first LSE */
2049 + if (match.mask->used_lses != 1)
2050 + return -EOPNOTSUPP;
2051 +
2052 ++ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2053 ++ misc_parameters_2);
2054 ++ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2055 ++ misc_parameters_2);
2056 ++
2057 + MLX5_SET(fte_match_set_misc2, misc2_c,
2058 + outer_first_mpls_over_udp.mpls_label,
2059 + match.mask->ls[0].mpls_label);
2060 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2061 +index dc9b8718c3c10..2d3cd237355a6 100644
2062 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2063 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2064 +@@ -1754,7 +1754,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
2065 + if (size_read < 0) {
2066 + netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
2067 + __func__, size_read);
2068 +- return 0;
2069 ++ return size_read;
2070 + }
2071 +
2072 + i += size_read;
2073 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2074 +index 0015545d5235b..d2de1e6c514c1 100644
2075 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2076 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2077 +@@ -987,7 +987,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
2078 + }
2079 +
2080 + /* True when explicitly set via priv flag, or XDP prog is loaded */
2081 +- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
2082 ++ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
2083 ++ get_cqe_tls_offload(cqe))
2084 + goto csum_unnecessary;
2085 +
2086 + /* CQE csum doesn't cover padding octets in short ethernet
2087 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2088 +index f3f23fdc20229..3194cdcd2f630 100644
2089 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2090 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2091 +@@ -2784,10 +2784,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2092 + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2093 + return false;
2094 +
2095 +- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2096 +- mlx5_ecpf_vport_exists(esw->dev))
2097 +- return false;
2098 +-
2099 + return true;
2100 + }
2101 +
2102 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2103 +index fe501ba88bea9..00834c914dc64 100644
2104 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2105 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2106 +@@ -2041,6 +2041,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2107 + fte->node.del_hw_func = NULL;
2108 + up_write_ref_node(&fte->node, false);
2109 + tree_put_node(&fte->node, false);
2110 ++ } else {
2111 ++ up_write_ref_node(&fte->node, false);
2112 + }
2113 + kfree(handle);
2114 + }
2115 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2116 +index df58cba37930a..1e8ec4f236b28 100644
2117 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2118 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2119 +@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
2120 +
2121 + u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
2122 + {
2123 ++ if (!mlx5_chains_prios_supported(chains))
2124 ++ return 1;
2125 ++
2126 + if (mlx5_chains_ignore_flow_level_supported(chains))
2127 + return UINT_MAX;
2128 +
2129 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2130 +index 29b7297a836a5..097ab6fe371ca 100644
2131 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2132 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2133 +@@ -516,7 +516,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
2134 +
2135 + /* Check log_max_qp from HCA caps to set in current profile */
2136 + if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
2137 +- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
2138 ++ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
2139 + } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
2140 + mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
2141 + prof->log_max_qp,
2142 +@@ -1762,10 +1762,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
2143 + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
2144 + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
2145 + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
2146 ++ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
2147 + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2148 + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
2149 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2150 + { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
2151 ++ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
2152 + { 0, }
2153 + };
2154 +
2155 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2156 +index 66c24767e3b00..8ad8d73e17f06 100644
2157 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2158 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2159 +@@ -4,7 +4,6 @@
2160 + #include "dr_types.h"
2161 +
2162 + #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
2163 +-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
2164 +
2165 + struct mlx5dr_icm_pool {
2166 + enum mlx5dr_icm_type icm_type;
2167 +@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
2168 + kvfree(icm_mr);
2169 + }
2170 +
2171 +-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
2172 ++static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
2173 + {
2174 +- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
2175 +- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
2176 +- if (!chunk->ste_arr)
2177 +- return -ENOMEM;
2178 +-
2179 +- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
2180 +- DR_STE_SIZE_REDUCED, GFP_KERNEL);
2181 +- if (!chunk->hw_ste_arr)
2182 +- goto out_free_ste_arr;
2183 +-
2184 +- chunk->miss_list = kvmalloc(chunk->num_of_entries *
2185 +- sizeof(chunk->miss_list[0]), GFP_KERNEL);
2186 +- if (!chunk->miss_list)
2187 +- goto out_free_hw_ste_arr;
2188 ++ /* We support only one type of STE size, both for ConnectX-5 and later
2189 ++ * devices. Once the support for match STE which has a larger tag is
2190 ++ * added (32B instead of 16B), the STE size for devices later than
2191 ++ * ConnectX-5 needs to account for that.
2192 ++ */
2193 ++ return DR_STE_SIZE_REDUCED;
2194 ++}
2195 +
2196 +- return 0;
2197 ++static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
2198 ++{
2199 ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
2200 ++ int index = offset / DR_STE_SIZE;
2201 +
2202 +-out_free_hw_ste_arr:
2203 +- kvfree(chunk->hw_ste_arr);
2204 +-out_free_ste_arr:
2205 +- kvfree(chunk->ste_arr);
2206 +- return -ENOMEM;
2207 ++ chunk->ste_arr = &buddy->ste_arr[index];
2208 ++ chunk->miss_list = &buddy->miss_list[index];
2209 ++ chunk->hw_ste_arr = buddy->hw_ste_arr +
2210 ++ index * dr_icm_buddy_get_ste_size(buddy);
2211 + }
2212 +
2213 + static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
2214 + {
2215 +- kvfree(chunk->miss_list);
2216 +- kvfree(chunk->hw_ste_arr);
2217 +- kvfree(chunk->ste_arr);
2218 ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
2219 ++
2220 ++ memset(chunk->hw_ste_arr, 0,
2221 ++ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
2222 ++ memset(chunk->ste_arr, 0,
2223 ++ chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
2224 + }
2225 +
2226 + static enum mlx5dr_icm_type
2227 +@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
2228 + kvfree(chunk);
2229 + }
2230 +
2231 ++static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
2232 ++{
2233 ++ int num_of_entries =
2234 ++ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
2235 ++
2236 ++ buddy->ste_arr = kvcalloc(num_of_entries,
2237 ++ sizeof(struct mlx5dr_ste), GFP_KERNEL);
2238 ++ if (!buddy->ste_arr)
2239 ++ return -ENOMEM;
2240 ++
2241 ++ /* Preallocate full STE size on non-ConnectX-5 devices since
2242 ++ * we need to support both full and reduced with the same cache.
2243 ++ */
2244 ++ buddy->hw_ste_arr = kvcalloc(num_of_entries,
2245 ++ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
2246 ++ if (!buddy->hw_ste_arr)
2247 ++ goto free_ste_arr;
2248 ++
2249 ++ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
2250 ++ if (!buddy->miss_list)
2251 ++ goto free_hw_ste_arr;
2252 ++
2253 ++ return 0;
2254 ++
2255 ++free_hw_ste_arr:
2256 ++ kvfree(buddy->hw_ste_arr);
2257 ++free_ste_arr:
2258 ++ kvfree(buddy->ste_arr);
2259 ++ return -ENOMEM;
2260 ++}
2261 ++
2262 ++static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
2263 ++{
2264 ++ kvfree(buddy->ste_arr);
2265 ++ kvfree(buddy->hw_ste_arr);
2266 ++ kvfree(buddy->miss_list);
2267 ++}
2268 ++
2269 + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
2270 + {
2271 + struct mlx5dr_icm_buddy_mem *buddy;
2272 +@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
2273 + buddy->icm_mr = icm_mr;
2274 + buddy->pool = pool;
2275 +
2276 ++ if (pool->icm_type == DR_ICM_TYPE_STE) {
2277 ++ /* Reduce allocations by preallocating and reusing the STE structures */
2278 ++ if (dr_icm_buddy_init_ste_cache(buddy))
2279 ++ goto err_cleanup_buddy;
2280 ++ }
2281 ++
2282 + /* add it to the -start- of the list in order to search in it first */
2283 + list_add(&buddy->list_node, &pool->buddy_mem_list);
2284 +
2285 + return 0;
2286 +
2287 ++err_cleanup_buddy:
2288 ++ mlx5dr_buddy_cleanup(buddy);
2289 + err_free_buddy:
2290 + kvfree(buddy);
2291 + free_mr:
2292 +@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
2293 +
2294 + mlx5dr_buddy_cleanup(buddy);
2295 +
2296 ++ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
2297 ++ dr_icm_buddy_cleanup_ste_cache(buddy);
2298 ++
2299 + kvfree(buddy);
2300 + }
2301 +
2302 +@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
2303 + chunk->byte_size =
2304 + mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
2305 + chunk->seg = seg;
2306 ++ chunk->buddy_mem = buddy_mem_pool;
2307 +
2308 +- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
2309 +- mlx5dr_err(pool->dmn,
2310 +- "Failed to init ste arrays (order: %d)\n",
2311 +- chunk_size);
2312 +- goto out_free_chunk;
2313 +- }
2314 ++ if (pool->icm_type == DR_ICM_TYPE_STE)
2315 ++ dr_icm_chunk_ste_init(chunk, offset);
2316 +
2317 + buddy_mem_pool->used_memory += chunk->byte_size;
2318 +- chunk->buddy_mem = buddy_mem_pool;
2319 + INIT_LIST_HEAD(&chunk->chunk_list);
2320 +
2321 + /* chunk now is part of the used_list */
2322 + list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
2323 +
2324 + return chunk;
2325 +-
2326 +-out_free_chunk:
2327 +- kvfree(chunk);
2328 +- return NULL;
2329 + }
2330 +
2331 + static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
2332 + {
2333 +- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
2334 +- return true;
2335 ++ int allow_hot_size;
2336 ++
2337 ++ /* sync when hot memory reaches half of the pool size */
2338 ++ allow_hot_size =
2339 ++ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
2340 ++ pool->icm_type) / 2;
2341 +
2342 +- return false;
2343 ++ return pool->hot_memory_size > allow_hot_size;
2344 + }
2345 +
2346 + static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
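
The dr_icm_pool rework replaces three kv*alloc calls per chunk with one
preallocated backing array per buddy; a chunk's arrays become slices of
that cache, located by the chunk's offset, so chunk creation can no longer
fail on allocation and "freeing" is just a memset. A small sketch of the
slicing idea (names and sizes are illustrative):

#include <stdlib.h>
#include <string.h>

#define ENTRY_SZ     64         /* backing bytes per entry */
#define POOL_ENTRIES 4096

struct pool {
        unsigned char *backing; /* POOL_ENTRIES * ENTRY_SZ bytes */
};

struct chunk {
        unsigned char *arr;     /* borrowed slice of pool->backing */
        int nentries;
};

static int pool_init(struct pool *p)
{
        p->backing = calloc(POOL_ENTRIES, ENTRY_SZ);
        return p->backing ? 0 : -1;
}

/* O(1) and cannot fail: the chunk borrows, it does not allocate. */
static void chunk_init(struct pool *p, struct chunk *c,
                       int offset, int nentries)
{
        c->arr = p->backing + (size_t)offset * ENTRY_SZ;
        c->nentries = nentries;
}

/* "Free" just scrubs the slice for the next chunk at this offset. */
static void chunk_cleanup(struct chunk *c)
{
        memset(c->arr, 0, (size_t)c->nentries * ENTRY_SZ);
}

The same hunk also ties the hot-memory sync threshold to half the pool size
instead of a fixed 64 MB, scaling the flush cadence with the preallocated
cache.
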
2347 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2348 +index b5409cc021d33..a19e8157c1002 100644
2349 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2350 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2351 +@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
2352 + return (spec->dmac_47_16 || spec->dmac_15_0);
2353 + }
2354 +
2355 +-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
2356 +-{
2357 +- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
2358 +- spec->src_ip_63_32 || spec->src_ip_31_0);
2359 +-}
2360 +-
2361 +-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
2362 +-{
2363 +- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
2364 +- spec->dst_ip_63_32 || spec->dst_ip_31_0);
2365 +-}
2366 +-
2367 + static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
2368 + {
2369 + return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
2370 +@@ -480,11 +468,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
2371 + &mask, inner, rx);
2372 +
2373 + if (outer_ipv == DR_RULE_IPV6) {
2374 +- if (dr_mask_is_dst_addr_set(&mask.outer))
2375 ++ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
2376 + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
2377 + &mask, inner, rx);
2378 +
2379 +- if (dr_mask_is_src_addr_set(&mask.outer))
2380 ++ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
2381 + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
2382 + &mask, inner, rx);
2383 +
2384 +@@ -580,11 +568,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
2385 + &mask, inner, rx);
2386 +
2387 + if (inner_ipv == DR_RULE_IPV6) {
2388 +- if (dr_mask_is_dst_addr_set(&mask.inner))
2389 ++ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
2390 + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
2391 + &mask, inner, rx);
2392 +
2393 +- if (dr_mask_is_src_addr_set(&mask.inner))
2394 ++ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
2395 + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
2396 + &mask, inner, rx);
2397 +
2398 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2399 +index 1cdfe4fccc7a9..01246a1ae7d13 100644
2400 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2401 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2402 +@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
2403 + used_hw_action_num);
2404 + }
2405 +
2406 ++static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
2407 ++ struct mlx5dr_match_spec *spec)
2408 ++{
2409 ++ if (spec->ip_version) {
2410 ++ if (spec->ip_version != 0xf) {
2411 ++ mlx5dr_err(dmn,
2412 ++ "Partial ip_version mask with src/dst IP is not supported\n");
2413 ++ return -EINVAL;
2414 ++ }
2415 ++ } else if (spec->ethertype != 0xffff &&
2416 ++ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
2417 ++ mlx5dr_err(dmn,
2418 ++ "Partial/no ethertype mask with src/dst IP is not supported\n");
2419 ++ return -EINVAL;
2420 ++ }
2421 ++
2422 ++ return 0;
2423 ++}
2424 ++
2425 + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
2426 + u8 match_criteria,
2427 + struct mlx5dr_match_param *mask,
2428 + struct mlx5dr_match_param *value)
2429 + {
2430 +- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
2431 ++ if (value)
2432 ++ return 0;
2433 ++
2434 ++ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
2435 + if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
2436 + mlx5dr_err(dmn,
2437 + "Partial mask source_port is not supported\n");
2438 +@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
2439 + }
2440 + }
2441 +
2442 ++ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
2443 ++ dr_ste_build_pre_check_spec(dmn, &mask->outer))
2444 ++ return -EINVAL;
2445 ++
2446 ++ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
2447 ++ dr_ste_build_pre_check_spec(dmn, &mask->inner))
2448 ++ return -EINVAL;
2449 ++
2450 + return 0;
2451 + }
2452 +
2453 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2454 +index b20e8aabb861b..3d4e035698dd3 100644
2455 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2456 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2457 +@@ -740,6 +740,16 @@ struct mlx5dr_match_param {
2458 + (_misc3)->icmpv4_code || \
2459 + (_misc3)->icmpv4_header_data)
2460 +
2461 ++#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
2462 ++ (_spec)->src_ip_95_64 || \
2463 ++ (_spec)->src_ip_63_32 || \
2464 ++ (_spec)->src_ip_31_0)
2465 ++
2466 ++#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
2467 ++ (_spec)->dst_ip_95_64 || \
2468 ++ (_spec)->dst_ip_63_32 || \
2469 ++ (_spec)->dst_ip_31_0)
2470 ++
2471 + struct mlx5dr_esw_caps {
2472 + u64 drop_icm_address_rx;
2473 + u64 drop_icm_address_tx;
2474 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2475 +index c5a8b16019991..5ef1995434794 100644
2476 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2477 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2478 +@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
2479 + * sync_ste command sets them free.
2480 + */
2481 + struct list_head hot_list;
2482 ++
2483 ++ /* Memory optimisation */
2484 ++ struct mlx5dr_ste *ste_arr;
2485 ++ struct list_head *miss_list;
2486 ++ u8 *hw_ste_arr;
2487 + };
2488 +
2489 + int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
2490 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2491 +index 6521675be85ce..babd374333f34 100644
2492 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2493 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2494 +@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
2495 + int port, bool mod)
2496 + {
2497 + struct nfp_flower_priv *priv = app->priv;
2498 +- int ida_idx = NFP_MAX_MAC_INDEX, err;
2499 + struct nfp_tun_offloaded_mac *entry;
2500 ++ int ida_idx = -1, err;
2501 + u16 nfp_mac_idx = 0;
2502 +
2503 + entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
2504 +@@ -997,7 +997,7 @@ err_remove_hash:
2505 + err_free_entry:
2506 + kfree(entry);
2507 + err_free_ida:
2508 +- if (ida_idx != NFP_MAX_MAC_INDEX)
2509 ++ if (ida_idx != -1)
2510 + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
2511 +
2512 + return err;
2513 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
2514 +index 463094ced104a..2ab29efa6b6e4 100644
2515 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
2516 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
2517 +@@ -1427,6 +1427,8 @@ static int temac_probe(struct platform_device *pdev)
2518 + lp->indirect_lock = devm_kmalloc(&pdev->dev,
2519 + sizeof(*lp->indirect_lock),
2520 + GFP_KERNEL);
2521 ++ if (!lp->indirect_lock)
2522 ++ return -ENOMEM;
2523 + spin_lock_init(lp->indirect_lock);
2524 + }
2525 +
2526 +diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
2527 +index 5f4cd24a0241d..4eba5a91075c0 100644
2528 +--- a/drivers/net/mdio/mdio-ipq4019.c
2529 ++++ b/drivers/net/mdio/mdio-ipq4019.c
2530 +@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
2531 + if (ret)
2532 + return ret;
2533 +
2534 +- return clk_prepare_enable(priv->mdio_clk);
2535 ++ ret = clk_prepare_enable(priv->mdio_clk);
2536 ++ if (ret == 0)
2537 ++ mdelay(10);
2538 ++
2539 ++ return ret;
2540 + }
2541 +
2542 + static int ipq4019_mdio_probe(struct platform_device *pdev)
2543 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2544 +index eb3817d70f2b8..9b4dfa3001d6e 100644
2545 +--- a/drivers/net/usb/cdc_ether.c
2546 ++++ b/drivers/net/usb/cdc_ether.c
2547 +@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
2548 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
2549 + .bInterfaceProtocol = USB_CDC_PROTO_NONE
2550 +
2551 ++#define ZAURUS_FAKE_INTERFACE \
2552 ++ .bInterfaceClass = USB_CLASS_COMM, \
2553 ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
2554 ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
2555 ++
2556 + /* SA-1100 based Sharp Zaurus ("collie"), or compatible;
2557 + * wire-incompatible with true CDC Ethernet implementations.
2558 + * (And, it seems, needlessly so...)
2559 +@@ -636,6 +641,13 @@ static const struct usb_device_id products[] = {
2560 + .idProduct = 0x9032, /* SL-6000 */
2561 + ZAURUS_MASTER_INTERFACE,
2562 + .driver_info = 0,
2563 ++}, {
2564 ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
2565 ++ | USB_DEVICE_ID_MATCH_DEVICE,
2566 ++ .idVendor = 0x04DD,
2567 ++ .idProduct = 0x9032, /* SL-6000 */
2568 ++ ZAURUS_FAKE_INTERFACE,
2569 ++ .driver_info = 0,
2570 + }, {
2571 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
2572 + | USB_DEVICE_ID_MATCH_DEVICE,
2573 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2574 +index e303b522efb50..15f91d691bba3 100644
2575 +--- a/drivers/net/usb/cdc_ncm.c
2576 ++++ b/drivers/net/usb/cdc_ncm.c
2577 +@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
2578 + {
2579 + struct sk_buff *skb;
2580 + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
2581 +- int len;
2582 ++ unsigned int len;
2583 + int nframes;
2584 + int x;
2585 +- int offset;
2586 ++ unsigned int offset;
2587 + union {
2588 + struct usb_cdc_ncm_ndp16 *ndp16;
2589 + struct usb_cdc_ncm_ndp32 *ndp32;
2590 +@@ -1790,8 +1790,8 @@ next_ndp:
2591 + break;
2592 + }
2593 +
2594 +- /* sanity checking */
2595 +- if (((offset + len) > skb_in->len) ||
2596 ++ /* sanity checking - watch out for integer wrap*/
2597 ++ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
2598 + (len > ctx->rx_max) || (len < ETH_HLEN)) {
2599 + netif_dbg(dev, rx_err, dev->net,
2600 + "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
2601 +diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
2602 +index 6516a37893e27..0c50f24671da3 100644
2603 +--- a/drivers/net/usb/sr9700.c
2604 ++++ b/drivers/net/usb/sr9700.c
2605 +@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2606 + /* ignore the CRC length */
2607 + len = (skb->data[1] | (skb->data[2] << 8)) - 4;
2608 +
2609 +- if (len > ETH_FRAME_LEN)
2610 ++ if (len > ETH_FRAME_LEN || len > skb->len)
2611 + return 0;
2612 +
2613 + /* the last packet of current skb */
2614 +diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
2615 +index 8e717a0b559b3..7984f2157d222 100644
2616 +--- a/drivers/net/usb/zaurus.c
2617 ++++ b/drivers/net/usb/zaurus.c
2618 +@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
2619 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
2620 + .bInterfaceProtocol = USB_CDC_PROTO_NONE
2621 +
2622 ++#define ZAURUS_FAKE_INTERFACE \
2623 ++ .bInterfaceClass = USB_CLASS_COMM, \
2624 ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
2625 ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
2626 ++
2627 + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
2628 + {
2629 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
2630 +@@ -313,6 +318,13 @@ static const struct usb_device_id products [] = {
2631 + .idProduct = 0x9032, /* SL-6000 */
2632 + ZAURUS_MASTER_INTERFACE,
2633 + .driver_info = ZAURUS_PXA_INFO,
2634 ++}, {
2635 ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
2636 ++ | USB_DEVICE_ID_MATCH_DEVICE,
2637 ++ .idVendor = 0x04DD,
2638 ++ .idProduct = 0x9032, /* SL-6000 */
2639 ++ ZAURUS_FAKE_INTERFACE,
2640 ++ .driver_info = (unsigned long)&bogus_mdlm_info,
2641 + }, {
2642 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
2643 + | USB_DEVICE_ID_MATCH_DEVICE,
2644 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2645 +index a480e1af48e8e..d5d5d035d6775 100644
2646 +--- a/drivers/nvme/host/core.c
2647 ++++ b/drivers/nvme/host/core.c
2648 +@@ -1914,7 +1914,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
2649 + if (blk_queue_is_zoned(ns->queue)) {
2650 + ret = nvme_revalidate_zones(ns);
2651 + if (ret && !nvme_first_scan(ns->disk))
2652 +- goto out;
2653 ++ return ret;
2654 + }
2655 +
2656 + if (nvme_ns_head_multipath(ns->head)) {
2657 +@@ -1929,16 +1929,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
2658 + return 0;
2659 +
2660 + out_unfreeze:
2661 +- blk_mq_unfreeze_queue(ns->disk->queue);
2662 +-out:
2663 + /*
2664 + * If probing fails due an unsupported feature, hide the block device,
2665 + * but still allow other access.
2666 + */
2667 + if (ret == -ENODEV) {
2668 + ns->disk->flags |= GENHD_FL_HIDDEN;
2669 ++ set_bit(NVME_NS_READY, &ns->flags);
2670 + ret = 0;
2671 + }
2672 ++ blk_mq_unfreeze_queue(ns->disk->queue);
2673 + return ret;
2674 + }
2675 +
2676 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
2677 +index 9aecb83021a2d..fb7840c73765a 100644
2678 +--- a/drivers/nvmem/core.c
2679 ++++ b/drivers/nvmem/core.c
2680 +@@ -768,7 +768,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
2681 +
2682 + if (config->wp_gpio)
2683 + nvmem->wp_gpio = config->wp_gpio;
2684 +- else
2685 ++ else if (!config->ignore_wp)
2686 + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
2687 + GPIOD_OUT_HIGH);
2688 + if (IS_ERR(nvmem->wp_gpio)) {
2689 +diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
2690 +index 49e32684dbb25..ecab6bf63dc6d 100644
2691 +--- a/drivers/pinctrl/pinctrl-k210.c
2692 ++++ b/drivers/pinctrl/pinctrl-k210.c
2693 +@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
2694 + {
2695 + int i;
2696 +
2697 +- for (i = K210_PC_DRIVE_MAX; i; i--) {
2698 ++ for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
2699 + if (k210_pinconf_drive_strength[i] <= max_strength_ua)
2700 + return i;
2701 + }
2702 +@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
2703 + case PIN_CONFIG_BIAS_PULL_UP:
2704 + if (!arg)
2705 + return -EINVAL;
2706 +- val |= K210_PC_PD;
2707 ++ val |= K210_PC_PU;
2708 + break;
2709 + case PIN_CONFIG_DRIVE_STRENGTH:
2710 + arg *= 1000;
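
The pinctrl-k210 loop fix is a descending off-by-one: "for (i = MAX; i;
i--)" terminates before ever testing index 0, so the weakest drive strength
could never be selected (note the loop variable must be signed for
"i >= 0" to terminate). The same file also fixes a copy-paste bug where
bias-pull-up set the pull-down bit. A tiny demo of the loop bound, with a
made-up table:

#include <stdio.h>

static const unsigned int drive_ua[] = { 11200, 16800, 22400, 28000 };
#define DRIVE_MAX 3     /* highest valid index */

static int pick_drive(unsigned int max_ua)
{
        int i;

        for (i = DRIVE_MAX; i >= 0; i--)        /* "i;" would skip i == 0 */
                if (drive_ua[i] <= max_ua)
                        return i;
        return -1;
}

int main(void)
{
        /* reachable only because index 0 is now tested */
        printf("%d\n", pick_drive(12000));      /* prints 0 */
        return 0;
}
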
2711 +diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
2712 +index 90c1568ea4e09..3cc004c68bdba 100644
2713 +--- a/drivers/platform/surface/surface3_power.c
2714 ++++ b/drivers/platform/surface/surface3_power.c
2715 +@@ -233,14 +233,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
2716 + }
2717 + bix->last_full_charg_capacity = ret;
2718 +
2719 +- /* get serial number */
2720 ++ /*
2721 ++ * Get serial number, on some devices (with unofficial replacement
2722 ++ * battery?) reading any of the serial number range addresses gets
2723 ++ * nacked in this case just leave the serial number empty.
2724 ++ */
2725 + ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
2726 + sizeof(buf), buf);
2727 +- if (ret != sizeof(buf)) {
2728 ++ if (ret == -EREMOTEIO) {
2729 ++ /* no serial number available */
2730 ++ } else if (ret != sizeof(buf)) {
2731 + dev_err(&client->dev, "Error reading serial no: %d\n", ret);
2732 + return ret;
2733 ++ } else {
2734 ++ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
2735 + }
2736 +- snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
2737 +
2738 + /* get cycle count */
2739 + ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
2740 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
2741 +index cfa222c9bd5e7..78f31b61a2aac 100644
2742 +--- a/drivers/spi/spi-zynq-qspi.c
2743 ++++ b/drivers/spi/spi-zynq-qspi.c
2744 +@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
2745 +
2746 + if (op->dummy.nbytes) {
2747 + tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
2748 ++ if (!tmpbuf)
2749 ++ return -ENOMEM;
2750 ++
2751 + memset(tmpbuf, 0xff, op->dummy.nbytes);
2752 + reinit_completion(&xqspi->data_completion);
2753 + xqspi->txbuf = tmpbuf;
2754 +diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
2755 +index abe9395a0aefd..861a154144e66 100644
2756 +--- a/drivers/staging/fbtft/fb_st7789v.c
2757 ++++ b/drivers/staging/fbtft/fb_st7789v.c
2758 +@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
2759 + {
2760 + int rc;
2761 +
2762 ++ par->fbtftops.reset(par);
2763 ++
2764 + rc = init_tearing_effect_line(par);
2765 + if (rc)
2766 + return rc;
2767 +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
2768 +index 5363ebebfc357..50c0d839fe751 100644
2769 +--- a/drivers/tee/optee/core.c
2770 ++++ b/drivers/tee/optee/core.c
2771 +@@ -588,6 +588,7 @@ static int optee_remove(struct platform_device *pdev)
2772 + /* Unregister OP-TEE specific client devices on TEE bus */
2773 + optee_unregister_devices();
2774 +
2775 ++ teedev_close_context(optee->ctx);
2776 + /*
2777 + * Ask OP-TEE to free all cached shared memory objects to decrease
2778 + * reference counters and also avoid wild pointers in secure world
2779 +@@ -633,6 +634,7 @@ static int optee_probe(struct platform_device *pdev)
2780 + struct optee *optee = NULL;
2781 + void *memremaped_shm = NULL;
2782 + struct tee_device *teedev;
2783 ++ struct tee_context *ctx;
2784 + u32 sec_caps;
2785 + int rc;
2786 +
2787 +@@ -719,6 +721,12 @@ static int optee_probe(struct platform_device *pdev)
2788 + optee_supp_init(&optee->supp);
2789 + optee->memremaped_shm = memremaped_shm;
2790 + optee->pool = pool;
2791 ++ ctx = teedev_open(optee->teedev);
2792 ++ if (IS_ERR(ctx)) {
2793 ++ rc = PTR_ERR(ctx);
2794 ++ goto err;
2795 ++ }
2796 ++ optee->ctx = ctx;
2797 +
2798 + /*
2799 + * Ensure that there are no pre-existing shm objects before enabling
2800 +diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
2801 +index f6bb4a763ba94..ea09533e30cde 100644
2802 +--- a/drivers/tee/optee/optee_private.h
2803 ++++ b/drivers/tee/optee/optee_private.h
2804 +@@ -70,6 +70,7 @@ struct optee_supp {
2805 + * struct optee - main service struct
2806 + * @supp_teedev: supplicant device
2807 + * @teedev: client device
2808 ++ * @ctx: driver internal TEE context
2809 + * @invoke_fn: function to issue smc or hvc
2810 + * @call_queue: queue of threads waiting to call @invoke_fn
2811 + * @wait_queue: queue of threads from secure world waiting for a
2812 +@@ -87,6 +88,7 @@ struct optee {
2813 + struct tee_device *supp_teedev;
2814 + struct tee_device *teedev;
2815 + optee_invoke_fn *invoke_fn;
2816 ++ struct tee_context *ctx;
2817 + struct optee_call_queue call_queue;
2818 + struct optee_wait_queue wait_queue;
2819 + struct optee_supp supp;
2820 +diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
2821 +index efbaff7ad7e59..456833d820078 100644
2822 +--- a/drivers/tee/optee/rpc.c
2823 ++++ b/drivers/tee/optee/rpc.c
2824 +@@ -285,6 +285,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
2825 + }
2826 +
2827 + static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
2828 ++ struct optee *optee,
2829 + struct optee_msg_arg *arg,
2830 + struct optee_call_ctx *call_ctx)
2831 + {
2832 +@@ -314,7 +315,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
2833 + shm = cmd_alloc_suppl(ctx, sz);
2834 + break;
2835 + case OPTEE_RPC_SHM_TYPE_KERNEL:
2836 +- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
2837 ++ shm = tee_shm_alloc(optee->ctx, sz,
2838 ++ TEE_SHM_MAPPED | TEE_SHM_PRIV);
2839 + break;
2840 + default:
2841 + arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2842 +@@ -471,7 +473,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
2843 + break;
2844 + case OPTEE_RPC_CMD_SHM_ALLOC:
2845 + free_pages_list(call_ctx);
2846 +- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
2847 ++ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
2848 + break;
2849 + case OPTEE_RPC_CMD_SHM_FREE:
2850 + handle_rpc_func_cmd_shm_free(ctx, arg);
2851 +@@ -502,7 +504,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
2852 +
2853 + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
2854 + case OPTEE_SMC_RPC_FUNC_ALLOC:
2855 +- shm = tee_shm_alloc(ctx, param->a1,
2856 ++ shm = tee_shm_alloc(optee->ctx, param->a1,
2857 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
2858 + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
2859 + reg_pair_from_64(&param->a1, &param->a2, pa);
2860 +diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
2861 +index 85102d12d7169..3fc426dad2df3 100644
2862 +--- a/drivers/tee/tee_core.c
2863 ++++ b/drivers/tee/tee_core.c
2864 +@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
2865 + static struct class *tee_class;
2866 + static dev_t tee_devt;
2867 +
2868 +-static struct tee_context *teedev_open(struct tee_device *teedev)
2869 ++struct tee_context *teedev_open(struct tee_device *teedev)
2870 + {
2871 + int rc;
2872 + struct tee_context *ctx;
2873 +@@ -70,6 +70,7 @@ err:
2874 + return ERR_PTR(rc);
2875 +
2876 + }
2877 ++EXPORT_SYMBOL_GPL(teedev_open);
2878 +
2879 + void teedev_ctx_get(struct tee_context *ctx)
2880 + {
2881 +@@ -96,13 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
2882 + kref_put(&ctx->refcount, teedev_ctx_release);
2883 + }
2884 +
2885 +-static void teedev_close_context(struct tee_context *ctx)
2886 ++void teedev_close_context(struct tee_context *ctx)
2887 + {
2888 + struct tee_device *teedev = ctx->teedev;
2889 +
2890 + teedev_ctx_put(ctx);
2891 + tee_device_put(teedev);
2892 + }
2893 ++EXPORT_SYMBOL_GPL(teedev_close_context);
2894 +
2895 + static int tee_open(struct inode *inode, struct file *filp)
2896 + {
2897 +diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2898 +index 19926beeb3b71..176b8e5d21246 100644
2899 +--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2900 ++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2901 +@@ -405,6 +405,10 @@ static void int3400_notify(acpi_handle handle,
2902 + thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
2903 + thermal_prop[4] = NULL;
2904 + kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
2905 ++ kfree(thermal_prop[0]);
2906 ++ kfree(thermal_prop[1]);
2907 ++ kfree(thermal_prop[2]);
2908 ++ kfree(thermal_prop[3]);
2909 + }
2910 +
2911 + static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
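
The int3400 hunk plugs a leak: kobject_uevent_env() only copies the
environment strings into the uevent buffer, so the kasprintf() allocations
remain owned by the caller and must be freed afterwards. A userspace analog
using asprintf()/free(), where emit_event() is a stand-in consumer:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static void emit_event(char *envp[])
{
        (void)envp;     /* a real consumer would copy the strings */
}

int main(void)
{
        char *prop[3];
        int i;

        if (asprintf(&prop[0], "NAME=%s", "INT3400 Thermal") < 0 ||
            asprintf(&prop[1], "EVENT=%d", 0x86) < 0)
                return 1;       /* (a full version would also free prop[0]
                                 * when only the second call fails) */
        prop[2] = NULL;

        emit_event(prop);

        for (i = 0; i < 2; i++)
                free(prop[i]);  /* this was missing before the fix */
        return 0;
}
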
2912 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
2913 +index 09a14f7c79f40..8643b143c408e 100644
2914 +--- a/drivers/tty/n_gsm.c
2915 ++++ b/drivers/tty/n_gsm.c
2916 +@@ -435,7 +435,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
2917 + modembits |= MDM_RTR;
2918 + if (dlci->modem_tx & TIOCM_RI)
2919 + modembits |= MDM_IC;
2920 +- if (dlci->modem_tx & TIOCM_CD)
2921 ++ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
2922 + modembits |= MDM_DV;
2923 + return modembits;
2924 + }
2925 +@@ -1009,25 +1009,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
2926 + * @tty: virtual tty bound to the DLCI
2927 + * @dlci: DLCI to affect
2928 + * @modem: modem bits (full EA)
2929 +- * @clen: command length
2930 ++ * @slen: number of signal octets
2931 + *
2932 + * Used when a modem control message or line state inline in adaption
2933 + * layer 2 is processed. Sort out the local modem state and throttles
2934 + */
2935 +
2936 + static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
2937 +- u32 modem, int clen)
2938 ++ u32 modem, int slen)
2939 + {
2940 + int mlines = 0;
2941 + u8 brk = 0;
2942 + int fc;
2943 +
2944 +- /* The modem status command can either contain one octet (v.24 signals)
2945 +- or two octets (v.24 signals + break signals). The length field will
2946 +- either be 2 or 3 respectively. This is specified in section
2947 +- 5.4.6.3.7 of the 27.010 mux spec. */
2948 ++ /* The modem status command can either contain one octet (V.24 signals)
2949 ++ * or two octets (V.24 signals + break signals). This is specified in
2950 ++ * section 5.4.6.3.7 of the 07.10 mux spec.
2951 ++ */
2952 +
2953 +- if (clen == 2)
2954 ++ if (slen == 1)
2955 + modem = modem & 0x7f;
2956 + else {
2957 + brk = modem & 0x7f;
2958 +@@ -1084,6 +1084,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
2959 + unsigned int brk = 0;
2960 + struct gsm_dlci *dlci;
2961 + int len = clen;
2962 ++ int slen;
2963 + const u8 *dp = data;
2964 + struct tty_struct *tty;
2965 +
2966 +@@ -1103,6 +1104,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
2967 + return;
2968 + dlci = gsm->dlci[addr];
2969 +
2970 ++ slen = len;
2971 + while (gsm_read_ea(&modem, *dp++) == 0) {
2972 + len--;
2973 + if (len == 0)
2974 +@@ -1119,7 +1121,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
2975 + modem |= (brk & 0x7f);
2976 + }
2977 + tty = tty_port_tty_get(&dlci->port);
2978 +- gsm_process_modem(tty, dlci, modem, clen);
2979 ++ gsm_process_modem(tty, dlci, modem, slen);
2980 + if (tty) {
2981 + tty_wakeup(tty);
2982 + tty_kref_put(tty);
2983 +@@ -1429,6 +1431,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
2984 + if (dlci->addr != 0) {
2985 + tty_port_tty_hangup(&dlci->port, false);
2986 + kfifo_reset(&dlci->fifo);
2987 ++ /* Ensure that gsmtty_open() can return. */
2988 ++ tty_port_set_initialized(&dlci->port, 0);
2989 ++ wake_up_interruptible(&dlci->port.open_wait);
2990 + } else
2991 + dlci->gsm->dead = true;
2992 + wake_up(&dlci->gsm->event);
2993 +@@ -1488,7 +1493,7 @@ static void gsm_dlci_t1(struct timer_list *t)
2994 + dlci->mode = DLCI_MODE_ADM;
2995 + gsm_dlci_open(dlci);
2996 + } else {
2997 +- gsm_dlci_close(dlci);
2998 ++ gsm_dlci_begin_close(dlci); /* prevent half open link */
2999 + }
3000 +
3001 + break;
3002 +@@ -1567,6 +1572,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
3003 + struct tty_struct *tty;
3004 + unsigned int modem = 0;
3005 + int len = clen;
3006 ++ int slen = 0;
3007 +
3008 + if (debug & 16)
3009 + pr_debug("%d bytes for tty\n", len);
3010 +@@ -1579,12 +1585,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
3011 + case 2: /* Asynchronous serial with line state in each frame */
3012 + while (gsm_read_ea(&modem, *data++) == 0) {
3013 + len--;
3014 ++ slen++;
3015 + if (len == 0)
3016 + return;
3017 + }
3018 ++ slen++;
3019 + tty = tty_port_tty_get(port);
3020 + if (tty) {
3021 +- gsm_process_modem(tty, dlci, modem, clen);
3022 ++ gsm_process_modem(tty, dlci, modem, slen);
3023 + tty_kref_put(tty);
3024 + }
3025 + fallthrough;
3026 +@@ -1722,7 +1730,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
3027 + gsm_destroy_network(dlci);
3028 + mutex_unlock(&dlci->mutex);
3029 +
3030 +- tty_hangup(tty);
3031 ++ /* We cannot use tty_hangup() because in tty_kref_put() the tty
3032 ++ * driver assumes that the hangup queue is free and reuses it to
3033 ++ * queue release_one_tty() -> NULL pointer panic in
3034 ++ * process_one_work().
3035 ++ */
3036 ++ tty_vhangup(tty);
3037 +
3038 + tty_port_tty_set(&dlci->port, NULL);
3039 + tty_kref_put(tty);
3040 +@@ -3175,9 +3188,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
3041 + if (dlci->state == DLCI_CLOSED)
3042 + return;
3043 + if (C_CRTSCTS(tty))
3044 +- dlci->modem_tx &= ~TIOCM_DTR;
3045 ++ dlci->modem_tx &= ~TIOCM_RTS;
3046 + dlci->throttled = true;
3047 +- /* Send an MSC with DTR cleared */
3048 ++ /* Send an MSC with RTS cleared */
3049 + gsmtty_modem_update(dlci, 0);
3050 + }
3051 +
3052 +@@ -3187,9 +3200,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
3053 + if (dlci->state == DLCI_CLOSED)
3054 + return;
3055 + if (C_CRTSCTS(tty))
3056 +- dlci->modem_tx |= TIOCM_DTR;
3057 ++ dlci->modem_tx |= TIOCM_RTS;
3058 + dlci->throttled = false;
3059 +- /* Send an MSC with DTR set */
3060 ++ /* Send an MSC with RTS set */
3061 + gsmtty_modem_update(dlci, 0);
3062 + }
3063 +
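
Several of the n_gsm hunks above pass slen, the number of signal octets actually consumed, instead of the full command length clen when decoding the EA-encoded modem value. As a hedged standalone sketch of that decode loop (hypothetical names; in 07.10 framing each octet carries 7 data bits and a set low bit marks the final octet):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode an EA (extension-bit) value; returns octets consumed. */
    static int read_ea(const uint8_t *buf, int len, uint32_t *val)
    {
        int slen = 0;

        *val = 0;
        while (slen < len) {
            *val <<= 7;
            *val |= buf[slen] >> 1;
            slen++;
            if (buf[slen - 1] & 1)          /* EA bit set: last octet */
                return slen;
        }
        return -1;                          /* ran out mid-sequence */
    }

    int main(void)
    {
        const uint8_t msg[] = { 0x0c, 0x81 };   /* two-octet example */
        uint32_t v;
        int slen = read_ea(msg, sizeof(msg), &v);

        printf("value=%u, consumed %d octet(s)\n", (unsigned)v, slen);
        return 0;
    }

Tracking the consumed count separately is what lets the caller distinguish a one-octet (V.24 only) message from a two-octet (V.24 + break) one.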
3064 +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
3065 +index acbb615dd28fd..0ab788058fa2a 100644
3066 +--- a/drivers/tty/serial/sc16is7xx.c
3067 ++++ b/drivers/tty/serial/sc16is7xx.c
3068 +@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
3069 + static void sc16is7xx_tx_proc(struct kthread_work *ws)
3070 + {
3071 + struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
3072 ++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
3073 +
3074 + if ((port->rs485.flags & SER_RS485_ENABLED) &&
3075 + (port->rs485.delay_rts_before_send > 0))
3076 + msleep(port->rs485.delay_rts_before_send);
3077 +
3078 ++ mutex_lock(&s->efr_lock);
3079 + sc16is7xx_handle_tx(port);
3080 ++ mutex_unlock(&s->efr_lock);
3081 + }
3082 +
3083 + static void sc16is7xx_reconf_rs485(struct uart_port *port)
3084 +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
3085 +index cb9059a8444b5..71e62b3081dbb 100644
3086 +--- a/drivers/usb/dwc2/core.h
3087 ++++ b/drivers/usb/dwc2/core.h
3088 +@@ -1417,6 +1417,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
3089 + void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
3090 + int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
3091 + #define dwc2_is_device_connected(hsotg) (hsotg->connected)
3092 ++#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
3093 + int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
3094 + int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
3095 + int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
3096 +@@ -1453,6 +1454,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
3097 + int testmode)
3098 + { return 0; }
3099 + #define dwc2_is_device_connected(hsotg) (0)
3100 ++#define dwc2_is_device_enabled(hsotg) (0)
3101 + static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
3102 + { return 0; }
3103 + static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
3104 +diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
3105 +index aa6eb76f64ddc..36f2c38416e5e 100644
3106 +--- a/drivers/usb/dwc2/drd.c
3107 ++++ b/drivers/usb/dwc2/drd.c
3108 +@@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
3109 + already = dwc2_ovr_avalid(hsotg, true);
3110 + } else if (role == USB_ROLE_DEVICE) {
3111 + already = dwc2_ovr_bvalid(hsotg, true);
3112 +- /* This clear DCTL.SFTDISCON bit */
3113 +- dwc2_hsotg_core_connect(hsotg);
3114 ++ if (dwc2_is_device_enabled(hsotg)) {
3115 ++ /* This clear DCTL.SFTDISCON bit */
3116 ++ dwc2_hsotg_core_connect(hsotg);
3117 ++ }
3118 + } else {
3119 + if (dwc2_is_device_mode(hsotg)) {
3120 + if (!dwc2_ovr_bvalid(hsotg, false))
3121 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3122 +index 7ff8fc8f79a9b..1ecedbb1684c8 100644
3123 +--- a/drivers/usb/dwc3/dwc3-pci.c
3124 ++++ b/drivers/usb/dwc3/dwc3-pci.c
3125 +@@ -85,8 +85,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
3126 + static struct gpiod_lookup_table platform_bytcr_gpios = {
3127 + .dev_id = "0000:00:16.0",
3128 + .table = {
3129 +- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
3130 +- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
3131 ++ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
3132 ++ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
3133 + {}
3134 + },
3135 + };
3136 +@@ -119,6 +119,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
3137 + {}
3138 + };
3139 +
3140 ++static const struct property_entry dwc3_pci_intel_byt_properties[] = {
3141 ++ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
3142 ++ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
3143 ++ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
3144 ++ {}
3145 ++};
3146 ++
3147 + static const struct property_entry dwc3_pci_mrfld_properties[] = {
3148 + PROPERTY_ENTRY_STRING("dr_mode", "otg"),
3149 + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
3150 +@@ -161,6 +168,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
3151 + .properties = dwc3_pci_intel_properties,
3152 + };
3153 +
3154 ++static const struct software_node dwc3_pci_intel_byt_swnode = {
3155 ++ .properties = dwc3_pci_intel_byt_properties,
3156 ++};
3157 ++
3158 + static const struct software_node dwc3_pci_intel_mrfld_swnode = {
3159 + .properties = dwc3_pci_mrfld_properties,
3160 + };
3161 +@@ -344,7 +355,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
3162 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3163 +
3164 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
3165 +- (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3166 ++ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
3167 +
3168 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
3169 + (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
3170 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3171 +index 146cebde33b8d..00cf8ebcb338c 100644
3172 +--- a/drivers/usb/dwc3/gadget.c
3173 ++++ b/drivers/usb/dwc3/gadget.c
3174 +@@ -4131,9 +4131,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
3175 + unsigned long flags;
3176 + irqreturn_t ret = IRQ_NONE;
3177 +
3178 ++ local_bh_disable();
3179 + spin_lock_irqsave(&dwc->lock, flags);
3180 + ret = dwc3_process_event_buf(evt);
3181 + spin_unlock_irqrestore(&dwc->lock, flags);
3182 ++ local_bh_enable();
3183 +
3184 + return ret;
3185 + }
3186 +diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
3187 +index d9ed651f06ac3..0f14c5291af07 100644
3188 +--- a/drivers/usb/gadget/function/rndis.c
3189 ++++ b/drivers/usb/gadget/function/rndis.c
3190 +@@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
3191 + params->resp_avail = resp_avail;
3192 + params->v = v;
3193 + INIT_LIST_HEAD(&params->resp_queue);
3194 ++ spin_lock_init(&params->resp_lock);
3195 + pr_debug("%s: configNr = %d\n", __func__, i);
3196 +
3197 + return params;
3198 +@@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
3199 + {
3200 + rndis_resp_t *r, *n;
3201 +
3202 ++ spin_lock(&params->resp_lock);
3203 + list_for_each_entry_safe(r, n, &params->resp_queue, list) {
3204 + if (r->buf == buf) {
3205 + list_del(&r->list);
3206 + kfree(r);
3207 + }
3208 + }
3209 ++ spin_unlock(&params->resp_lock);
3210 + }
3211 + EXPORT_SYMBOL_GPL(rndis_free_response);
3212 +
3213 +@@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
3214 +
3215 + if (!length) return NULL;
3216 +
3217 ++ spin_lock(&params->resp_lock);
3218 + list_for_each_entry_safe(r, n, &params->resp_queue, list) {
3219 + if (!r->send) {
3220 + r->send = 1;
3221 + *length = r->length;
3222 ++ spin_unlock(&params->resp_lock);
3223 + return r->buf;
3224 + }
3225 + }
3226 +
3227 ++ spin_unlock(&params->resp_lock);
3228 + return NULL;
3229 + }
3230 + EXPORT_SYMBOL_GPL(rndis_get_next_response);
3231 +@@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
3232 + r->length = length;
3233 + r->send = 0;
3234 +
3235 ++ spin_lock(&params->resp_lock);
3236 + list_add_tail(&r->list, &params->resp_queue);
3237 ++ spin_unlock(&params->resp_lock);
3238 + return r;
3239 + }
3240 +
3241 +diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
3242 +index f6167f7fea82b..6206b8b7490f6 100644
3243 +--- a/drivers/usb/gadget/function/rndis.h
3244 ++++ b/drivers/usb/gadget/function/rndis.h
3245 +@@ -174,6 +174,7 @@ typedef struct rndis_params {
3246 + void (*resp_avail)(void *v);
3247 + void *v;
3248 + struct list_head resp_queue;
3249 ++ spinlock_t resp_lock;
3250 + } rndis_params;
3251 +
3252 + /* RNDIS Message parser and other useless functions */
3253 +diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
3254 +index fb4ffedd6f0dd..9cf43731bcd18 100644
3255 +--- a/drivers/usb/gadget/udc/udc-xilinx.c
3256 ++++ b/drivers/usb/gadget/udc/udc-xilinx.c
3257 +@@ -1612,6 +1612,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
3258 + break;
3259 + case USB_RECIP_ENDPOINT:
3260 + epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
3261 ++ if (epnum >= XUSB_MAX_ENDPOINTS)
3262 ++ goto stall;
3263 + target_ep = &udc->ep[epnum];
3264 + epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
3265 + halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
3266 +@@ -1679,6 +1681,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
3267 + case USB_RECIP_ENDPOINT:
3268 + if (!udc->setup.wValue) {
3269 + endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
3270 ++ if (endpoint >= XUSB_MAX_ENDPOINTS) {
3271 ++ xudc_ep0_stall(udc);
3272 ++ return;
3273 ++ }
3274 + target_ep = &udc->ep[endpoint];
3275 + outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
3276 + outinbit = outinbit >> 7;
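
Both udc-xilinx hunks validate the host-supplied endpoint number before it is used as an array index; previously an out-of-range wIndex read past udc->ep[]. The shape of the check, as a standalone sketch (MAX_ENDPOINTS and the array are stand-ins for the driver's):

    #include <stdio.h>

    #define MAX_ENDPOINTS 8
    static int ep_halted[MAX_ENDPOINTS];

    /* Reject an untrusted index before it touches the array. */
    static int get_ep_status(unsigned int windex)
    {
        unsigned int epnum = windex & 0x0f;     /* USB_ENDPOINT_NUMBER_MASK */

        if (epnum >= MAX_ENDPOINTS)
            return -1;                          /* stall instead of overread */
        return ep_halted[epnum];
    }

    int main(void)
    {
        printf("ep 2:  %d\n", get_ep_status(2));
        printf("ep 15: %d (rejected)\n", get_ep_status(15));
        return 0;
    }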
3277 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3278 +index f5b1bcc875ded..d7c0bf494d930 100644
3279 +--- a/drivers/usb/host/xhci.c
3280 ++++ b/drivers/usb/host/xhci.c
3281 +@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3282 + int retval = 0;
3283 + bool comp_timer_running = false;
3284 + bool pending_portevent = false;
3285 ++ bool reinit_xhc = false;
3286 +
3287 + if (!hcd->state)
3288 + return 0;
3289 +@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3290 + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
3291 +
3292 + spin_lock_irq(&xhci->lock);
3293 +- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
3294 +- hibernated = true;
3295 +
3296 +- if (!hibernated) {
3297 ++ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
3298 ++ reinit_xhc = true;
3299 ++
3300 ++ if (!reinit_xhc) {
3301 + /*
3302 + * Some controllers might lose power during suspend, so wait
3303 + * for controller not ready bit to clear, just as in xHC init.
3304 +@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3305 + spin_unlock_irq(&xhci->lock);
3306 + return -ETIMEDOUT;
3307 + }
3308 +- temp = readl(&xhci->op_regs->status);
3309 + }
3310 +
3311 +- /* If restore operation fails, re-initialize the HC during resume */
3312 +- if ((temp & STS_SRE) || hibernated) {
3313 ++ temp = readl(&xhci->op_regs->status);
3314 +
3315 ++ /* re-initialize the HC on Restore Error, or Host Controller Error */
3316 ++ if (temp & (STS_SRE | STS_HCE)) {
3317 ++ reinit_xhc = true;
3318 ++ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
3319 ++ }
3320 ++
3321 ++ if (reinit_xhc) {
3322 + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
3323 + !(xhci_all_ports_seen_u0(xhci))) {
3324 + del_timer_sync(&xhci->comp_mode_recovery_timer);
3325 +@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
3326 + struct urb_priv *urb_priv;
3327 + int num_tds;
3328 +
3329 +- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
3330 +- true, true, __func__) <= 0)
3331 ++ if (!urb)
3332 + return -EINVAL;
3333 ++ ret = xhci_check_args(hcd, urb->dev, urb->ep,
3334 ++ true, true, __func__);
3335 ++ if (ret <= 0)
3336 ++ return ret ? ret : -EINVAL;
3337 +
3338 + slot_id = urb->dev->slot_id;
3339 + ep_index = xhci_get_endpoint_index(&urb->ep->desc);
3340 +@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3341 + return -EINVAL;
3342 + ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3343 + if (ret <= 0)
3344 +- return -EINVAL;
3345 ++ return ret ? ret : -EINVAL;
3346 + if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3347 + xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3348 + " descriptor for ep 0x%x does not support streams\n",
3349 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3350 +index 4b65e69044996..b5a1864e9cfdc 100644
3351 +--- a/drivers/usb/serial/ch341.c
3352 ++++ b/drivers/usb/serial/ch341.c
3353 +@@ -81,7 +81,6 @@
3354 + #define CH341_QUIRK_SIMULATE_BREAK BIT(1)
3355 +
3356 + static const struct usb_device_id id_table[] = {
3357 +- { USB_DEVICE(0x1a86, 0x5512) },
3358 + { USB_DEVICE(0x1a86, 0x5523) },
3359 + { USB_DEVICE(0x1a86, 0x7522) },
3360 + { USB_DEVICE(0x1a86, 0x7523) },
3361 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3362 +index 962e9943fc20e..e7755d9cfc61a 100644
3363 +--- a/drivers/usb/serial/option.c
3364 ++++ b/drivers/usb/serial/option.c
3365 +@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
3366 +
3367 + #define DELL_PRODUCT_5821E 0x81d7
3368 + #define DELL_PRODUCT_5821E_ESIM 0x81e0
3369 ++#define DELL_PRODUCT_5829E_ESIM 0x81e4
3370 ++#define DELL_PRODUCT_5829E 0x81e6
3371 +
3372 + #define KYOCERA_VENDOR_ID 0x0c88
3373 + #define KYOCERA_PRODUCT_KPC650 0x17da
3374 +@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
3375 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3376 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
3377 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3378 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
3379 ++ .driver_info = RSVD(0) | RSVD(6) },
3380 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
3381 ++ .driver_info = RSVD(0) | RSVD(6) },
3382 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
3383 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
3384 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
3385 +@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
3386 + .driver_info = NCTRL(2) },
3387 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
3388 + .driver_info = NCTRL(2) },
3389 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
3390 ++ .driver_info = NCTRL(2) },
3391 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
3392 ++ .driver_info = NCTRL(2) },
3393 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
3394 + .driver_info = NCTRL(0) | ZLP },
3395 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
3396 + .driver_info = NCTRL(0) | ZLP },
3397 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
3398 ++ .driver_info = NCTRL(0) | ZLP },
3399 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
3400 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
3401 + .driver_info = RSVD(1) },
3402 +diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
3403 +index 97f50f301f13b..d229d2db44ff0 100644
3404 +--- a/drivers/usb/typec/tipd/core.c
3405 ++++ b/drivers/usb/typec/tipd/core.c
3406 +@@ -618,12 +618,12 @@ static int tps6598x_probe(struct i2c_client *client)
3407 +
3408 + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
3409 + if (ret < 0)
3410 +- return ret;
3411 ++ goto err_clear_mask;
3412 + trace_tps6598x_status(status);
3413 +
3414 + ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
3415 + if (ret < 0)
3416 +- return ret;
3417 ++ goto err_clear_mask;
3418 +
3419 + /*
3420 + * This fwnode has a "compatible" property, but is never populated as a
3421 +@@ -712,7 +712,8 @@ err_role_put:
3422 + usb_role_switch_put(tps->role_sw);
3423 + err_fwnode_put:
3424 + fwnode_handle_put(fwnode);
3425 +-
3426 ++err_clear_mask:
3427 ++ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
3428 + return ret;
3429 + }
3430 +
3431 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
3432 +index 4e3b95af7ee4d..d07a20bbc07b7 100644
3433 +--- a/drivers/vhost/vsock.c
3434 ++++ b/drivers/vhost/vsock.c
3435 +@@ -633,16 +633,18 @@ err:
3436 + return ret;
3437 + }
3438 +
3439 +-static int vhost_vsock_stop(struct vhost_vsock *vsock)
3440 ++static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
3441 + {
3442 + size_t i;
3443 +- int ret;
3444 ++ int ret = 0;
3445 +
3446 + mutex_lock(&vsock->dev.mutex);
3447 +
3448 +- ret = vhost_dev_check_owner(&vsock->dev);
3449 +- if (ret)
3450 +- goto err;
3451 ++ if (check_owner) {
3452 ++ ret = vhost_dev_check_owner(&vsock->dev);
3453 ++ if (ret)
3454 ++ goto err;
3455 ++ }
3456 +
3457 + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
3458 + struct vhost_virtqueue *vq = &vsock->vqs[i];
3459 +@@ -757,7 +759,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
3460 + * inefficient. Room for improvement here. */
3461 + vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
3462 +
3463 +- vhost_vsock_stop(vsock);
3464 ++ /* Don't check the owner, because we are in the release path, so we
3465 ++ * need to stop the vsock device in any case.
3466 ++ * vhost_vsock_stop() can not fail in this case, so we don't need to
3467 ++ * check the return code.
3468 ++ */
3469 ++ vhost_vsock_stop(vsock, false);
3470 + vhost_vsock_flush(vsock);
3471 + vhost_dev_stop(&vsock->dev);
3472 +
3473 +@@ -872,7 +879,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
3474 + if (start)
3475 + return vhost_vsock_start(vsock);
3476 + else
3477 +- return vhost_vsock_stop(vsock);
3478 ++ return vhost_vsock_stop(vsock, true);
3479 + case VHOST_GET_FEATURES:
3480 + features = VHOST_VSOCK_FEATURES;
3481 + if (copy_to_user(argp, &features, sizeof(features)))
3482 +diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
3483 +index 295bbc13ace6a..fcd7eb496478c 100644
3484 +--- a/fs/btrfs/lzo.c
3485 ++++ b/fs/btrfs/lzo.c
3486 +@@ -363,6 +363,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
3487 + kunmap(cur_page);
3488 + cur_in += LZO_LEN;
3489 +
3490 ++ if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
3491 ++ /*
3492 ++ * seg_len shouldn't be larger than we have allocated
3493 ++ * for workspace->cbuf
3494 ++ */
3495 ++ btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
3496 ++ seg_len);
3497 ++ ret = -EIO;
3498 ++ goto out;
3499 ++ }
3500 ++
3501 + /* Copy the compressed segment payload into workspace */
3502 + copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
3503 +
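
The lzo hunk rejects any on-disk segment length larger than the worst-case compressed size the workspace buffer was sized for, so a crafted or corrupted image can no longer overflow workspace->cbuf. The general pattern, sketched standalone (buffer sizing follows the usual lzo1x worst-case formula; names are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define WORST_COMPRESS(x) ((x) + (x) / 16 + 64 + 3)

    static unsigned char cbuf[WORST_COMPRESS(PAGE_SIZE)];

    /* Validate the untrusted length field before copying into cbuf. */
    static int copy_segment(const unsigned char *src, uint32_t seg_len)
    {
        if (seg_len > sizeof(cbuf)) {
            fprintf(stderr, "unexpectedly large segment len %u\n", seg_len);
            return -1;                      /* treat as corruption (-EIO) */
        }
        memcpy(cbuf, src, seg_len);
        return 0;
    }

    int main(void)
    {
        unsigned char data[64] = { 0 };

        printf("small: %d\n", copy_segment(data, sizeof(data)));
        printf("huge:  %d\n", copy_segment(data, 1u << 20));
        return 0;
    }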
3504 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
3505 +index 7733e8ac0a698..51382d2be3d44 100644
3506 +--- a/fs/btrfs/tree-checker.c
3507 ++++ b/fs/btrfs/tree-checker.c
3508 +@@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf,
3509 + struct btrfs_key *key, int slot)
3510 + {
3511 + struct btrfs_dev_item *ditem;
3512 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
3513 +
3514 + if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
3515 + dev_item_err(leaf, slot,
3516 +@@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf,
3517 + key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
3518 + return -EUCLEAN;
3519 + }
3520 ++
3521 ++ if (unlikely(item_size != sizeof(*ditem))) {
3522 ++ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
3523 ++ item_size, sizeof(*ditem));
3524 ++ return -EUCLEAN;
3525 ++ }
3526 ++
3527 + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
3528 + if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
3529 + dev_item_err(leaf, slot,
3530 +@@ -1007,6 +1015,7 @@ static int check_inode_item(struct extent_buffer *leaf,
3531 + struct btrfs_inode_item *iitem;
3532 + u64 super_gen = btrfs_super_generation(fs_info->super_copy);
3533 + u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
3534 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
3535 + u32 mode;
3536 + int ret;
3537 + u32 flags;
3538 +@@ -1016,6 +1025,12 @@ static int check_inode_item(struct extent_buffer *leaf,
3539 + if (unlikely(ret < 0))
3540 + return ret;
3541 +
3542 ++ if (unlikely(item_size != sizeof(*iitem))) {
3543 ++ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
3544 ++ item_size, sizeof(*iitem));
3545 ++ return -EUCLEAN;
3546 ++ }
3547 ++
3548 + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
3549 +
3550 + /* Here we use super block generation + 1 to handle log tree */
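
Both tree-checker hunks compare the leaf item's recorded size against sizeof() of the structure about to be parsed, rejecting truncated or oversized items before btrfs_item_ptr() hands out a pointer into them. A minimal sketch of checking an untrusted size field before interpreting raw bytes as a struct (the struct layout here is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct dev_item {
        uint64_t devid;
        uint64_t total_bytes;
    };

    static int check_dev_item(const void *buf, uint32_t item_size)
    {
        struct dev_item item;

        if (item_size != sizeof(item)) {
            fprintf(stderr, "invalid item size: has %u expect %zu\n",
                    item_size, sizeof(item));
            return -1;                      /* -EUCLEAN in the kernel */
        }
        memcpy(&item, buf, sizeof(item));
        printf("devid=%llu\n", (unsigned long long)item.devid);
        return 0;
    }

    int main(void)
    {
        struct dev_item on_disk = { .devid = 7, .total_bytes = 1u << 20 };

        check_dev_item(&on_disk, sizeof(on_disk));  /* accepted */
        check_dev_item(&on_disk, 4);                /* rejected */
        return 0;
    }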
3551 +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
3552 +index d3cd2a94d1e8c..d1f9d26322027 100644
3553 +--- a/fs/configfs/dir.c
3554 ++++ b/fs/configfs/dir.c
3555 +@@ -34,6 +34,14 @@
3556 + */
3557 + DEFINE_SPINLOCK(configfs_dirent_lock);
3558 +
3559 ++/*
3560 ++ * All of link_obj/unlink_obj/link_group/unlink_group require that
3561 ++ * subsys->su_mutex is held.
3562 ++ * But parent configfs_subsystem is NULL when config_item is root.
3563 ++ * Use this mutex when config_item is root.
3564 ++ */
3565 ++static DEFINE_MUTEX(configfs_subsystem_mutex);
3566 ++
3567 + static void configfs_d_iput(struct dentry * dentry,
3568 + struct inode * inode)
3569 + {
3570 +@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
3571 + group->cg_item.ci_name = group->cg_item.ci_namebuf;
3572 +
3573 + sd = root->d_fsdata;
3574 ++ mutex_lock(&configfs_subsystem_mutex);
3575 + link_group(to_config_group(sd->s_element), group);
3576 ++ mutex_unlock(&configfs_subsystem_mutex);
3577 +
3578 + inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
3579 +
3580 +@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
3581 + inode_unlock(d_inode(root));
3582 +
3583 + if (err) {
3584 ++ mutex_lock(&configfs_subsystem_mutex);
3585 + unlink_group(group);
3586 ++ mutex_unlock(&configfs_subsystem_mutex);
3587 + configfs_release_fs();
3588 + }
3589 + put_fragment(frag);
3590 +@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
3591 +
3592 + dput(dentry);
3593 +
3594 ++ mutex_lock(&configfs_subsystem_mutex);
3595 + unlink_group(group);
3596 ++ mutex_unlock(&configfs_subsystem_mutex);
3597 + configfs_release_fs();
3598 + }
3599 +
3600 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3601 +index 21fc8ce9405d3..d7e49e87b49b9 100644
3602 +--- a/fs/io_uring.c
3603 ++++ b/fs/io_uring.c
3604 +@@ -4454,6 +4454,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3605 + } else {
3606 + list_add_tail(&buf->list, &(*head)->list);
3607 + }
3608 ++ cond_resched();
3609 + }
3610 +
3611 + return i ? i : -ENOMEM;
3612 +@@ -7590,7 +7591,7 @@ static int io_run_task_work_sig(void)
3613 + /* when returns >0, the caller should retry */
3614 + static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
3615 + struct io_wait_queue *iowq,
3616 +- signed long *timeout)
3617 ++ ktime_t timeout)
3618 + {
3619 + int ret;
3620 +
3621 +@@ -7602,8 +7603,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
3622 + if (test_bit(0, &ctx->check_cq_overflow))
3623 + return 1;
3624 +
3625 +- *timeout = schedule_timeout(*timeout);
3626 +- return !*timeout ? -ETIME : 1;
3627 ++ if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
3628 ++ return -ETIME;
3629 ++ return 1;
3630 + }
3631 +
3632 + /*
3633 +@@ -7616,7 +7618,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3634 + {
3635 + struct io_wait_queue iowq;
3636 + struct io_rings *rings = ctx->rings;
3637 +- signed long timeout = MAX_SCHEDULE_TIMEOUT;
3638 ++ ktime_t timeout = KTIME_MAX;
3639 + int ret;
3640 +
3641 + do {
3642 +@@ -7632,7 +7634,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3643 +
3644 + if (get_timespec64(&ts, uts))
3645 + return -EFAULT;
3646 +- timeout = timespec64_to_jiffies(&ts);
3647 ++ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
3648 + }
3649 +
3650 + if (sig) {
3651 +@@ -7664,7 +7666,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3652 + }
3653 + prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
3654 + TASK_INTERRUPTIBLE);
3655 +- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
3656 ++ ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
3657 + finish_wait(&ctx->cq_wait, &iowq.wq);
3658 + cond_resched();
3659 + } while (ret > 0);
3660 +@@ -7817,7 +7819,15 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
3661 + ret = wait_for_completion_interruptible(&data->done);
3662 + if (!ret) {
3663 + mutex_lock(&ctx->uring_lock);
3664 +- break;
3665 ++ if (atomic_read(&data->refs) > 0) {
3666 ++ /*
3667 ++ * it has been revived by another thread while
3668 ++ * we were unlocked
3669 ++ */
3670 ++ mutex_unlock(&ctx->uring_lock);
3671 ++ } else {
3672 ++ break;
3673 ++ }
3674 + }
3675 +
3676 + atomic_inc(&data->refs);
3677 +diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
3678 +index 3616839c5c4b6..f2625a372a3ae 100644
3679 +--- a/fs/tracefs/inode.c
3680 ++++ b/fs/tracefs/inode.c
3681 +@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
3682 + if (!gid_valid(gid))
3683 + return -EINVAL;
3684 + opts->gid = gid;
3685 +- set_gid(tracefs_mount->mnt_root, gid);
3686 + break;
3687 + case Opt_mode:
3688 + if (match_octal(&args[0], &option))
3689 +@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb)
3690 + inode->i_mode |= opts->mode;
3691 +
3692 + inode->i_uid = opts->uid;
3693 +- inode->i_gid = opts->gid;
3694 ++
3695 ++ /* Set all the group ids to the mount option */
3696 ++ set_gid(sb->s_root, opts->gid);
3697 +
3698 + return 0;
3699 + }
3700 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
3701 +index 6c4640526f741..d9049f2a78ca8 100644
3702 +--- a/include/linux/bpf.h
3703 ++++ b/include/linux/bpf.h
3704 +@@ -206,11 +206,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
3705 + static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
3706 + {
3707 + if (unlikely(map_value_has_spin_lock(map)))
3708 +- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
3709 +- (struct bpf_spin_lock){};
3710 ++ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
3711 + if (unlikely(map_value_has_timer(map)))
3712 +- *(struct bpf_timer *)(dst + map->timer_off) =
3713 +- (struct bpf_timer){};
3714 ++ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
3715 + }
3716 +
3717 + /* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
3718 +@@ -221,7 +219,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
3719 + if (unlikely(map_value_has_spin_lock(map))) {
3720 + s_off = map->spin_lock_off;
3721 + s_sz = sizeof(struct bpf_spin_lock);
3722 +- } else if (unlikely(map_value_has_timer(map))) {
3723 ++ }
3724 ++ if (unlikely(map_value_has_timer(map))) {
3725 + t_off = map->timer_off;
3726 + t_sz = sizeof(struct bpf_timer);
3727 + }
3728 +diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
3729 +index 104505e9028f7..87932bdb25d7b 100644
3730 +--- a/include/linux/nvmem-provider.h
3731 ++++ b/include/linux/nvmem-provider.h
3732 +@@ -66,7 +66,8 @@ struct nvmem_keepout {
3733 + * @word_size: Minimum read/write access granularity.
3734 + * @stride: Minimum read/write access stride.
3735 + * @priv: User context passed to read/write callbacks.
3736 +- * @wp-gpio: Write protect pin
3737 ++ * @wp-gpio: Write protect pin
3738 ++ * @ignore_wp: Write Protect pin is managed by the provider.
3739 + *
3740 + * Note: A default "nvmem<id>" name will be assigned to the device if
3741 + * no name is specified in its configuration. In such case "<id>" is
3742 +@@ -88,6 +89,7 @@ struct nvmem_config {
3743 + enum nvmem_type type;
3744 + bool read_only;
3745 + bool root_only;
3746 ++ bool ignore_wp;
3747 + struct device_node *of_node;
3748 + bool no_of_node;
3749 + nvmem_reg_read_t reg_read;
3750 +diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
3751 +index feda1dc7f98ee..38b701b7af4cf 100644
3752 +--- a/include/linux/tee_drv.h
3753 ++++ b/include/linux/tee_drv.h
3754 +@@ -582,4 +582,18 @@ struct tee_client_driver {
3755 + #define to_tee_client_driver(d) \
3756 + container_of(d, struct tee_client_driver, driver)
3757 +
3758 ++/**
3759 ++ * teedev_open() - Open a struct tee_device
3760 ++ * @teedev: Device to open
3761 ++ *
3762 ++ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
3763 ++ */
3764 ++struct tee_context *teedev_open(struct tee_device *teedev);
3765 ++
3766 ++/**
3767 ++ * teedev_close_context() - closes a struct tee_context
3768 ++ * @ctx: The struct tee_context to close
3769 ++ */
3770 ++void teedev_close_context(struct tee_context *ctx);
3771 ++
3772 + #endif /*__TEE_DRV_H*/
3773 +diff --git a/include/net/checksum.h b/include/net/checksum.h
3774 +index 5b96d5bd6e545..d3b5d368a0caa 100644
3775 +--- a/include/net/checksum.h
3776 ++++ b/include/net/checksum.h
3777 +@@ -22,7 +22,7 @@
3778 + #include <asm/checksum.h>
3779 +
3780 + #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
3781 +-static inline
3782 ++static __always_inline
3783 + __wsum csum_and_copy_from_user (const void __user *src, void *dst,
3784 + int len)
3785 + {
3786 +@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
3787 + #endif
3788 +
3789 + #ifndef HAVE_CSUM_COPY_USER
3790 +-static __inline__ __wsum csum_and_copy_to_user
3791 ++static __always_inline __wsum csum_and_copy_to_user
3792 + (const void *src, void __user *dst, int len)
3793 + {
3794 + __wsum sum = csum_partial(src, len, ~0U);
3795 +@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
3796 + #endif
3797 +
3798 + #ifndef _HAVE_ARCH_CSUM_AND_COPY
3799 +-static inline __wsum
3800 ++static __always_inline __wsum
3801 + csum_partial_copy_nocheck(const void *src, void *dst, int len)
3802 + {
3803 + memcpy(dst, src, len);
3804 +@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
3805 + #endif
3806 +
3807 + #ifndef HAVE_ARCH_CSUM_ADD
3808 +-static inline __wsum csum_add(__wsum csum, __wsum addend)
3809 ++static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
3810 + {
3811 + u32 res = (__force u32)csum;
3812 + res += (__force u32)addend;
3813 +@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
3814 + }
3815 + #endif
3816 +
3817 +-static inline __wsum csum_sub(__wsum csum, __wsum addend)
3818 ++static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
3819 + {
3820 + return csum_add(csum, ~addend);
3821 + }
3822 +
3823 +-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
3824 ++static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
3825 + {
3826 + u16 res = (__force u16)csum;
3827 +
3828 +@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
3829 + return (__force __sum16)(res + (res < (__force u16)addend));
3830 + }
3831 +
3832 +-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
3833 ++static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
3834 + {
3835 + return csum16_add(csum, ~addend);
3836 + }
3837 +
3838 +-static inline __wsum csum_shift(__wsum sum, int offset)
3839 ++static __always_inline __wsum csum_shift(__wsum sum, int offset)
3840 + {
3841 + /* rotate sum to align it with a 16b boundary */
3842 + if (offset & 1)
3843 +@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset)
3844 + return sum;
3845 + }
3846 +
3847 +-static inline __wsum
3848 ++static __always_inline __wsum
3849 + csum_block_add(__wsum csum, __wsum csum2, int offset)
3850 + {
3851 + return csum_add(csum, csum_shift(csum2, offset));
3852 + }
3853 +
3854 +-static inline __wsum
3855 ++static __always_inline __wsum
3856 + csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
3857 + {
3858 + return csum_block_add(csum, csum2, offset);
3859 + }
3860 +
3861 +-static inline __wsum
3862 ++static __always_inline __wsum
3863 + csum_block_sub(__wsum csum, __wsum csum2, int offset)
3864 + {
3865 + return csum_block_add(csum, ~csum2, offset);
3866 + }
3867 +
3868 +-static inline __wsum csum_unfold(__sum16 n)
3869 ++static __always_inline __wsum csum_unfold(__sum16 n)
3870 + {
3871 + return (__force __wsum)n;
3872 + }
3873 +
3874 +-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
3875 ++static __always_inline
3876 ++__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
3877 + {
3878 + return csum_partial(buff, len, sum);
3879 + }
3880 +
3881 + #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
3882 +
3883 +-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
3884 ++static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
3885 + {
3886 + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
3887 + }
3888 +
3889 +-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
3890 ++static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
3891 + {
3892 + __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
3893 +
3894 +@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
3895 + * m : old value of a 16bit field
3896 + * m' : new value of a 16bit field
3897 + */
3898 +-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
3899 ++static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
3900 + {
3901 + *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
3902 + }
3903 +
3904 ++static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
3905 ++{
3906 ++ *csum = csum_add(csum_sub(*csum, old), new);
3907 ++}
3908 ++
3909 + struct sk_buff;
3910 + void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
3911 + __be32 from, __be32 to, bool pseudohdr);
3912 +@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
3913 + void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
3914 + __wsum diff, bool pseudohdr);
3915 +
3916 +-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
3917 +- __be16 from, __be16 to,
3918 +- bool pseudohdr)
3919 ++static __always_inline
3920 ++void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
3921 ++ __be16 from, __be16 to, bool pseudohdr)
3922 + {
3923 + inet_proto_csum_replace4(sum, skb, (__force __be32)from,
3924 + (__force __be32)to, pseudohdr);
3925 + }
3926 +
3927 +-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
3928 +- int start, int offset)
3929 ++static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
3930 ++ int start, int offset)
3931 + {
3932 + __sum16 *psum = (__sum16 *)(ptr + offset);
3933 + __wsum delta;
3934 +@@ -175,7 +181,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
3935 + return delta;
3936 + }
3937 +
3938 +-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
3939 ++static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
3940 + {
3941 + *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
3942 + }
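
The checksum.h hunk only strengthens inline to __always_inline, so these small helpers are folded into their callers even in debug builds; the logic is unchanged. The helpers themselves are a tidy illustration of one's-complement arithmetic, e.g. csum16_add() adds with an end-around carry. A runnable rendering of just that operation:

    #include <stdio.h>
    #include <stdint.h>

    /* 16-bit one's-complement add: fold any carry back into the
     * low bits ("end-around carry"), as csum16_add() does. */
    static uint16_t csum16_add(uint16_t csum, uint16_t addend)
    {
        uint16_t res = csum + addend;

        return res + (res < addend);    /* +1 if the add wrapped */
    }

    int main(void)
    {
        /* 0xffff + 0x0001 wraps to 0x0000; the carry folds back in. */
        printf("0x%04x\n", csum16_add(0xffff, 0x0001));
        return 0;
    }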
3943 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
3944 +index a16171c5fd9eb..d52a5d776e764 100644
3945 +--- a/include/net/netfilter/nf_tables.h
3946 ++++ b/include/net/netfilter/nf_tables.h
3947 +@@ -883,9 +883,9 @@ struct nft_expr_ops {
3948 + int (*offload)(struct nft_offload_ctx *ctx,
3949 + struct nft_flow_rule *flow,
3950 + const struct nft_expr *expr);
3951 ++ bool (*offload_action)(const struct nft_expr *expr);
3952 + void (*offload_stats)(struct nft_expr *expr,
3953 + const struct flow_stats *stats);
3954 +- u32 offload_flags;
3955 + const struct nft_expr_type *type;
3956 + void *data;
3957 + };
3958 +diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
3959 +index f9d95ff82df83..7971478439580 100644
3960 +--- a/include/net/netfilter/nf_tables_offload.h
3961 ++++ b/include/net/netfilter/nf_tables_offload.h
3962 +@@ -67,8 +67,6 @@ struct nft_flow_rule {
3963 + struct flow_rule *rule;
3964 + };
3965 +
3966 +-#define NFT_OFFLOAD_F_ACTION (1 << 0)
3967 +-
3968 + void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
3969 + enum flow_dissector_key_id addr_type);
3970 +
3971 +diff --git a/include/net/sock.h b/include/net/sock.h
3972 +index dfb92f91d5be5..7d49196a3880e 100644
3973 +--- a/include/net/sock.h
3974 ++++ b/include/net/sock.h
3975 +@@ -506,7 +506,7 @@ struct sock {
3976 + u16 sk_tsflags;
3977 + int sk_bind_phc;
3978 + u8 sk_shutdown;
3979 +- u32 sk_tskey;
3980 ++ atomic_t sk_tskey;
3981 + atomic_t sk_zckey;
3982 +
3983 + u8 sk_clockid;
3984 +@@ -2598,7 +2598,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
3985 + __sock_tx_timestamp(tsflags, tx_flags);
3986 + if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
3987 + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
3988 +- *tskey = sk->sk_tskey++;
3989 ++ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
3990 + }
3991 + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
3992 + *tx_flags |= SKBTX_WIFI_STATUS;
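
sock.h converts sk_tskey to atomic_t, and each bare sk->sk_tskey++ elsewhere in this patch becomes atomic_inc_return(&sk->sk_tskey) - 1, i.e. an atomic fetch-then-increment, so concurrent senders each draw a unique key. The C11 equivalent:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint tskey;

    /* atomic_fetch_add() returns the old value -- the same result as
     * atomic_inc_return(&x) - 1: distinct per caller under contention. */
    static unsigned int next_tskey(void)
    {
        return atomic_fetch_add(&tskey, 1);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("%u\n", next_tskey());
        return 0;
    }

A plain non-atomic increment can hand the same key to two racing senders, which is exactly the bug the hunks in ip_output.c, ip6_output.c and j1939 close.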
3993 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3994 +index ecd51a8a8680c..53384622e8dac 100644
3995 +--- a/kernel/bpf/syscall.c
3996 ++++ b/kernel/bpf/syscall.c
3997 +@@ -1337,6 +1337,7 @@ int generic_map_delete_batch(struct bpf_map *map,
3998 + maybe_wait_bpf_programs(map);
3999 + if (err)
4000 + break;
4001 ++ cond_resched();
4002 + }
4003 + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
4004 + err = -EFAULT;
4005 +@@ -1394,6 +1395,7 @@ int generic_map_update_batch(struct bpf_map *map,
4006 +
4007 + if (err)
4008 + break;
4009 ++ cond_resched();
4010 + }
4011 +
4012 + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
4013 +@@ -1491,6 +1493,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
4014 + swap(prev_key, key);
4015 + retry = MAP_LOOKUP_RETRIES;
4016 + cp++;
4017 ++ cond_resched();
4018 + }
4019 +
4020 + if (err == -EFAULT)
4021 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
4022 +index c59aa2c7749b2..58900dc92ac98 100644
4023 +--- a/kernel/cgroup/cgroup-v1.c
4024 ++++ b/kernel/cgroup/cgroup-v1.c
4025 +@@ -549,6 +549,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
4026 + char *buf, size_t nbytes, loff_t off)
4027 + {
4028 + struct cgroup *cgrp;
4029 ++ struct cgroup_file_ctx *ctx;
4030 +
4031 + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
4032 +
4033 +@@ -556,8 +557,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
4034 + * Release agent gets called with all capabilities,
4035 + * require capabilities to set release agent.
4036 + */
4037 +- if ((of->file->f_cred->user_ns != &init_user_ns) ||
4038 +- !capable(CAP_SYS_ADMIN))
4039 ++ ctx = of->priv;
4040 ++ if ((ctx->ns->user_ns != &init_user_ns) ||
4041 ++ !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
4042 + return -EPERM;
4043 +
4044 + cgrp = cgroup_kn_lock_live(of->kn, false);
4045 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
4046 +index 67eae4a4b724a..f6794602ab10c 100644
4047 +--- a/kernel/cgroup/cpuset.c
4048 ++++ b/kernel/cgroup/cpuset.c
4049 +@@ -2249,6 +2249,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
4050 + cgroup_taskset_first(tset, &css);
4051 + cs = css_cs(css);
4052 +
4053 ++ cpus_read_lock();
4054 + percpu_down_write(&cpuset_rwsem);
4055 +
4056 + guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
4057 +@@ -2302,6 +2303,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
4058 + wake_up(&cpuset_attach_wq);
4059 +
4060 + percpu_up_write(&cpuset_rwsem);
4061 ++ cpus_read_unlock();
4062 + }
4063 +
4064 + /* The various types of files and directories in a cpuset file system */
4065 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
4066 +index 3d5c07239a2a8..67c7979c40c0b 100644
4067 +--- a/kernel/trace/trace_events_trigger.c
4068 ++++ b/kernel/trace/trace_events_trigger.c
4069 +@@ -955,6 +955,16 @@ traceon_trigger(struct event_trigger_data *data,
4070 + struct trace_buffer *buffer, void *rec,
4071 + struct ring_buffer_event *event)
4072 + {
4073 ++ struct trace_event_file *file = data->private_data;
4074 ++
4075 ++ if (file) {
4076 ++ if (tracer_tracing_is_on(file->tr))
4077 ++ return;
4078 ++
4079 ++ tracer_tracing_on(file->tr);
4080 ++ return;
4081 ++ }
4082 ++
4083 + if (tracing_is_on())
4084 + return;
4085 +
4086 +@@ -966,8 +976,15 @@ traceon_count_trigger(struct event_trigger_data *data,
4087 + struct trace_buffer *buffer, void *rec,
4088 + struct ring_buffer_event *event)
4089 + {
4090 +- if (tracing_is_on())
4091 +- return;
4092 ++ struct trace_event_file *file = data->private_data;
4093 ++
4094 ++ if (file) {
4095 ++ if (tracer_tracing_is_on(file->tr))
4096 ++ return;
4097 ++ } else {
4098 ++ if (tracing_is_on())
4099 ++ return;
4100 ++ }
4101 +
4102 + if (!data->count)
4103 + return;
4104 +@@ -975,7 +992,10 @@ traceon_count_trigger(struct event_trigger_data *data,
4105 + if (data->count != -1)
4106 + (data->count)--;
4107 +
4108 +- tracing_on();
4109 ++ if (file)
4110 ++ tracer_tracing_on(file->tr);
4111 ++ else
4112 ++ tracing_on();
4113 + }
4114 +
4115 + static void
4116 +@@ -983,6 +1003,16 @@ traceoff_trigger(struct event_trigger_data *data,
4117 + struct trace_buffer *buffer, void *rec,
4118 + struct ring_buffer_event *event)
4119 + {
4120 ++ struct trace_event_file *file = data->private_data;
4121 ++
4122 ++ if (file) {
4123 ++ if (!tracer_tracing_is_on(file->tr))
4124 ++ return;
4125 ++
4126 ++ tracer_tracing_off(file->tr);
4127 ++ return;
4128 ++ }
4129 ++
4130 + if (!tracing_is_on())
4131 + return;
4132 +
4133 +@@ -994,8 +1024,15 @@ traceoff_count_trigger(struct event_trigger_data *data,
4134 + struct trace_buffer *buffer, void *rec,
4135 + struct ring_buffer_event *event)
4136 + {
4137 +- if (!tracing_is_on())
4138 +- return;
4139 ++ struct trace_event_file *file = data->private_data;
4140 ++
4141 ++ if (file) {
4142 ++ if (!tracer_tracing_is_on(file->tr))
4143 ++ return;
4144 ++ } else {
4145 ++ if (!tracing_is_on())
4146 ++ return;
4147 ++ }
4148 +
4149 + if (!data->count)
4150 + return;
4151 +@@ -1003,7 +1040,10 @@ traceoff_count_trigger(struct event_trigger_data *data,
4152 + if (data->count != -1)
4153 + (data->count)--;
4154 +
4155 +- tracing_off();
4156 ++ if (file)
4157 ++ tracer_tracing_off(file->tr);
4158 ++ else
4159 ++ tracing_off();
4160 + }
4161 +
4162 + static int
4163 +@@ -1200,7 +1240,12 @@ stacktrace_trigger(struct event_trigger_data *data,
4164 + struct trace_buffer *buffer, void *rec,
4165 + struct ring_buffer_event *event)
4166 + {
4167 +- trace_dump_stack(STACK_SKIP);
4168 ++ struct trace_event_file *file = data->private_data;
4169 ++
4170 ++ if (file)
4171 ++ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
4172 ++ else
4173 ++ trace_dump_stack(STACK_SKIP);
4174 + }
4175 +
4176 + static void
4177 +diff --git a/mm/filemap.c b/mm/filemap.c
4178 +index 82a17c35eb96b..1293c3409e429 100644
4179 +--- a/mm/filemap.c
4180 ++++ b/mm/filemap.c
4181 +@@ -2354,8 +2354,12 @@ static void filemap_get_read_batch(struct address_space *mapping,
4182 + break;
4183 + if (PageReadahead(head))
4184 + break;
4185 +- xas.xa_index = head->index + thp_nr_pages(head) - 1;
4186 +- xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
4187 ++ if (PageHead(head)) {
4188 ++ xas_set(&xas, head->index + thp_nr_pages(head));
4189 ++ /* Handle wrap correctly */
4190 ++ if (xas.xa_index - 1 >= max)
4191 ++ break;
4192 ++ }
4193 + continue;
4194 + put_page:
4195 + put_page(head);
4196 +diff --git a/mm/memblock.c b/mm/memblock.c
4197 +index 5096500b26473..2b7397781c99a 100644
4198 +--- a/mm/memblock.c
4199 ++++ b/mm/memblock.c
4200 +@@ -366,14 +366,20 @@ void __init memblock_discard(void)
4201 + addr = __pa(memblock.reserved.regions);
4202 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
4203 + memblock.reserved.max);
4204 +- __memblock_free_late(addr, size);
4205 ++ if (memblock_reserved_in_slab)
4206 ++ kfree(memblock.reserved.regions);
4207 ++ else
4208 ++ __memblock_free_late(addr, size);
4209 + }
4210 +
4211 + if (memblock.memory.regions != memblock_memory_init_regions) {
4212 + addr = __pa(memblock.memory.regions);
4213 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
4214 + memblock.memory.max);
4215 +- __memblock_free_late(addr, size);
4216 ++ if (memblock_memory_in_slab)
4217 ++ kfree(memblock.memory.regions);
4218 ++ else
4219 ++ __memblock_free_late(addr, size);
4220 + }
4221 +
4222 + memblock_memory = NULL;
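
The memblock hunk frees the region arrays with kfree() when they were reallocated from the slab and only falls back to __memblock_free_late() for the original early-boot allocation: memory must go back to whichever allocator handed it out. A trivial userspace rendering of "remember the allocator, free accordingly" (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    static char static_buf[64];     /* stands in for the initial static array */
    static char *buf = static_buf;
    static int buf_in_heap;         /* like memblock_*_in_slab */

    static void grow(size_t n)
    {
        char *p = malloc(n);        /* later growth switches allocators */

        if (!p)
            return;
        buf = p;
        buf_in_heap = 1;
    }

    static void discard(void)
    {
        if (buf_in_heap)
            free(buf);              /* matching allocator... */
        /* ...else static storage: nothing to free */
        buf = NULL;
    }

    int main(void)
    {
        grow(128);
        discard();
        puts("freed with the allocator that allocated");
        return 0;
    }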
4223 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
4224 +index a271688780a2c..307ee1174a6e2 100644
4225 +--- a/net/can/j1939/transport.c
4226 ++++ b/net/can/j1939/transport.c
4227 +@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
4228 + /* set the end-packet for broadcast */
4229 + session->pkt.last = session->pkt.total;
4230 +
4231 +- skcb->tskey = session->sk->sk_tskey++;
4232 ++ skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
4233 + session->tskey = skcb->tskey;
4234 +
4235 + return session;
4236 +diff --git a/net/core/filter.c b/net/core/filter.c
4237 +index f207e4782bd0e..76e406965b6f9 100644
4238 +--- a/net/core/filter.c
4239 ++++ b/net/core/filter.c
4240 +@@ -2711,6 +2711,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
4241 + if (unlikely(flags))
4242 + return -EINVAL;
4243 +
4244 ++ if (unlikely(len == 0))
4245 ++ return 0;
4246 ++
4247 + /* First find the starting scatterlist element */
4248 + i = msg->sg.start;
4249 + do {
4250 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4251 +index f7e003571a356..449a96e358ad5 100644
4252 +--- a/net/core/skbuff.c
4253 ++++ b/net/core/skbuff.c
4254 +@@ -2254,7 +2254,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
4255 + /* Free pulled out fragments. */
4256 + while ((list = skb_shinfo(skb)->frag_list) != insp) {
4257 + skb_shinfo(skb)->frag_list = list->next;
4258 +- kfree_skb(list);
4259 ++ consume_skb(list);
4260 + }
4261 + /* And insert new clone at head. */
4262 + if (clone) {
4263 +@@ -4844,7 +4844,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4264 + serr->ee.ee_data = skb_shinfo(skb)->tskey;
4265 + if (sk->sk_protocol == IPPROTO_TCP &&
4266 + sk->sk_type == SOCK_STREAM)
4267 +- serr->ee.ee_data -= sk->sk_tskey;
4268 ++ serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
4269 + }
4270 +
4271 + err = sock_queue_err_skb(sk, skb);
4272 +@@ -6220,7 +6220,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
4273 + /* Free pulled out fragments. */
4274 + while ((list = shinfo->frag_list) != insp) {
4275 + shinfo->frag_list = list->next;
4276 +- kfree_skb(list);
4277 ++ consume_skb(list);
4278 + }
4279 + /* And insert new clone at head. */
4280 + if (clone) {
4281 +diff --git a/net/core/sock.c b/net/core/sock.c
4282 +index 6ea317f84edd2..deaed1b206823 100644
4283 +--- a/net/core/sock.c
4284 ++++ b/net/core/sock.c
4285 +@@ -866,9 +866,9 @@ int sock_set_timestamping(struct sock *sk, int optname,
4286 + if ((1 << sk->sk_state) &
4287 + (TCPF_CLOSE | TCPF_LISTEN))
4288 + return -EINVAL;
4289 +- sk->sk_tskey = tcp_sk(sk)->snd_una;
4290 ++ atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
4291 + } else {
4292 +- sk->sk_tskey = 0;
4293 ++ atomic_set(&sk->sk_tskey, 0);
4294 + }
4295 + }
4296 +
4297 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
4298 +index dcea653a5204a..77534b44b8c7c 100644
4299 +--- a/net/ipv4/af_inet.c
4300 ++++ b/net/ipv4/af_inet.c
4301 +@@ -1380,8 +1380,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
4302 + }
4303 +
4304 + ops = rcu_dereference(inet_offloads[proto]);
4305 +- if (likely(ops && ops->callbacks.gso_segment))
4306 ++ if (likely(ops && ops->callbacks.gso_segment)) {
4307 + segs = ops->callbacks.gso_segment(skb, features);
4308 ++ if (!segs)
4309 ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
4310 ++ }
4311 +
4312 + if (IS_ERR_OR_NULL(segs))
4313 + goto out;
4314 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
4315 +index a4d2eb691cbc1..131066d0319a2 100644
4316 +--- a/net/ipv4/ip_output.c
4317 ++++ b/net/ipv4/ip_output.c
4318 +@@ -992,7 +992,7 @@ static int __ip_append_data(struct sock *sk,
4319 +
4320 + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
4321 + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
4322 +- tskey = sk->sk_tskey++;
4323 ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
4324 +
4325 + hh_len = LL_RESERVED_SPACE(rt->dst.dev);
4326 +
4327 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
4328 +index e3a159c8f231e..36e89b6873876 100644
4329 +--- a/net/ipv4/ping.c
4330 ++++ b/net/ipv4/ping.c
4331 +@@ -187,7 +187,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
4332 + (int)ident, &ipv6_hdr(skb)->daddr, dif);
4333 + #endif
4334 + } else {
4335 +- pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
4336 + return NULL;
4337 + }
4338 +
4339 +diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
4340 +index b91003538d87a..bc3a043a5d5c7 100644
4341 +--- a/net/ipv4/udp_tunnel_nic.c
4342 ++++ b/net/ipv4/udp_tunnel_nic.c
4343 +@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
4344 + list_for_each_entry(node, &info->shared->devices, list)
4345 + if (node->dev == dev)
4346 + break;
4347 +- if (node->dev != dev)
4348 ++ if (list_entry_is_head(node, &info->shared->devices, list))
4349 + return;
4350 +
4351 + list_del(&node->list);
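
The udp_tunnel_nic hunk replaces the post-loop test node->dev != dev with list_entry_is_head(): when a list_for_each_entry() search finds nothing, the cursor is a fake entry computed from the list head, and dereferencing it reads garbage. With a plain NULL-terminated list the same rule reads as "check the cursor before touching it":

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        struct node *next;
        int dev;
    };

    /* Returns NULL when the search misses; callers must test for
     * that (the kernel analogue is list_entry_is_head()) before
     * dereferencing the cursor. */
    static struct node *find_dev(struct node *head, int dev)
    {
        struct node *n;

        for (n = head; n; n = n->next)
            if (n->dev == dev)
                break;
        return n;
    }

    int main(void)
    {
        struct node b = { NULL, 2 }, a = { &b, 1 };

        if (!find_dev(&a, 3))       /* never dereference a missed search */
            puts("no such device, bailing out");
        return 0;
    }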
4352 +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
4353 +index 1b9827ff8ccf4..172565d125704 100644
4354 +--- a/net/ipv6/ip6_offload.c
4355 ++++ b/net/ipv6/ip6_offload.c
4356 +@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
4357 + if (likely(ops && ops->callbacks.gso_segment)) {
4358 + skb_reset_transport_header(skb);
4359 + segs = ops->callbacks.gso_segment(skb, features);
4360 ++ if (!segs)
4361 ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
4362 + }
4363 +
4364 + if (IS_ERR_OR_NULL(segs))
4365 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4366 +index ff4e83e2a5068..22bf8fb617165 100644
4367 +--- a/net/ipv6/ip6_output.c
4368 ++++ b/net/ipv6/ip6_output.c
4369 +@@ -1465,7 +1465,7 @@ static int __ip6_append_data(struct sock *sk,
4370 +
4371 + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
4372 + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
4373 +- tskey = sk->sk_tskey++;
4374 ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
4375 +
4376 + hh_len = LL_RESERVED_SPACE(rt->dst.dev);
4377 +
4378 +diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
4379 +index b21ff9be04c61..8d1c67b935911 100644
4380 +--- a/net/mptcp/mib.c
4381 ++++ b/net/mptcp/mib.c
4382 +@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = {
4383 + SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
4384 + SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
4385 + SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
4386 ++ SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
4387 + SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
4388 + SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
4389 + SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
4390 + SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
4391 + SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
4392 + SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
4393 ++ SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
4394 + SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
4395 + SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
4396 + SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
4397 +diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
4398 +index ecd3d8b117e0b..2966fcb6548ba 100644
4399 +--- a/net/mptcp/mib.h
4400 ++++ b/net/mptcp/mib.h
4401 +@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field {
4402 + MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
4403 + MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
4404 + MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */
4405 ++ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */
4406 + MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */
4407 + MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */
4408 + MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */
4409 + MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */
4410 + MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */
4411 + MPTCP_MIB_RMADDR, /* Received RM_ADDR */
4412 ++ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */
4413 + MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
4414 + MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
4415 + MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
4416 +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
4417 +index 6ab386ff32944..d9790d6fbce9c 100644
4418 +--- a/net/mptcp/pm.c
4419 ++++ b/net/mptcp/pm.c
4420 +@@ -194,6 +194,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
4421 + mptcp_pm_add_addr_send_ack(msk);
4422 + } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
4423 + pm->remote = *addr;
4424 ++ } else {
4425 ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
4426 + }
4427 +
4428 + spin_unlock_bh(&pm->lock);
4429 +@@ -234,8 +236,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
4430 + mptcp_event_addr_removed(msk, rm_list->ids[i]);
4431 +
4432 + spin_lock_bh(&pm->lock);
4433 +- mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
4434 +- pm->rm_list_rx = *rm_list;
4435 ++ if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
4436 ++ pm->rm_list_rx = *rm_list;
4437 ++ else
4438 ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
4439 + spin_unlock_bh(&pm->lock);
4440 + }
4441 +
4442 +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
4443 +index 320f89b5c59dd..cf0f700f46dd9 100644
4444 +--- a/net/mptcp/pm_netlink.c
4445 ++++ b/net/mptcp/pm_netlink.c
4446 +@@ -606,6 +606,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
4447 + unsigned int add_addr_accept_max;
4448 + struct mptcp_addr_info remote;
4449 + unsigned int subflows_max;
4450 ++ bool reset_port = false;
4451 + int i, nr;
4452 +
4453 + add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
4454 +@@ -615,15 +616,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
4455 + msk->pm.add_addr_accepted, add_addr_accept_max,
4456 + msk->pm.remote.family);
4457 +
4458 +- if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote))
4459 ++ remote = msk->pm.remote;
4460 ++ if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
4461 + goto add_addr_echo;
4462 +
4463 ++ /* pick id 0 port, if none is provided the remote address */
4464 ++ if (!remote.port) {
4465 ++ reset_port = true;
4466 ++ remote.port = sk->sk_dport;
4467 ++ }
4468 ++
4469 + /* connect to the specified remote address, using whatever
4470 + * local address the routing configuration will pick.
4471 + */
4472 +- remote = msk->pm.remote;
4473 +- if (!remote.port)
4474 +- remote.port = sk->sk_dport;
4475 + nr = fill_local_addresses_vec(msk, addrs);
4476 +
4477 + msk->pm.add_addr_accepted++;
4478 +@@ -636,8 +641,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
4479 + __mptcp_subflow_connect(sk, &addrs[i], &remote);
4480 + spin_lock_bh(&msk->pm.lock);
4481 +
4482 ++ /* be sure to echo exactly the received address */
4483 ++ if (reset_port)
4484 ++ remote.port = 0;
4485 ++
4486 + add_addr_echo:
4487 +- mptcp_pm_announce_addr(msk, &msk->pm.remote, true);
4488 ++ mptcp_pm_announce_addr(msk, &remote, true);
4489 + mptcp_pm_nl_addr_send_ack(msk);
4490 + }
4491 +
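The pm_netlink.c hunks fill in a default port only for the local connect attempt and then restore port 0, so the ADD_ADDR echo carries exactly the address the peer announced. The save, mutate, restore shape in a stand-alone sketch (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct addr_info {
        unsigned short port;
    };

    /* Hypothetical stand-ins for the connect and echo steps. */
    static void do_connect(const struct addr_info *a)
    {
        printf("connecting to port %u\n", a->port);
    }

    static void echo_addr(const struct addr_info *a)
    {
        printf("echoing port %u\n", a->port);
    }

    int main(void)
    {
        struct addr_info remote = { .port = 0 };  /* peer sent no port */
        bool reset_port = false;

        if (!remote.port) {        /* fill in a default for our own use */
            reset_port = true;
            remote.port = 10000;
        }
        do_connect(&remote);

        if (reset_port)            /* echo exactly what was received */
            remote.port = 0;
        echo_addr(&remote);
        return 0;
    }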
4492 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4493 +index c207728226372..a65b530975f54 100644
4494 +--- a/net/netfilter/nf_tables_api.c
4495 ++++ b/net/netfilter/nf_tables_api.c
4496 +@@ -6535,12 +6535,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
4497 + {
4498 + struct nft_object *newobj;
4499 + struct nft_trans *trans;
4500 +- int err;
4501 ++ int err = -ENOMEM;
4502 ++
4503 ++ if (!try_module_get(type->owner))
4504 ++ return -ENOENT;
4505 +
4506 + trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
4507 + sizeof(struct nft_trans_obj));
4508 + if (!trans)
4509 +- return -ENOMEM;
4510 ++ goto err_trans;
4511 +
4512 + newobj = nft_obj_init(ctx, type, attr);
4513 + if (IS_ERR(newobj)) {
4514 +@@ -6557,6 +6560,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
4515 +
4516 + err_free_trans:
4517 + kfree(trans);
4518 ++err_trans:
4519 ++ module_put(type->owner);
4520 + return err;
4521 + }
4522 +
4523 +@@ -8169,7 +8174,7 @@ static void nft_obj_commit_update(struct nft_trans *trans)
4524 + if (obj->ops->update)
4525 + obj->ops->update(obj, newobj);
4526 +
4527 +- kfree(newobj);
4528 ++ nft_obj_destroy(&trans->ctx, newobj);
4529 + }
4530 +
4531 + static void nft_commit_release(struct nft_trans *trans)
4532 +@@ -8914,7 +8919,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
4533 + break;
4534 + case NFT_MSG_NEWOBJ:
4535 + if (nft_trans_obj_update(trans)) {
4536 +- kfree(nft_trans_obj_newobj(trans));
4537 ++ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
4538 + nft_trans_destroy(trans);
4539 + } else {
4540 + trans->ctx.table->use--;
4541 +@@ -9574,10 +9579,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
4542 +
4543 + static void __nft_release_hook(struct net *net, struct nft_table *table)
4544 + {
4545 ++ struct nft_flowtable *flowtable;
4546 + struct nft_chain *chain;
4547 +
4548 + list_for_each_entry(chain, &table->chains, list)
4549 + nf_tables_unregister_hook(net, table, chain);
4550 ++ list_for_each_entry(flowtable, &table->flowtables, list)
4551 ++ nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
4552 + }
4553 +
4554 + static void __nft_release_hooks(struct net *net)
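The nf_tables_api.c hunks above share one theme: the object-update path must pin the type's module with try_module_get() and drop that reference on every failure path, and a replaced object must be torn down via nft_obj_destroy() rather than a bare kfree() so its destructor and refcounting run. A sketch of the goto-unwind shape with stubbed helpers (the stubs are illustrative, not kernel functions):

    #include <stdio.h>

    static int module_refs;

    static int try_module_get(void) { module_refs++; return 1; }
    static void module_put(void)    { module_refs--; }
    static void *alloc_trans(void)  { return 0; }  /* simulated -ENOMEM */

    static int update_object(void)
    {
        int err = -12;                /* -ENOMEM */
        void *trans;

        if (!try_module_get())
            return -2;                /* -ENOENT */

        trans = alloc_trans();
        if (!trans)
            goto err_trans;           /* must drop the reference we took */

        /* On success the reference belongs to the transaction and is
         * released later, at commit or abort, together with the object. */
        return 0;

    err_trans:
        module_put();
        return err;
    }

    int main(void)
    {
        update_object();
        printf("refs after failed update: %d\n", module_refs);  /* 0 */
        return 0;
    }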
4555 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
4556 +index 9656c16462222..2d36952b13920 100644
4557 +--- a/net/netfilter/nf_tables_offload.c
4558 ++++ b/net/netfilter/nf_tables_offload.c
4559 +@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
4560 +
4561 + expr = nft_expr_first(rule);
4562 + while (nft_expr_more(rule, expr)) {
4563 +- if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
4564 ++ if (expr->ops->offload_action &&
4565 ++ expr->ops->offload_action(expr))
4566 + num_actions++;
4567 +
4568 + expr = nft_expr_next(expr);
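nf_tables_offload.c stops testing a static NFT_OFFLOAD_F_ACTION flag and instead asks each expression through a new ->offload_action() callback; as the nft_immediate.c hunk further down shows, whether an expression counts as an action can depend on per-instance state (only verdicts qualify there). A sketch of swapping a constant flag for a predicate in an ops table:

    #include <stdbool.h>
    #include <stdio.h>

    struct expr;

    struct expr_ops {
        const char *name;
        /* Predicate replaces a static "is an action" flag, so the answer
         * can depend on the expression instance, not just its type. */
        bool (*offload_action)(const struct expr *);
    };

    struct expr {
        const struct expr_ops *ops;
        int dreg;                      /* 0 = verdict register, say */
    };

    static bool imm_offload_action(const struct expr *e)
    {
        return e->dreg == 0;           /* only verdicts are actions */
    }

    static const struct expr_ops imm_ops = {
        .name = "immediate",
        .offload_action = imm_offload_action,
    };

    int main(void)
    {
        struct expr verdict = { &imm_ops, 0 };
        struct expr data    = { &imm_ops, 1 };
        struct expr *exprs[] = { &verdict, &data };
        int num_actions = 0;

        for (unsigned i = 0; i < 2; i++)
            if (exprs[i]->ops->offload_action &&
                exprs[i]->ops->offload_action(exprs[i]))
                num_actions++;

        printf("actions: %d\n", num_actions);  /* 1 */
        return 0;
    }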
4569 +diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
4570 +index bbf3fcba3df40..5b5c607fbf83f 100644
4571 +--- a/net/netfilter/nft_dup_netdev.c
4572 ++++ b/net/netfilter/nft_dup_netdev.c
4573 +@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
4574 + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
4575 + }
4576 +
4577 ++static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
4578 ++{
4579 ++ return true;
4580 ++}
4581 ++
4582 + static struct nft_expr_type nft_dup_netdev_type;
4583 + static const struct nft_expr_ops nft_dup_netdev_ops = {
4584 + .type = &nft_dup_netdev_type,
4585 +@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
4586 + .init = nft_dup_netdev_init,
4587 + .dump = nft_dup_netdev_dump,
4588 + .offload = nft_dup_netdev_offload,
4589 ++ .offload_action = nft_dup_netdev_offload_action,
4590 + };
4591 +
4592 + static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
4593 +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
4594 +index cd59afde5b2f8..7730409f6f091 100644
4595 +--- a/net/netfilter/nft_fwd_netdev.c
4596 ++++ b/net/netfilter/nft_fwd_netdev.c
4597 +@@ -77,6 +77,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
4598 + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
4599 + }
4600 +
4601 ++static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
4602 ++{
4603 ++ return true;
4604 ++}
4605 ++
4606 + struct nft_fwd_neigh {
4607 + u8 sreg_dev;
4608 + u8 sreg_addr;
4609 +@@ -219,6 +224,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
4610 + .dump = nft_fwd_netdev_dump,
4611 + .validate = nft_fwd_validate,
4612 + .offload = nft_fwd_netdev_offload,
4613 ++ .offload_action = nft_fwd_netdev_offload_action,
4614 + };
4615 +
4616 + static const struct nft_expr_ops *
4617 +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
4618 +index 90c64d27ae532..d0f67d325bdfd 100644
4619 +--- a/net/netfilter/nft_immediate.c
4620 ++++ b/net/netfilter/nft_immediate.c
4621 +@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx,
4622 + return 0;
4623 + }
4624 +
4625 ++static bool nft_immediate_offload_action(const struct nft_expr *expr)
4626 ++{
4627 ++ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
4628 ++
4629 ++ if (priv->dreg == NFT_REG_VERDICT)
4630 ++ return true;
4631 ++
4632 ++ return false;
4633 ++}
4634 ++
4635 + static const struct nft_expr_ops nft_imm_ops = {
4636 + .type = &nft_imm_type,
4637 + .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
4638 +@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = {
4639 + .dump = nft_immediate_dump,
4640 + .validate = nft_immediate_validate,
4641 + .offload = nft_immediate_offload,
4642 +- .offload_flags = NFT_OFFLOAD_F_ACTION,
4643 ++ .offload_action = nft_immediate_offload_action,
4644 + };
4645 +
4646 + struct nft_expr_type nft_imm_type __read_mostly = {
4647 +diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
4648 +index 5e6459e116055..7013f55f05d1e 100644
4649 +--- a/net/netfilter/xt_socket.c
4650 ++++ b/net/netfilter/xt_socket.c
4651 +@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par)
4652 + {
4653 + if (par->family == NFPROTO_IPV4)
4654 + nf_defrag_ipv4_disable(par->net);
4655 ++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
4656 + else if (par->family == NFPROTO_IPV6)
4657 +- nf_defrag_ipv4_disable(par->net);
4658 ++ nf_defrag_ipv6_disable(par->net);
4659 ++#endif
4660 + }
4661 +
4662 + static struct xt_match socket_mt_reg[] __read_mostly = {
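The xt_socket.c hunk fixes a copy-paste bug (the IPv6 branch was calling the IPv4 defrag-disable helper) and compiles the branch out when IPv6 netfilter support is absent. A toy model of guarding a family-specific branch behind a config switch:

    #include <stdio.h>

    /* Stand-in for the kernel's config machinery: compile the IPv6
     * branch only when the feature is configured. */
    #define DEMO_CONFIG_IPV6 1

    static void defrag_ipv4_disable(void) { puts("ipv4 defrag off"); }
    #if DEMO_CONFIG_IPV6
    static void defrag_ipv6_disable(void) { puts("ipv6 defrag off"); }
    #endif

    static void mt_destroy(int family)
    {
        if (family == 4)
            defrag_ipv4_disable();
    #if DEMO_CONFIG_IPV6
        else if (family == 6)
            defrag_ipv6_disable();  /* not the v4 helper: families match */
    #endif
    }

    int main(void)
    {
        mt_destroy(6);
        return 0;
    }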
4663 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
4664 +index 076774034bb96..780d9e2246f39 100644
4665 +--- a/net/openvswitch/actions.c
4666 ++++ b/net/openvswitch/actions.c
4667 +@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
4668 + memcpy(addr, new_addr, sizeof(__be32[4]));
4669 + }
4670 +
4671 +-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
4672 ++static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
4673 + {
4674 ++ u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
4675 ++
4676 ++ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
4677 ++
4678 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
4679 ++ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
4680 ++ (__force __wsum)(ipv6_tclass << 12));
4681 ++
4682 ++ ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
4683 ++}
4684 ++
4685 ++static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
4686 ++{
4687 ++ u32 ofl;
4688 ++
4689 ++ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
4690 ++ fl = OVS_MASKED(ofl, fl, mask);
4691 ++
4692 + /* Bits 21-24 are always unmasked, so this retains their values. */
4693 +- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
4694 +- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
4695 +- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
4696 ++ nh->flow_lbl[0] = (u8)(fl >> 16);
4697 ++ nh->flow_lbl[1] = (u8)(fl >> 8);
4698 ++ nh->flow_lbl[2] = (u8)fl;
4699 ++
4700 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
4701 ++ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
4702 ++}
4703 ++
4704 ++static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
4705 ++{
4706 ++ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
4707 ++
4708 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
4709 ++ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
4710 ++ (__force __wsum)(new_ttl << 8));
4711 ++ nh->hop_limit = new_ttl;
4712 + }
4713 +
4714 + static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
4715 +@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
4716 + }
4717 + }
4718 + if (mask->ipv6_tclass) {
4719 +- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
4720 ++ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
4721 + flow_key->ip.tos = ipv6_get_dsfield(nh);
4722 + }
4723 + if (mask->ipv6_label) {
4724 +- set_ipv6_fl(nh, ntohl(key->ipv6_label),
4725 ++ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
4726 + ntohl(mask->ipv6_label));
4727 + flow_key->ipv6.label =
4728 + *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
4729 + }
4730 + if (mask->ipv6_hlimit) {
4731 +- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
4732 +- mask->ipv6_hlimit);
4733 ++ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
4734 + flow_key->ip.ttl = nh->hop_limit;
4735 + }
4736 + return 0;
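The openvswitch hunks keep skb->csum coherent for CHECKSUM_COMPLETE packets: every rewrite of the IPv6 traffic class, flow label, or hop limit now feeds the old and new values through csum_replace(). The arithmetic behind such helpers is RFC 1624 incremental update of a one's-complement sum; a self-contained check that the incremental update matches a full recompute:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Full one's-complement sum over 16-bit words. */
    static uint16_t csum_all(const uint16_t *p, size_t n)
    {
        uint32_t sum = 0;
        while (n--)
            sum += *p++;
        return csum_fold(sum);
    }

    int main(void)
    {
        uint16_t buf[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
        uint16_t sum_old = csum_all(buf, 4);

        /* Rewrite one header word, as the OVS set_ipv6_* helpers do. */
        uint16_t old_word = buf[1], new_word = 0x1111;
        buf[1] = new_word;

        /* RFC 1624: sum' = sum - old + new; in one's-complement terms,
         * sum + ~old + new, folded back to 16 bits. */
        uint16_t sum_incr =
            csum_fold((uint32_t)sum_old + (uint16_t)~old_word + new_word);

        printf("full %04x vs incremental %04x\n", csum_all(buf, 4), sum_incr);
        return 0;
    }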
4737 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
4738 +index 2a17eb77c9049..4ffea1290ce1c 100644
4739 +--- a/net/sched/act_ct.c
4740 ++++ b/net/sched/act_ct.c
4741 +@@ -516,11 +516,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
4742 + struct nf_conn *ct;
4743 + u8 dir;
4744 +
4745 +- /* Previously seen or loopback */
4746 +- ct = nf_ct_get(skb, &ctinfo);
4747 +- if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
4748 +- return false;
4749 +-
4750 + switch (family) {
4751 + case NFPROTO_IPV4:
4752 + if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
4753 +diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
4754 +index 4a964e9190b02..707615809e5a9 100644
4755 +--- a/net/smc/smc_pnet.c
4756 ++++ b/net/smc/smc_pnet.c
4757 +@@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
4758 + pnettable = &sn->pnettable;
4759 +
4760 + /* remove table entry */
4761 +- write_lock(&pnettable->lock);
4762 ++ mutex_lock(&pnettable->lock);
4763 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
4764 + list) {
4765 + if (!pnet_name ||
4766 +@@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
4767 + rc = 0;
4768 + }
4769 + }
4770 +- write_unlock(&pnettable->lock);
4771 ++ mutex_unlock(&pnettable->lock);
4772 +
4773 + /* if this is not the initial namespace, stop here */
4774 + if (net != &init_net)
4775 +@@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
4776 + sn = net_generic(net, smc_net_id);
4777 + pnettable = &sn->pnettable;
4778 +
4779 +- write_lock(&pnettable->lock);
4780 ++ mutex_lock(&pnettable->lock);
4781 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
4782 + if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
4783 + !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
4784 +@@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
4785 + break;
4786 + }
4787 + }
4788 +- write_unlock(&pnettable->lock);
4789 ++ mutex_unlock(&pnettable->lock);
4790 + return rc;
4791 + }
4792 +
4793 +@@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
4794 + sn = net_generic(net, smc_net_id);
4795 + pnettable = &sn->pnettable;
4796 +
4797 +- write_lock(&pnettable->lock);
4798 ++ mutex_lock(&pnettable->lock);
4799 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
4800 + if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
4801 + dev_put(pnetelem->ndev);
4802 +@@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
4803 + break;
4804 + }
4805 + }
4806 +- write_unlock(&pnettable->lock);
4807 ++ mutex_unlock(&pnettable->lock);
4808 + return rc;
4809 + }
4810 +
4811 +@@ -371,7 +371,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
4812 +
4813 + rc = -EEXIST;
4814 + new_netdev = true;
4815 +- write_lock(&pnettable->lock);
4816 ++ mutex_lock(&pnettable->lock);
4817 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
4818 + if (tmp_pe->type == SMC_PNET_ETH &&
4819 + !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
4820 +@@ -381,9 +381,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
4821 + }
4822 + if (new_netdev) {
4823 + list_add_tail(&new_pe->list, &pnettable->pnetlist);
4824 +- write_unlock(&pnettable->lock);
4825 ++ mutex_unlock(&pnettable->lock);
4826 + } else {
4827 +- write_unlock(&pnettable->lock);
4828 ++ mutex_unlock(&pnettable->lock);
4829 + kfree(new_pe);
4830 + goto out_put;
4831 + }
4832 +@@ -444,7 +444,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
4833 + new_pe->ib_port = ib_port;
4834 +
4835 + new_ibdev = true;
4836 +- write_lock(&pnettable->lock);
4837 ++ mutex_lock(&pnettable->lock);
4838 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
4839 + if (tmp_pe->type == SMC_PNET_IB &&
4840 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
4841 +@@ -454,9 +454,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
4842 + }
4843 + if (new_ibdev) {
4844 + list_add_tail(&new_pe->list, &pnettable->pnetlist);
4845 +- write_unlock(&pnettable->lock);
4846 ++ mutex_unlock(&pnettable->lock);
4847 + } else {
4848 +- write_unlock(&pnettable->lock);
4849 ++ mutex_unlock(&pnettable->lock);
4850 + kfree(new_pe);
4851 + }
4852 + return (new_ibdev) ? 0 : -EEXIST;
4853 +@@ -601,7 +601,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
4854 + pnettable = &sn->pnettable;
4855 +
4856 + /* dump pnettable entries */
4857 +- read_lock(&pnettable->lock);
4858 ++ mutex_lock(&pnettable->lock);
4859 + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
4860 + if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
4861 + continue;
4862 +@@ -616,7 +616,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
4863 + break;
4864 + }
4865 + }
4866 +- read_unlock(&pnettable->lock);
4867 ++ mutex_unlock(&pnettable->lock);
4868 + return idx;
4869 + }
4870 +
4871 +@@ -860,7 +860,7 @@ int smc_pnet_net_init(struct net *net)
4872 + struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
4873 +
4874 + INIT_LIST_HEAD(&pnettable->pnetlist);
4875 +- rwlock_init(&pnettable->lock);
4876 ++ mutex_init(&pnettable->lock);
4877 + INIT_LIST_HEAD(&pnetids_ndev->list);
4878 + rwlock_init(&pnetids_ndev->lock);
4879 +
4880 +@@ -940,7 +940,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
4881 + sn = net_generic(net, smc_net_id);
4882 + pnettable = &sn->pnettable;
4883 +
4884 +- read_lock(&pnettable->lock);
4885 ++ mutex_lock(&pnettable->lock);
4886 + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
4887 + if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
4888 + /* get pnetid of netdev device */
4889 +@@ -949,7 +949,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
4890 + break;
4891 + }
4892 + }
4893 +- read_unlock(&pnettable->lock);
4894 ++ mutex_unlock(&pnettable->lock);
4895 + return rc;
4896 + }
4897 +
4898 +@@ -1130,7 +1130,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
4899 + sn = net_generic(&init_net, smc_net_id);
4900 + pnettable = &sn->pnettable;
4901 +
4902 +- read_lock(&pnettable->lock);
4903 ++ mutex_lock(&pnettable->lock);
4904 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
4905 + if (tmp_pe->type == SMC_PNET_IB &&
4906 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
4907 +@@ -1140,7 +1140,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
4908 + break;
4909 + }
4910 + }
4911 +- read_unlock(&pnettable->lock);
4912 ++ mutex_unlock(&pnettable->lock);
4913 +
4914 + return rc;
4915 + }
4916 +@@ -1159,7 +1159,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
4917 + sn = net_generic(&init_net, smc_net_id);
4918 + pnettable = &sn->pnettable;
4919 +
4920 +- read_lock(&pnettable->lock);
4921 ++ mutex_lock(&pnettable->lock);
4922 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
4923 + if (tmp_pe->type == SMC_PNET_IB &&
4924 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
4925 +@@ -1168,7 +1168,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
4926 + break;
4927 + }
4928 + }
4929 +- read_unlock(&pnettable->lock);
4930 ++ mutex_unlock(&pnettable->lock);
4931 +
4932 + return rc;
4933 + }
4934 +diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
4935 +index 14039272f7e42..80a88eea49491 100644
4936 +--- a/net/smc/smc_pnet.h
4937 ++++ b/net/smc/smc_pnet.h
4938 +@@ -29,7 +29,7 @@ struct smc_link_group;
4939 + * @pnetlist: List of PNETIDs
4940 + */
4941 + struct smc_pnettable {
4942 +- rwlock_t lock;
4943 ++ struct mutex lock;
4944 + struct list_head pnetlist;
4945 + };
4946 +
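The smc_pnet.c and smc_pnet.h hunks are a single conversion of the pnettable rwlock to a mutex. The rationale, as far as it can be read from the hunks, is that code running under the table lock can itself acquire a mutex (which may sleep), and sleeping while holding a kernel rwlock is invalid; under a mutex it is fine. The shape of the conversion, sketched with pthreads:

    #include <pthread.h>
    #include <stdio.h>

    /* Was a pthread_rwlock_t with rdlock/wrlock callers; now one mutex,
     * because the walk below takes another lock that may block. In the
     * kernel, blocking under a rwlock is "sleeping in atomic context". */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

    static const char *table[] = { "pnet-a", "pnet-b" };

    static void apply_entry(const char *name)
    {
        pthread_mutex_lock(&device_lock);  /* may block: fine under a mutex */
        printf("applying %s\n", name);
        pthread_mutex_unlock(&device_lock);
    }

    static void walk_table(void)
    {
        pthread_mutex_lock(&table_lock);
        for (unsigned i = 0; i < 2; i++)
            apply_entry(table[i]);
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        walk_table();
        return 0;
    }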
4947 +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
4948 +index 01396dd1c899b..1d8ba233d0474 100644
4949 +--- a/net/tipc/name_table.c
4950 ++++ b/net/tipc/name_table.c
4951 +@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
4952 + list_for_each_entry(p, &sr->all_publ, all_publ)
4953 + if (p->key == *last_key)
4954 + break;
4955 +- if (p->key != *last_key)
4956 ++ if (list_entry_is_head(p, &sr->all_publ, all_publ))
4957 + return -EPIPE;
4958 + } else {
4959 + p = list_first_entry(&sr->all_publ,
4960 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4961 +index 3e63c83e641c5..7545321c3440b 100644
4962 +--- a/net/tipc/socket.c
4963 ++++ b/net/tipc/socket.c
4964 +@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
4965 + if (p->key == *last_publ)
4966 + break;
4967 + }
4968 +- if (p->key != *last_publ) {
4969 ++ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
4970 + /* We never set seq or call nl_dump_check_consistent()
4971 + * this means that setting prev_seq here will cause the
4972 + * consistence check to fail in the netlink callback
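Both TIPC hunks fix the same list idiom: after a full list_for_each_entry() walk the cursor points at the head sentinel, which is not a real entry, so testing a member such as p->key to detect "not found" reads memory outside any entry. list_entry_is_head() compares positions instead. A user-space model of the pattern, mimicking <linux/list.h>:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct publ { int key; struct list_head list; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct publ a = { .key = 1 };

        /* insert a at the tail of the circular list */
        a.list.next = &head; a.list.prev = head.prev;
        head.prev->next = &a.list; head.prev = &a.list;

        int wanted = 42;
        struct publ *p;

        /* the shape of list_for_each_entry() */
        for (p = container_of(head.next, struct publ, list);
             &p->list != &head;
             p = container_of(p->list.next, struct publ, list)) {
            if (p->key == wanted)
                break;
        }

        /* After a full walk, p is container_of(&head), which is NOT a
         * real struct publ: p->key would read outside any entry. Test
         * the position, not a field. */
        if (&p->list == &head)
            printf("key %d not found\n", wanted);
        return 0;
    }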
4973 +diff --git a/security/selinux/ima.c b/security/selinux/ima.c
4974 +index 727c4e43219d7..ff7aea6b3774a 100644
4975 +--- a/security/selinux/ima.c
4976 ++++ b/security/selinux/ima.c
4977 +@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
4978 + size_t policy_len;
4979 + int rc = 0;
4980 +
4981 +- WARN_ON(!mutex_is_locked(&state->policy_mutex));
4982 ++ lockdep_assert_held(&state->policy_mutex);
4983 +
4984 + state_str = selinux_ima_collect_state(state);
4985 + if (!state_str) {
4986 +@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
4987 + */
4988 + void selinux_ima_measure_state(struct selinux_state *state)
4989 + {
4990 +- WARN_ON(mutex_is_locked(&state->policy_mutex));
4991 ++ lockdep_assert_not_held(&state->policy_mutex);
4992 +
4993 + mutex_lock(&state->policy_mutex);
4994 + selinux_ima_measure_state_locked(state);
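The SELinux hunks replace WARN_ON(mutex_is_locked(...)) with lockdep assertions. mutex_is_locked() only says that somebody holds the mutex, so the old checks could pass while another task held the lock, or fire spuriously in the unlocked case; lockdep_assert_held() asserts that the current task holds it. A rough user-space analogue using an owner-tracking wrapper (a glibc-style integral pthread_t is assumed for the zero initializer):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* assert_held() checks that *this* thread holds the lock, not merely
     * that someone does - the distinction behind the hunks above. */
    struct owned_mutex {
        pthread_mutex_t m;
        pthread_t owner;
        int locked;
    };

    static void om_lock(struct owned_mutex *om)
    {
        pthread_mutex_lock(&om->m);
        om->owner = pthread_self();
        om->locked = 1;
    }

    static void om_unlock(struct owned_mutex *om)
    {
        om->locked = 0;
        pthread_mutex_unlock(&om->m);
    }

    static void om_assert_held(struct owned_mutex *om)
    {
        assert(om->locked && pthread_equal(om->owner, pthread_self()));
    }

    static struct owned_mutex policy_lock = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

    static void measure_locked(void)
    {
        om_assert_held(&policy_lock);  /* caller must hold the lock */
        puts("measuring under lock");
    }

    int main(void)
    {
        om_lock(&policy_lock);
        measure_locked();
        om_unlock(&policy_lock);
        return 0;
    }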
4995 +diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
4996 +index f5d260b1df4d1..15a4547d608ec 100644
4997 +--- a/tools/perf/util/data.c
4998 ++++ b/tools/perf/util/data.c
4999 +@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr)
5000 + if (!files)
5001 + return -ENOMEM;
5002 +
5003 +- data->dir.version = PERF_DIR_VERSION;
5004 +- data->dir.files = files;
5005 +- data->dir.nr = nr;
5006 +-
5007 + for (i = 0; i < nr; i++) {
5008 + struct perf_data_file *file = &files[i];
5009 +
5010 +@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr)
5011 + file->fd = ret;
5012 + }
5013 +
5014 ++ data->dir.version = PERF_DIR_VERSION;
5015 ++ data->dir.files = files;
5016 ++ data->dir.nr = nr;
5017 + return 0;
5018 +
5019 + out_err:
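The perf data.c fix publishes dir.version, dir.files and dir.nr only after every file has been opened successfully, so the error path never leaves the struct pointing at a half-initialized array for later cleanup to trip over. Publish-on-success in a sketch (constructor name hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct data {
        int  nr;
        int *files;
    };

    /* Fill a private array first and publish it into *d only once every
     * element has been initialized successfully. */
    static int create_files(struct data *d, int nr, int fail_at)
    {
        int *files = calloc(nr, sizeof(*files));
        if (!files)
            return -1;

        for (int i = 0; i < nr; i++) {
            if (i == fail_at) {     /* simulated open() failure */
                free(files);
                return -1;          /* d untouched: no stale pointers */
            }
            files[i] = 100 + i;
        }

        d->files = files;           /* publish only on full success */
        d->nr = nr;
        return 0;
    }

    int main(void)
    {
        struct data d = { 0, NULL };
        if (create_files(&d, 4, 2))
            printf("failed, d.nr=%d d.files=%p\n", d.nr, (void *)d.files);
        return 0;
    }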
5020 +diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
5021 +index 7c554234b43d4..f39c8ffc5a111 100644
5022 +--- a/tools/perf/util/evlist-hybrid.c
5023 ++++ b/tools/perf/util/evlist-hybrid.c
5024 +@@ -153,8 +153,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
5025 + perf_cpu_map__put(matched_cpus);
5026 + perf_cpu_map__put(unmatched_cpus);
5027 + }
5028 +-
5029 +- ret = (unmatched_count == events_nr) ? -1 : 0;
5030 ++ if (events_nr)
5031 ++ ret = (unmatched_count == events_nr) ? -1 : 0;
5032 + out:
5033 + perf_cpu_map__put(cpus);
5034 + return ret;
5035 +diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
5036 +index 1858435de7aaf..5cb90ca292186 100644
5037 +--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
5038 ++++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
5039 +@@ -235,7 +235,7 @@ SEC("sk_msg1")
5040 + int bpf_prog4(struct sk_msg_md *msg)
5041 + {
5042 + int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
5043 +- int *start, *end, *start_push, *end_push, *start_pop, *pop;
5044 ++ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
5045 +
5046 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
5047 + if (bytes)
5048 +@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg)
5049 + bpf_msg_pull_data(msg, *start, *end, 0);
5050 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
5051 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
5052 +- if (start_push && end_push)
5053 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
5054 ++ if (start_push && end_push) {
5055 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
5056 ++ if (err)
5057 ++ return SK_DROP;
5058 ++ }
5059 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
5060 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
5061 + if (start_pop && pop)
5062 +@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg)
5063 + {
5064 + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
5065 + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
5066 ++ int err = 0;
5067 + __u64 flags = 0;
5068 +
5069 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
5070 +@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg)
5071 +
5072 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
5073 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
5074 +- if (start_push && end_push)
5075 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
5076 ++ if (start_push && end_push) {
5077 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
5078 ++ if (err)
5079 ++ return SK_DROP;
5080 ++ }
5081 +
5082 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
5083 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
5084 +@@ -338,7 +345,7 @@ SEC("sk_msg5")
5085 + int bpf_prog10(struct sk_msg_md *msg)
5086 + {
5087 + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
5088 +- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
5089 ++ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
5090 +
5091 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
5092 + if (bytes)
5093 +@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg)
5094 + bpf_msg_pull_data(msg, *start, *end, 0);
5095 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
5096 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
5097 +- if (start_push && end_push)
5098 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
5099 ++ if (start_push && end_push) {
5100 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
5101 ++ if (err)
5102 ++ return SK_PASS;
5103 ++ }
5104 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
5105 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
5106 + if (start_pop && pop)
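The sockmap selftest hunks stop ignoring the return value of bpf_msg_push_data(), turning a helper failure into an explicit SK_DROP (or SK_PASS in the prog10 case, which exercises the other verdict). Check-and-propagate with a hypothetical fallible helper:

    #include <stdio.h>

    #define SK_DROP 0
    #define SK_PASS 1

    /* Stand-in for bpf_msg_push_data(), which can fail (e.g. -ENOMEM)
     * and previously had its result discarded. */
    static int push_data(int start, int end)
    {
        return (end < start) ? -22 : 0;   /* -EINVAL on a bad range */
    }

    static int prog(int start, int end)
    {
        int err = push_data(start, end);

        if (err)          /* act on failure instead of ignoring it */
            return SK_DROP;
        return SK_PASS;
    }

    int main(void)
    {
        printf("good range -> %d, bad range -> %d\n", prog(2, 3), prog(3, 2));
        return 0;
    }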
5107 +diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
5108 +index 2674ba20d5249..ff821025d3096 100755
5109 +--- a/tools/testing/selftests/net/mptcp/diag.sh
5110 ++++ b/tools/testing/selftests/net/mptcp/diag.sh
5111 +@@ -71,6 +71,36 @@ chk_msk_remote_key_nr()
5112 + __chk_nr "grep -c remote_key" $*
5113 + }
5114 +
5115 ++# $1: ns, $2: port
5116 ++wait_local_port_listen()
5117 ++{
5118 ++ local listener_ns="${1}"
5119 ++ local port="${2}"
5120 ++
5121 ++ local port_hex i
5122 ++
5123 ++ port_hex="$(printf "%04X" "${port}")"
5124 ++ for i in $(seq 10); do
5125 ++ ip netns exec "${listener_ns}" cat /proc/net/tcp | \
5126 ++ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
5127 ++ break
5128 ++ sleep 0.1
5129 ++ done
5130 ++}
5131 ++
5132 ++wait_connected()
5133 ++{
5134 ++ local listener_ns="${1}"
5135 ++ local port="${2}"
5136 ++
5137 ++ local port_hex i
5138 ++
5139 ++ port_hex="$(printf "%04X" "${port}")"
5140 ++ for i in $(seq 10); do
5141 ++ ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break
5142 ++ sleep 0.1
5143 ++ done
5144 ++}
5145 +
5146 + trap cleanup EXIT
5147 + ip netns add $ns
5148 +@@ -81,15 +111,15 @@ echo "a" | \
5149 + ip netns exec $ns \
5150 + ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
5151 + 0.0.0.0 >/dev/null &
5152 +-sleep 0.1
5153 ++wait_local_port_listen $ns 10000
5154 + chk_msk_nr 0 "no msk on netns creation"
5155 +
5156 + echo "b" | \
5157 + timeout ${timeout_test} \
5158 + ip netns exec $ns \
5159 +- ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
5160 ++ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
5161 + 127.0.0.1 >/dev/null &
5162 +-sleep 0.1
5163 ++wait_connected $ns 10000
5164 + chk_msk_nr 2 "after MPC handshake "
5165 + chk_msk_remote_key_nr 2 "....chk remote_key"
5166 + chk_msk_fallback_nr 0 "....chk no fallback"
5167 +@@ -101,13 +131,13 @@ echo "a" | \
5168 + ip netns exec $ns \
5169 + ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
5170 + 0.0.0.0 >/dev/null &
5171 +-sleep 0.1
5172 ++wait_local_port_listen $ns 10001
5173 + echo "b" | \
5174 + timeout ${timeout_test} \
5175 + ip netns exec $ns \
5176 +- ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
5177 ++ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
5178 + 127.0.0.1 >/dev/null &
5179 +-sleep 0.1
5180 ++wait_connected $ns 10001
5181 + chk_msk_fallback_nr 1 "check fallback"
5182 + flush_pids
5183 +
5184 +@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do
5185 + ./mptcp_connect -p $((I+10001)) -l -w 10 \
5186 + -t ${timeout_poll} 0.0.0.0 >/dev/null &
5187 + done
5188 +-sleep 0.1
5189 ++wait_local_port_listen $ns $((NR_CLIENTS + 10001))
5190 +
5191 + for I in `seq 1 $NR_CLIENTS`; do
5192 + echo "b" | \
5193 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
5194 +index 3e9d3df9c45cb..3be615ab1588b 100755
5195 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
5196 ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
5197 +@@ -624,6 +624,7 @@ chk_join_nr()
5198 + local ack_nr=$4
5199 + local count
5200 + local dump_stats
5201 ++ local with_cookie
5202 +
5203 + printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn"
5204 + count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
5205 +@@ -637,12 +638,20 @@ chk_join_nr()
5206 + fi
5207 +
5208 + echo -n " - synack"
5209 ++ with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
5210 + count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
5211 + [ -z "$count" ] && count=0
5212 + if [ "$count" != "$syn_ack_nr" ]; then
5213 +- echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
5214 +- ret=1
5215 +- dump_stats=1
5216 ++ # simult connections exceeding the limit with cookie enabled could go up to
5217 ++ # synack validation as the conn limit can be enforced reliably only after
5218 ++ # the subflow creation
5219 ++ if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then
5220 ++ echo -n "[ ok ]"
5221 ++ else
5222 ++ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
5223 ++ ret=1
5224 ++ dump_stats=1
5225 ++ fi
5226 + else
5227 + echo -n "[ ok ]"
5228 + fi