From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.16 commit in: /
Date: Wed, 02 Mar 2022 13:04:45
Message-Id: 1646226261.9dca87bbb73f85f2bea7d939047158ff44930ace.mpagano@gentoo
commit: 9dca87bbb73f85f2bea7d939047158ff44930ace
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Mar 2 13:04:21 2022 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Mar 2 13:04:21 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9dca87bb

Linux patch 5.16.12

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1011_linux-5.16.12.patch | 6431 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6435 insertions(+)

diff --git a/0000_README b/0000_README
index 7706410d..0785204e 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.16.11.patch
 From: http://www.kernel.org
 Desc: Linux 5.16.11

+Patch: 1011_linux-5.16.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.16.12
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.16.12.patch b/1011_linux-5.16.12.patch
new file mode 100644
index 00000000..3b6bed0f
--- /dev/null
+++ b/1011_linux-5.16.12.patch
@@ -0,0 +1,6431 @@
+diff --git a/Makefile b/Makefile
+index 00ba75768af73..09a9bb824afad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 16
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Gobble Gobble
+
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index 237d20dd5622d..286cec4d86d7b 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+
+- return 0;
++ return ret;
+ }
+ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ {
+@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ __asm__ __volatile__ (
+" mtsp %4, %%sr1\n"
+" zdep %2, 29, 2, %%r19\n"
+-" dep %%r0, 31, 2, %2\n"
++" dep %%r0, 31, 2, %3\n"
+" mtsar %%r19\n"
+" zvdepi -2, 32, %%r19\n"
+"1: ldw 0(%%sr1,%3),%%r20\n"
+@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+" andcm %%r21, %%r19, %%r21\n"
+" or %1, %%r20, %1\n"
+" or %2, %%r21, %2\n"
+-"3: stw %1,0(%%sr1,%1)\n"
++"3: stw %1,0(%%sr1,%3)\n"
+"4: stw %%r1,4(%%sr1,%3)\n"
+"5: stw %2,8(%%sr1,%3)\n"
+" copy %%r0, %0\n"
+@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
+ ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
+ break;
+ }
+-#ifdef CONFIG_PA20
+ switch (regs->iir & OPCODE2_MASK)
+ {
+ case OPCODE_FLDD_L:
+@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
+ flop=1;
+ ret = emulate_std(regs, R2(regs->iir),1);
+ break;
++#ifdef CONFIG_PA20
+ case OPCODE_LDD_L:
+ ret = emulate_ldd(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STD_L:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+- }
+ #endif
++ }
+ switch (regs->iir & OPCODE3_MASK)
+ {
+ case OPCODE_FLDW_L:
+ flop=1;
+- ret = emulate_ldw(regs, R2(regs->iir),0);
++ ret = emulate_ldw(regs, R2(regs->iir), 1);
+ break;
+ case OPCODE_LDW_M:
+- ret = emulate_ldw(regs, R2(regs->iir),1);
++ ret = emulate_ldw(regs, R2(regs->iir), 0);
+ break;
+
+ case OPCODE_FSTW_L:
+diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
+index d68b743d580f8..15d1fd0a70184 100644
+--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
++++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
+@@ -23,7 +23,7 @@ CONFIG_SLOB=y
+ CONFIG_SOC_CANAAN=y
+ CONFIG_SMP=y
+ CONFIG_NR_CPUS=2
+-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
++CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
+ CONFIG_CMDLINE_FORCE=y
+ # CONFIG_SECCOMP is not set
+ # CONFIG_STACKPROTECTOR is not set
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index 3397ddac1a30c..16308ef1e5787 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -50,6 +50,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
+ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
+
++obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
++
+ obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
+ obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index ed29e9c8f660c..d6a46ed0bf051 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -108,7 +108,7 @@ _save_context:
+ .option pop
+
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ #endif
+
+ #ifdef CONFIG_CONTEXT_TRACKING
+@@ -143,7 +143,7 @@ skip_context_tracking:
+ li t0, EXC_BREAKPOINT
+ beq s4, t0, 1f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_on
++ call __trace_hardirqs_on
+ #endif
+ csrs CSR_STATUS, SR_IE
+
+@@ -234,7 +234,7 @@ ret_from_exception:
+ REG_L s0, PT_STATUS(sp)
+ csrc CSR_STATUS, SR_IE
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ #endif
+ #ifdef CONFIG_RISCV_M_MODE
+ /* the MPP value is too large to be used as an immediate arg for addi */
+@@ -270,10 +270,10 @@ restore_all:
+ REG_L s1, PT_STATUS(sp)
+ andi t0, s1, SR_PIE
+ beqz t0, 1f
+- call trace_hardirqs_on
++ call __trace_hardirqs_on
+ j 2f
+ 1:
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ 2:
+ #endif
+ REG_L a0, PT_STATUS(sp)
+diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
+new file mode 100644
+index 0000000000000..095ac976d7da1
+--- /dev/null
++++ b/arch/riscv/kernel/trace_irq.c
+@@ -0,0 +1,27 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2022 Changbin Du <changbin.du@×××××.com>
++ */
++
++#include <linux/irqflags.h>
++#include <linux/kprobes.h>
++#include "trace_irq.h"
++
++/*
++ * trace_hardirqs_on/off require the caller to set up the frame pointer properly.
++ * Otherwise, CALLER_ADDR1 might trigger a paging exception in the kernel.
++ * Here we add one extra level so they can be safely called by
++ * low-level entry code where $fp is used for other purposes.
++ */
++
++void __trace_hardirqs_on(void)
++{
++ trace_hardirqs_on();
++}
++NOKPROBE_SYMBOL(__trace_hardirqs_on);
++
++void __trace_hardirqs_off(void)
++{
++ trace_hardirqs_off();
++}
++NOKPROBE_SYMBOL(__trace_hardirqs_off);
+diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
+new file mode 100644
+index 0000000000000..99fe67377e5ed
+--- /dev/null
++++ b/arch/riscv/kernel/trace_irq.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2022 Changbin Du <changbin.du@×××××.com>
++ */
++#ifndef __TRACE_IRQ_H
++#define __TRACE_IRQ_H
++
++void __trace_hardirqs_on(void);
++void __trace_hardirqs_off(void);
++
++#endif /* __TRACE_IRQ_H */
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index fcdf3f8bb59a6..84e23b9864f4c 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3905,12 +3905,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
+ walk_shadow_page_lockless_end(vcpu);
+ }
+
++static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
++{
++ /* make sure the token value is not 0 */
++ u32 id = vcpu->arch.apf.id;
++
++ if (id << 12 == 0)
++ vcpu->arch.apf.id = 1;
++
++ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
++}
++
+ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ gfn_t gfn)
+ {
+ struct kvm_arch_async_pf arch;
+
+- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
++ arch.token = alloc_apf_token(vcpu);
+ arch.gfn = gfn;
+ arch.direct_map = vcpu->arch.mmu->direct_map;
+ arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 57e2a55e46175..9875c4cc3c768 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2903,8 +2903,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ u64 data = msr->data;
+ switch (ecx) {
+ case MSR_AMD64_TSC_RATIO:
+- if (!msr->host_initiated && !svm->tsc_scaling_enabled)
+- return 1;
++
++ if (!svm->tsc_scaling_enabled) {
++
++ if (!msr->host_initiated)
++ return 1;
++ /*
++ * In case TSC scaling is not enabled, always
++ * leave this MSR at the default value.
++ *
++ * Due to a bug in qemu 6.2.0, it would try to set
++ * this MSR to 0 if TSC scaling is not enabled.
++ * Ignore this value as well.
++ */
++ if (data != 0 && data != svm->tsc_ratio_msr)
++ return 1;
++ break;
++ }
+
+ if (data & TSC_RATIO_RSVD)
+ return 1;
+diff --git a/block/fops.c b/block/fops.c
+index 0da147edbd186..77a5579d8de66 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
+ struct kiocb *iocb = dio->iocb;
+ ssize_t ret;
+
++ WRITE_ONCE(iocb->private, NULL);
++
+ if (likely(!bio->bi_status)) {
+ ret = dio->size;
+ iocb->ki_pos += ret;
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
+index f242157bc81bb..ae8375e9d2681 100644
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ irqmask &= ~0x10;
+ pci_write_config_byte(dev, 0x5a, irqmask);
+
++ /*
++ * HPT371 chips physically have only one channel, the secondary one,
++ * but the primary channel registers do exist! Go figure...
++ * So, we manually disable the non-existing channel here
++ * (if the BIOS hasn't done this already).
++ */
++ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
++ u8 mcr1;
++
++ pci_read_config_byte(dev, 0x50, &mcr1);
++ mcr1 &= ~0x04;
++ pci_write_config_byte(dev, 0x50, mcr1);
++ }
++
+ /*
+ * default to pci clock. make sure MA15/16 are set to output
+ * to prevent drives having problems with 40-pin cables. Needed
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 68ea1f949daa9..6b66306932016 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -629,6 +629,9 @@ re_probe:
+ drv->remove(dev);
+
+ devres_release_all(dev);
++ arch_teardown_dma_ops(dev);
++ kfree(dev->dma_range_map);
++ dev->dma_range_map = NULL;
+ driver_sysfs_remove(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+@@ -1208,6 +1211,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+
+ devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
++ kfree(dev->dma_range_map);
++ dev->dma_range_map = NULL;
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index d2656581a6085..4a446259a184e 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
+ ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (d->chip->clear_ack) {
+ if (d->chip->ack_invert && !ret)
+- ret = regmap_write(map, reg,
+- d->mask_buf[i]);
++ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+- ret = regmap_write(map, reg,
+- ~d->mask_buf[i]);
++ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
+ data->status_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+- ret = regmap_write(map, reg,
+- data->status_buf[i]);
++ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+- ret = regmap_write(map, reg,
+- ~data->status_buf[i]);
++ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0)
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ d->status_buf[i] & d->mask_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+- ret = regmap_write(map, reg,
+- (d->status_buf[i] &
+- d->mask_buf[i]));
++ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+- ret = regmap_write(map, reg,
+- ~(d->status_buf[i] &
+- d->mask_buf[i]));
++ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
+index 744d136b721bc..15d61793f53b1 100644
+--- a/drivers/clk/ingenic/jz4725b-cgu.c
++++ b/drivers/clk/ingenic/jz4725b-cgu.c
+@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
+ },
+
+ [JZ4725B_CLK_I2S] = {
+- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
++ "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
+ .mux = { CGU_REG_CPCCR, 31, 1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
+- .gate = { CGU_REG_CLKGR, 6 },
+ },
+
+ [JZ4725B_CLK_SPI] = {
+diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
+index 702a9bdc05598..5df9f1ead48e0 100644
+--- a/drivers/clk/qcom/gcc-msm8994.c
++++ b/drivers/clk/qcom/gcc-msm8994.c
+@@ -107,42 +107,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .hw = &gpll4.clkr.hw },
+ };
+
+-static struct clk_rcg2 system_noc_clk_src = {
+- .cmd_rcgr = 0x0120,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "system_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+- .cmd_rcgr = 0x0150,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "config_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+- .cmd_rcgr = 0x0190,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "periph_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+ static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+@@ -1149,8 +1113,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1434,8 +1396,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1763,8 +1723,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1777,8 +1735,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1806,9 +1762,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1821,9 +1774,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1853,9 +1803,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1883,9 +1830,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1898,9 +1842,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1929,9 +1870,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1959,8 +1897,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1988,9 +1924,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2003,9 +1936,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2033,9 +1963,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2063,9 +1990,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2123,8 +2047,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2152,8 +2074,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2197,8 +2117,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2212,8 +2130,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2242,8 +2158,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2257,8 +2171,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_1_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2363,8 +2275,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2487,8 +2397,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2502,8 +2410,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2546,9 +2452,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+@@ -2695,6 +2598,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
++
++ /*
++ * The following clocks should NOT be managed by this driver, but they once were
++ * mistakenly added. Now they are only here to indicate that they are left undefined
++ * on purpose, even though the names will stay in the header file (for ABI sanity).
++ */
++ [CONFIG_NOC_CLK_SRC] = NULL,
++ [PERIPH_NOC_CLK_SRC] = NULL,
++ [SYSTEM_NOC_CLK_SRC] = NULL,
+ };
+
+ static struct gdsc *gcc_msm8994_gdscs[] = {
+diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
+index ce63cbd14d69a..24155c038f6d0 100644
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
+ level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
+ polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
+
+- switch (type) {
+- case IRQ_TYPE_EDGE_BOTH:
++ if (type == IRQ_TYPE_EDGE_BOTH) {
+ if (bank->gpio_type == GPIO_TYPE_V2) {
+- bank->toggle_edge_mode &= ~mask;
+ rockchip_gpio_writel_bit(bank, d->hwirq, 1,
+ bank->gpio_regs->int_bothedge);
+ goto out;
+@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
+ else
+ polarity |= mask;
+ }
+- break;
+- case IRQ_TYPE_EDGE_RISING:
+- bank->toggle_edge_mode &= ~mask;
+- level |= mask;
+- polarity |= mask;
+- break;
+- case IRQ_TYPE_EDGE_FALLING:
+- bank->toggle_edge_mode &= ~mask;
+- level |= mask;
+- polarity &= ~mask;
+- break;
+- case IRQ_TYPE_LEVEL_HIGH:
+- bank->toggle_edge_mode &= ~mask;
+- level &= ~mask;
+- polarity |= mask;
+- break;
+- case IRQ_TYPE_LEVEL_LOW:
+- bank->toggle_edge_mode &= ~mask;
+- level &= ~mask;
+- polarity &= ~mask;
+- break;
+- default:
+- ret = -EINVAL;
+- goto out;
++ } else {
++ if (bank->gpio_type == GPIO_TYPE_V2) {
++ rockchip_gpio_writel_bit(bank, d->hwirq, 0,
++ bank->gpio_regs->int_bothedge);
++ } else {
++ bank->toggle_edge_mode &= ~mask;
++ }
++ switch (type) {
++ case IRQ_TYPE_EDGE_RISING:
++ level |= mask;
++ polarity |= mask;
++ break;
++ case IRQ_TYPE_EDGE_FALLING:
++ level |= mask;
++ polarity &= ~mask;
++ break;
++ case IRQ_TYPE_LEVEL_HIGH:
++ level &= ~mask;
++ polarity |= mask;
++ break;
++ case IRQ_TYPE_LEVEL_LOW:
++ level &= ~mask;
++ polarity &= ~mask;
++ break;
++ default:
++ ret = -EINVAL;
++ goto out;
++ }
+ }
+
+ rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
+diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
+index c026e7141e4ea..f62f267dfd7d2 100644
+--- a/drivers/gpio/gpio-tegra186.c
++++ b/drivers/gpio/gpio-tegra186.c
+@@ -341,9 +341,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
+ return offset + pin;
+ }
+
++#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
++
+ static void tegra186_irq_ack(struct irq_data *data)
+ {
+- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
+ void __iomem *base;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+@@ -355,7 +358,8 @@ static void tegra186_irq_ack(struct irq_data *data)
+
+ static void tegra186_irq_mask(struct irq_data *data)
+ {
+- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
+ void __iomem *base;
+ u32 value;
+
+@@ -370,7 +374,8 @@ static void tegra186_irq_mask(struct irq_data *data)
+
+ static void tegra186_irq_unmask(struct irq_data *data)
+ {
+- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
+ void __iomem *base;
+ u32 value;
+
+@@ -385,7 +390,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
+
+ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
+ {
+- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
++ struct tegra_gpio *gpio = to_tegra_gpio(gc);
+ void __iomem *base;
+ u32 value;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index ab3851c26f71c..8c7637233c816 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2014,6 +2014,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ return -ENODEV;
+ }
+
++ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
++ amdgpu_aspm = 0;
++
+ if (amdgpu_virtual_display ||
+ amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ supports_atomic = true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index de9b55383e9f8..d01ddce2dec1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
+ static int soc15_asic_reset(struct amdgpu_device *adev)
+ {
+ /* original raven doesn't have full asic reset */
+- if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+- !(adev->apu_flags & AMD_APU_IS_RAVEN2))
++ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
++ (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ return 0;
+
+ switch (soc15_asic_reset_method(adev)) {
+@@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle)
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
+
++ /*
++ * MMHUB PG needs to be disabled for Picasso for
++ * stability reasons.
++ */
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+- AMD_PG_SUPPORT_MMHUB |
+ AMD_PG_SUPPORT_VCN;
+ } else {
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0117b00b4ed83..7a5bb5a3456a6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4232,6 +4232,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ }
+ #endif
+
++ /* Disable vblank IRQs aggressively for power-saving. */
++ adev_to_drm(adev)->vblank_disable_immediate = true;
++
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+@@ -4277,19 +4280,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ update_connector_ext_caps(aconnector);
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
++
++ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
++ * PSR is also supported.
++ */
++ if (link->psr_settings.psr_feature_enabled)
++ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+
+
+ }
+
+- /*
+- * Disable vblank IRQs aggressively for power-saving.
+- *
+- * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+- * is also supported.
+- */
+- adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+-
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+index 1861a147a7fa1..5c5cbeb59c4d9 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+@@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
+ clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+
+ /* Refresh bounding box */
++ DC_FP_START();
+ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
+ clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
++ DC_FP_END();
+ }
+
+ static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index e890e063cde31..1e7fe6bea300f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -999,10 +999,13 @@ static bool dc_construct(struct dc *dc,
+ goto fail;
+ #ifdef CONFIG_DRM_AMD_DC_DCN
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+-#endif
+
+- if (dc->res_pool->funcs->update_bw_bounding_box)
++ if (dc->res_pool->funcs->update_bw_bounding_box) {
++ DC_FP_START();
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
++ DC_FP_END();
++ }
++#endif
+
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index e2d9a46d0e1ad..6b066ceab4128 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1876,10 +1876,6 @@ enum dc_status dc_remove_stream_from_ctx(
+ dc->res_pool,
+ del_pipe->stream_res.stream_enc,
+ false);
+- /* Release link encoder from stream in new dc_state. */
+- if (dc->res_pool->funcs->link_enc_unassign)
+- dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
+-
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(del_pipe)) {
+ update_hpo_dp_stream_engine_usage(
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 446d37320b948..b55118388d2d7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -418,6 +418,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
+ return 0;
+ }
+
++static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
++{
++ struct amdgpu_device *adev = smu->adev;
++ uint32_t *board_reserved;
++ uint16_t *freq_table_gfx;
++ uint32_t i;
++
++ /* Fix some OEM SKU specific stability issues */
++ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
++ if ((adev->pdev->device == 0x73DF) &&
++ (adev->pdev->revision == 0XC3) &&
++ (adev->pdev->subsystem_device == 0x16C2) &&
++ (adev->pdev->subsystem_vendor == 0x1043))
++ board_reserved[0] = 1387;
++
++ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
++ if ((adev->pdev->device == 0x73DF) &&
++ (adev->pdev->revision == 0XC3) &&
++ ((adev->pdev->subsystem_device == 0x16C2) ||
++ (adev->pdev->subsystem_device == 0x133C)) &&
++ (adev->pdev->subsystem_vendor == 0x1043)) {
++ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
++ if (freq_table_gfx[i] > 2500)
++ freq_table_gfx[i] = 2500;
++ }
++ }
++
++ return 0;
++}
++
+ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
+ {
+ int ret = 0;
+@@ -438,7 +468,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
+ if (ret)
+ return ret;
+
+- return ret;
++ return sienna_cichlid_patch_pptable_quirk(smu);
+ }
+
+ static int sienna_cichlid_tables_init(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 12893e7be89bb..f5f5de362ff2c 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return quirks;
+
++ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ drm_parse_cea_ext(connector, edid);
+
+ /*
+@@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+
+- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
+index 8d9d888e93161..5a2f96d39ac78 100644
+--- a/drivers/gpu/drm/i915/display/intel_bw.c
++++ b/drivers/gpu/drm/i915/display/intel_bw.c
+@@ -681,6 +681,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
++ bool changed = false;
+ u32 mask = 0;
+
+ /* FIXME earlier gens need some checks too */
+@@ -724,6 +725,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
+ new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+ new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+
++ changed = true;
++
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ new_bw_state->data_rate[crtc->pipe],
+ new_bw_state->num_active_planes[crtc->pipe]);
+ }
+
+- if (!new_bw_state)
++ old_bw_state = intel_atomic_get_old_bw_state(state);
++ new_bw_state = intel_atomic_get_new_bw_state(state);
++
++ if (new_bw_state &&
++ intel_can_enable_sagv(dev_priv, old_bw_state) !=
++ intel_can_enable_sagv(dev_priv, new_bw_state))
++ changed = true;
++
++ /*
++ * If none of our inputs (data rates, number of active
++ * planes, SAGV yes/no) changed then nothing to do here.
++ */
++ if (!changed)
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+@@ -814,7 +829,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
+ */
+ new_bw_state->qgv_points_mask = ~allowed_points & mask;
+
+- old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * If the actual mask had changed we need to make sure that
+ * the commits are serialized(in case this is a nomodeset, nonblocking)
+diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
+index 46c6eecbd9175..0ceaed1c96562 100644
+--- a/drivers/gpu/drm/i915/display/intel_bw.h
++++ b/drivers/gpu/drm/i915/display/intel_bw.h
+@@ -30,19 +30,19 @@ struct intel_bw_state {
+ */
+ u8 pipe_sagv_reject;
+
++ /* bitmask of active pipes */
++ u8 active_pipes;
++
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states, not to confuse
+ * with pipe_sagv_mask.
+ */
+- u8 qgv_points_mask;
++ u16 qgv_points_mask;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+
+- /* bitmask of active pipes */
+- u8 active_pipes;
+-
+ int min_cdclk;
+ };
+
+diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+index 5e20f340730fb..601929bab874c 100644
+--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+@@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
+ if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
+ DG2_PHY_DP_TX_ACK_MASK, 25))
+ DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
+- phy);
++ phy_name(phy));
+ }
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index dbd7d0d83a141..7784c30fe8937 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -691,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+ {
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_encoder *encoder = &dig_port->base;
++ intel_wakeref_t tc_cold_wref;
++ enum intel_display_power_domain domain;
+ int active_links = 0;
+
+ mutex_lock(&dig_port->tc_lock);
+@@ -702,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
+- if (active_links) {
+- enum intel_display_power_domain domain;
+- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
+
+- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
++ tc_cold_wref = tc_cold_block(dig_port, &domain);
+
++ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
++ if (active_links) {
+ if (!icl_tc_phy_is_connected(dig_port))
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
+-
+- tc_cold_unblock(dig_port, domain, tc_cold_wref);
++ } else {
++ /*
++ * TBT-alt is the default mode in any case where the PHY ownership
++ * is not held (regardless of the sink's connected live state), so
++ * we'll just switch to disconnected mode from it here without
++ * a note.
++ */
++ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
++ drm_dbg_kms(&i915->drm,
++ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
++ dig_port->tc_port_name,
++ tc_port_mode_name(dig_port->tc_mode));
++ icl_tc_phy_disconnect(dig_port);
+ }
+
++ tc_cold_unblock(dig_port, domain, tc_cold_wref);
++
+ drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 75c1522fdae8c..7cbffd9a7be88 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4019,6 +4019,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+ return ret;
+ }
+
++ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
++ intel_can_enable_sagv(dev_priv, old_bw_state)) {
++ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
++ if (ret)
++ return ret;
++ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
++ ret = intel_atomic_lock_global_state(&new_bw_state->base);
++ if (ret)
++ return ret;
++ }
++
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+@@ -4034,17 +4045,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+ intel_can_enable_sagv(dev_priv, new_bw_state);
+ }
+
+- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+- intel_can_enable_sagv(dev_priv, old_bw_state)) {
+- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+- if (ret)
+- return ret;
+- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+- ret = intel_atomic_lock_global_state(&new_bw_state->base);
+- if (ret)
+- return ret;
+- }
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index e3ed52d96f423..3e61184e194c9 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -538,9 +538,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
+ if (ret)
+ return ret;
+
+- ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
+- if (ret)
+- return ret;
++ /*
++ * post_crtc_powerdown will have called pm_runtime_put, so we
++ * don't need it here; otherwise we'll get the reference counting
++ * wrong.
++ */
+
+ return 0;
+ }
+diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
+index d198a10848c6b..a89a408182e60 100644
+--- a/drivers/gpu/host1x/syncpt.c
++++ b/drivers/gpu/host1x/syncpt.c
+@@ -225,27 +225,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+ void *ref;
+ struct host1x_waitlist *waiter;
+ int err = 0, check_count = 0;
+- u32 val;
+
+ if (value)
+- *value = 0;
+-
+- /* first check cache */
+- if (host1x_syncpt_is_expired(sp, thresh)) {
+- if (value)
+- *value = host1x_syncpt_load(sp);
++ *value = host1x_syncpt_load(sp);
+
++ if (host1x_syncpt_is_expired(sp, thresh))
+ return 0;
+- }
+-
+- /* try to read from register */
+- val = host1x_hw_syncpt_load(sp->host, sp);
+- if (host1x_syncpt_is_expired(sp, thresh)) {
+- if (value)
+- *value = val;
+-
+- goto done;
+- }
+
+ if (!timeout) {
+ err = -EAGAIN;
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index 3501a3ead4ba6..3ae961986fc31 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
+
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
+ &hwmon_thermal_ops);
+- /*
+- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+- * so ignore that error but forward any other error.
+- */
+- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+- return PTR_ERR(tzd);
++ if (IS_ERR(tzd)) {
++ if (PTR_ERR(tzd) != -ENODEV)
++ return PTR_ERR(tzd);
++ dev_info(dev, "temp%d_input not attached to any thermal zone\n",
++ index + 1);
++ devm_kfree(dev, tdata);
++ return 0;
++ }
+
+ err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
+ if (err)
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index b0678c351e829..c3a2b4c0b3b26 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register iio device\n");
+- goto err_trigger_unregister;
++ goto err_pm_cleanup;
+ }
+
+ return 0;
+
++err_pm_cleanup:
++ pm_runtime_dont_use_autosuspend(dev);
++ pm_runtime_disable(dev);
+ err_trigger_unregister:
+ bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
+ err_buffer_cleanup:
+diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
+index 32989d91b9829..f7fd9e046588b 100644
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -173,12 +173,20 @@ struct fxls8962af_data {
+ u16 upper_thres;
+ };
+
+-const struct regmap_config fxls8962af_regmap_conf = {
++const struct regmap_config fxls8962af_i2c_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXLS8962AF_MAX_REG,
+ };
+-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
++EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
++
++const struct regmap_config fxls8962af_spi_regmap_conf = {
++ .reg_bits = 8,
++ .pad_bits = 8,
++ .val_bits = 8,
++ .max_register = FXLS8962AF_MAX_REG,
++};
++EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
+
+ enum {
+ fxls8962af_idx_x,
+diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
+index cfb004b204559..6bde9891effbf 100644
+--- a/drivers/iio/accel/fxls8962af-i2c.c
++++ b/drivers/iio/accel/fxls8962af-i2c.c
+@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
+ {
+ struct regmap *regmap;
+
+- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
++ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
+index 57108d3d480b6..6f4dff3238d3c 100644
+--- a/drivers/iio/accel/fxls8962af-spi.c
++++ b/drivers/iio/accel/fxls8962af-spi.c
+@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
+ {
+ struct regmap *regmap;
+
+- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
++ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to initialize spi regmap\n");
+ return PTR_ERR(regmap);
+diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
+index b67572c3ef069..9cbe98c3ba9a2 100644
+--- a/drivers/iio/accel/fxls8962af.h
++++ b/drivers/iio/accel/fxls8962af.h
+@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
+ int fxls8962af_core_remove(struct device *dev);
+
+ extern const struct dev_pm_ops fxls8962af_pm_ops;
+-extern const struct regmap_config fxls8962af_regmap_conf;
++extern const struct regmap_config fxls8962af_i2c_regmap_conf;
++extern const struct regmap_config fxls8962af_spi_regmap_conf;
+
+ #endif /* _FXLS8962AF_H_ */
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index 24c9387c29687..ba6c8ca488b1a 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1589,11 +1589,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+- goto err_buffer_cleanup;
++ goto err_pm_cleanup;
+ }
+
+ return 0;
+
++err_pm_cleanup:
++ pm_runtime_dont_use_autosuspend(&client->dev);
++ pm_runtime_disable(&client->dev);
+ err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ err_trigger_unregister:
+diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
+index 4c359fb054801..c53a3398b14c4 100644
+--- a/drivers/iio/accel/mma9551.c
++++ b/drivers/iio/accel/mma9551.c
+@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+- goto out_poweroff;
++ goto err_pm_cleanup;
+ }
+
+ return 0;
+
++err_pm_cleanup:
++ pm_runtime_dont_use_autosuspend(&client->dev);
++ pm_runtime_disable(&client->dev);
+ out_poweroff:
+ mma9551_set_device_state(client, false);
+
+diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
+index ba3ecb3b57dcd..1599b75724d4f 100644
+--- a/drivers/iio/accel/mma9553.c
++++ b/drivers/iio/accel/mma9553.c
+@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+- goto out_poweroff;
++ goto err_pm_cleanup;
+ }
+
+ dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
+ return 0;
+
++err_pm_cleanup:
++ pm_runtime_dont_use_autosuspend(&client->dev);
++ pm_runtime_disable(&client->dev);
+ out_poweroff:
+ mma9551_set_device_state(client, false);
+ return ret;
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index e45c600fccc0b..18c154afbd7ac 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -76,7 +76,7 @@
+ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
+ #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
+ #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
+-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6)
++#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
+ #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
+
+ /* AD7124_FILTER_X */
+diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
+index 42ea8bc7e7805..adc5ceaef8c93 100644
+--- a/drivers/iio/adc/men_z188_adc.c
++++ b/drivers/iio/adc/men_z188_adc.c
+@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
+ struct z188_adc *adc;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
++ int ret;
+
+ indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
+ if (!indio_dev)
+@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
+ adc->mem = mem;
+ mcb_set_drvdata(dev, indio_dev);
+
+- return iio_device_register(indio_dev);
++ ret = iio_device_register(indio_dev);
++ if (ret)
++ goto err_unmap;
++
++ return 0;
+
++err_unmap:
++ iounmap(adc->base);
+ err:
+ mcb_release_mem(mem);
+ return -ENXIO;
1498 +diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
1499 +index d84ae6b008c1b..e8fc4d01f30b6 100644
1500 +--- a/drivers/iio/adc/ti-tsc2046.c
1501 ++++ b/drivers/iio/adc/ti-tsc2046.c
1502 +@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
1503 + mutex_lock(&priv->slock);
1504 +
1505 + size = 0;
1506 +- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
1507 ++ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
1508 + size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
1509 + tsc2046_adc_group_set_cmd(priv, group, ch_idx);
1510 + group++;
1511 +@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
1512 + * enabled.
1513 + */
1514 + size = 0;
1515 +- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
1516 ++ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
1517 + size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
1518 +
1519 + priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
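
Both tsc2046 loops were bounded by the channel count, which also covers the software timestamp channel, while priv->l only has slots for the hardware channels; bounding by ARRAY_SIZE(priv->l) keeps the layout array from being overrun. A toy illustration of the safe bound (the sizes here are invented, not the driver's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct layout { int cmd; };

    int main(void)
    {
            struct layout l[8];   /* hardware channels only */
            int num_channels = 9; /* 8 hw channels + timestamp */

            for (size_t i = 0; i < ARRAY_SIZE(l); i++) /* safe bound */
                    l[i].cmd = (int)i;

            printf("touched %zu of %d scan elements\n", ARRAY_SIZE(l), num_channels);
            return 0;
    }
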
1520 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
1521 +index 17b939a367ad0..81a6d09788bd7 100644
1522 +--- a/drivers/iio/gyro/bmg160_core.c
1523 ++++ b/drivers/iio/gyro/bmg160_core.c
1524 +@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
1525 + ret = iio_device_register(indio_dev);
1526 + if (ret < 0) {
1527 + dev_err(dev, "unable to register iio device\n");
1528 +- goto err_buffer_cleanup;
1529 ++ goto err_pm_cleanup;
1530 + }
1531 +
1532 + return 0;
1533 +
1534 ++err_pm_cleanup:
1535 ++ pm_runtime_dont_use_autosuspend(dev);
1536 ++ pm_runtime_disable(dev);
1537 + err_buffer_cleanup:
1538 + iio_triggered_buffer_cleanup(indio_dev);
1539 + err_trigger_unregister:
1540 +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
1541 +index ed129321a14da..f9b4540db1f43 100644
1542 +--- a/drivers/iio/imu/adis16480.c
1543 ++++ b/drivers/iio/imu/adis16480.c
1544 +@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
1545 + {
1546 + const struct spi_device_id *id = spi_get_device_id(spi);
1547 + const struct adis_data *adis16480_data;
1548 ++ irq_handler_t trigger_handler = NULL;
1549 + struct iio_dev *indio_dev;
1550 + struct adis16480 *st;
1551 + int ret;
1552 +@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
1553 + st->clk_freq = st->chip_info->int_clk;
1554 + }
1555 +
1556 ++ /* Only use our trigger handler if burst mode is supported */
1557 ++ if (adis16480_data->burst_len)
1558 ++ trigger_handler = adis16480_trigger_handler;
1559 ++
1560 + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
1561 +- adis16480_trigger_handler);
1562 ++ trigger_handler);
1563 + if (ret)
1564 + return ret;
1565 +
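
For adis16480 the buffer setup now receives a NULL handler when the chip has no burst mode, which lets the core fall back to its generic path instead of calling a handler that assumes burst data. A minimal model of that probe-time selection (the typedef and helpers are stand-ins, not the kernel's):

    #include <stdio.h>

    typedef int (*irq_handler_t)(int irq, void *priv);

    static int burst_handler(int irq, void *priv)
    {
            (void)irq; (void)priv;
            puts("burst read");
            return 0;
    }

    static void setup_buffer(irq_handler_t handler)
    {
            printf("buffer uses the %s handler\n",
                   handler ? "device-specific" : "default");
    }

    int main(void)
    {
            int burst_len = 0; /* chip without burst support */
            irq_handler_t trigger_handler = NULL;

            if (burst_len)
                    trigger_handler = burst_handler;
            setup_buffer(trigger_handler);
            return 0;
    }
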
1566 +diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
1567 +index 1dabfd615dabf..f89724481df93 100644
1568 +--- a/drivers/iio/imu/kmx61.c
1569 ++++ b/drivers/iio/imu/kmx61.c
1570 +@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
1571 + ret = iio_device_register(data->acc_indio_dev);
1572 + if (ret < 0) {
1573 + dev_err(&client->dev, "Failed to register acc iio device\n");
1574 +- goto err_buffer_cleanup_mag;
1575 ++ goto err_pm_cleanup;
1576 + }
1577 +
1578 + ret = iio_device_register(data->mag_indio_dev);
1579 +@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
1580 +
1581 + err_iio_unregister_acc:
1582 + iio_device_unregister(data->acc_indio_dev);
1583 ++err_pm_cleanup:
1584 ++ pm_runtime_dont_use_autosuspend(&client->dev);
1585 ++ pm_runtime_disable(&client->dev);
1586 + err_buffer_cleanup_mag:
1587 + if (client->irq > 0)
1588 + iio_triggered_buffer_cleanup(data->mag_indio_dev);
1589 +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1590 +index f2cbbc756459b..32d9a5e30685b 100644
1591 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1592 ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
1593 +@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
1594 + if (err < 0)
1595 + return err;
1596 +
1597 ++ /*
1598 ++ * we need to wait for sensor settling time before
1599 ++ * reading data in order to avoid corrupted samples
1600 ++ */
1601 + delay = 1000000000 / sensor->odr;
1602 +- usleep_range(delay, 2 * delay);
1603 ++ usleep_range(3 * delay, 4 * delay);
1604 +
1605 + err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
1606 + if (err < 0)
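
The st_lsm6dsx delay is one output-data-rate period; sensor->odr appears to be stored in milli-Hz in this driver, which would make 1000000000 / odr a value in microseconds, though that reading is an assumption here. The fix waits three to four periods instead of one so the first one-shot sample after switching the sensor on has settled:

    #include <stdio.h>

    int main(void)
    {
            unsigned int odr_mhz = 104000; /* 104 Hz expressed in milli-Hz */
            unsigned long long period_us = 1000000000ULL / odr_mhz;

            printf("settle for %llu-%llu us before reading\n",
                   3 * period_us, 4 * period_us);
            return 0;
    }
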
1607 +diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
1608 +index f96f531753495..3d4d21f979fab 100644
1609 +--- a/drivers/iio/magnetometer/bmc150_magn.c
1610 ++++ b/drivers/iio/magnetometer/bmc150_magn.c
1611 +@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
1612 + ret = iio_device_register(indio_dev);
1613 + if (ret < 0) {
1614 + dev_err(dev, "unable to register iio device\n");
1615 +- goto err_disable_runtime_pm;
1616 ++ goto err_pm_cleanup;
1617 + }
1618 +
1619 + dev_dbg(dev, "Registered device %s\n", name);
1620 + return 0;
1621 +
1622 +-err_disable_runtime_pm:
1623 ++err_pm_cleanup:
1624 ++ pm_runtime_dont_use_autosuspend(dev);
1625 + pm_runtime_disable(dev);
1626 + err_buffer_cleanup:
1627 + iio_triggered_buffer_cleanup(indio_dev);
1628 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1629 +index a8da4291e7e3b..41ec05c4b0d0e 100644
1630 +--- a/drivers/infiniband/core/cma.c
1631 ++++ b/drivers/infiniband/core/cma.c
1632 +@@ -3370,22 +3370,30 @@ err:
1633 + static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1634 + const struct sockaddr *dst_addr)
1635 + {
1636 +- if (!src_addr || !src_addr->sa_family) {
1637 +- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
1638 +- src_addr->sa_family = dst_addr->sa_family;
1639 +- if (IS_ENABLED(CONFIG_IPV6) &&
1640 +- dst_addr->sa_family == AF_INET6) {
1641 +- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
1642 +- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
1643 +- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
1644 +- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1645 +- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
1646 +- } else if (dst_addr->sa_family == AF_IB) {
1647 +- ((struct sockaddr_ib *) src_addr)->sib_pkey =
1648 +- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
1649 +- }
1650 +- }
1651 +- return rdma_bind_addr(id, src_addr);
1652 ++ struct sockaddr_storage zero_sock = {};
1653 ++
1654 ++ if (src_addr && src_addr->sa_family)
1655 ++ return rdma_bind_addr(id, src_addr);
1656 ++
1657 ++ /*
1658 ++ * When the src_addr is not specified, automatically supply an any addr
1659 ++ */
1660 ++ zero_sock.ss_family = dst_addr->sa_family;
1661 ++ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
1662 ++ struct sockaddr_in6 *src_addr6 =
1663 ++ (struct sockaddr_in6 *)&zero_sock;
1664 ++ struct sockaddr_in6 *dst_addr6 =
1665 ++ (struct sockaddr_in6 *)dst_addr;
1666 ++
1667 ++ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
1668 ++ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1669 ++ id->route.addr.dev_addr.bound_dev_if =
1670 ++ dst_addr6->sin6_scope_id;
1671 ++ } else if (dst_addr->sa_family == AF_IB) {
1672 ++ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
1673 ++ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
1674 ++ }
1675 ++ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
1676 + }
1677 +
1678 + /*
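
The cma_bind_addr() rewrite builds the wildcard source address in a stack sockaddr_storage and copies only the family-specific pieces it needs, instead of writing into id->route before the bind has succeeded. A userspace sketch of filling such a wildcard for the IPv6 case:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            struct sockaddr_storage zero_sock = { 0 };
            struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
                                        .sin6_scope_id = 2 };

            zero_sock.ss_family = dst.sin6_family;
            if (dst.sin6_family == AF_INET6) {
                    struct sockaddr_in6 *src = (struct sockaddr_in6 *)&zero_sock;

                    /* keep the link-local scope so the bind stays on the
                     * same interface as the destination */
                    src->sin6_scope_id = dst.sin6_scope_id;
            }
            printf("bind family %d, scope %u\n", zero_sock.ss_family,
                   ((struct sockaddr_in6 *)&zero_sock)->sin6_scope_id);
            return 0;
    }
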
1679 +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
1680 +index 0a3b28142c05b..41c272980f91c 100644
1681 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c
1682 ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
1683 +@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
1684 + };
1685 +
1686 + static const struct attribute_group port_diagc_group = {
1687 +- .name = "linkcontrol",
1688 ++ .name = "diag_counters",
1689 + .attrs = port_diagc_attributes,
1690 + };
1691 +
1692 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1693 +index e39709dee179d..be96701cf281e 100644
1694 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1695 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
1696 +@@ -2664,6 +2664,8 @@ static void rtrs_clt_dev_release(struct device *dev)
1697 + {
1698 + struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
1699 +
1700 ++ mutex_destroy(&clt->paths_ev_mutex);
1701 ++ mutex_destroy(&clt->paths_mutex);
1702 + kfree(clt);
1703 + }
1704 +
1705 +@@ -2693,6 +2695,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
1706 + return ERR_PTR(-ENOMEM);
1707 + }
1708 +
1709 ++ clt->dev.class = rtrs_clt_dev_class;
1710 ++ clt->dev.release = rtrs_clt_dev_release;
1711 + uuid_gen(&clt->paths_uuid);
1712 + INIT_LIST_HEAD_RCU(&clt->paths_list);
1713 + clt->paths_num = paths_num;
1714 +@@ -2709,53 +2713,51 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
1715 + init_waitqueue_head(&clt->permits_wait);
1716 + mutex_init(&clt->paths_ev_mutex);
1717 + mutex_init(&clt->paths_mutex);
1718 ++ device_initialize(&clt->dev);
1719 +
1720 +- clt->dev.class = rtrs_clt_dev_class;
1721 +- clt->dev.release = rtrs_clt_dev_release;
1722 + err = dev_set_name(&clt->dev, "%s", sessname);
1723 + if (err)
1724 +- goto err;
1725 ++ goto err_put;
1726 ++
1727 + /*
1728 + * Suppress user space notification until
1729 + * sysfs files are created
1730 + */
1731 + dev_set_uevent_suppress(&clt->dev, true);
1732 +- err = device_register(&clt->dev);
1733 +- if (err) {
1734 +- put_device(&clt->dev);
1735 +- goto err;
1736 +- }
1737 ++ err = device_add(&clt->dev);
1738 ++ if (err)
1739 ++ goto err_put;
1740 +
1741 + clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
1742 + if (!clt->kobj_paths) {
1743 + err = -ENOMEM;
1744 +- goto err_dev;
1745 ++ goto err_del;
1746 + }
1747 + err = rtrs_clt_create_sysfs_root_files(clt);
1748 + if (err) {
1749 + kobject_del(clt->kobj_paths);
1750 + kobject_put(clt->kobj_paths);
1751 +- goto err_dev;
1752 ++ goto err_del;
1753 + }
1754 + dev_set_uevent_suppress(&clt->dev, false);
1755 + kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
1756 +
1757 + return clt;
1758 +-err_dev:
1759 +- device_unregister(&clt->dev);
1760 +-err:
1761 ++err_del:
1762 ++ device_del(&clt->dev);
1763 ++err_put:
1764 + free_percpu(clt->pcpu_path);
1765 +- kfree(clt);
1766 ++ put_device(&clt->dev);
1767 + return ERR_PTR(err);
1768 + }
1769 +
1770 + static void free_clt(struct rtrs_clt *clt)
1771 + {
1772 +- free_permits(clt);
1773 + free_percpu(clt->pcpu_path);
1774 +- mutex_destroy(&clt->paths_ev_mutex);
1775 +- mutex_destroy(&clt->paths_mutex);
1776 +- /* release callback will free clt in last put */
1777 ++
1778 ++ /*
1779 ++ * release callback will free clt and destroy mutexes in last put
1780 ++ */
1781 + device_unregister(&clt->dev);
1782 + }
1783 +
1784 +@@ -2872,6 +2874,7 @@ void rtrs_clt_close(struct rtrs_clt *clt)
1785 + rtrs_clt_destroy_sess_files(sess, NULL);
1786 + kobject_put(&sess->kobj);
1787 + }
1788 ++ free_permits(clt);
1789 + free_clt(clt);
1790 + }
1791 + EXPORT_SYMBOL(rtrs_clt_close);
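
The rtrs-clt rework moves to the split device_initialize()/device_add() pattern: once the object is initialized its memory belongs to the refcount, every error path drops a reference instead of calling kfree() directly, and the release callback becomes the single place that frees (now also destroying the mutexes). A toy refcount model of that ownership rule:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refs;
            void (*release)(struct obj *);
    };

    static void obj_init(struct obj *o, void (*rel)(struct obj *))
    {
            o->refs = 1; /* like device_initialize(), takes the first ref */
            o->release = rel;
    }

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    o->release(o);
    }

    static void obj_release(struct obj *o)
    {
            puts("release: destroying locks and freeing");
            free(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                    return 1;
            obj_init(o, obj_release);
            /* any failure past this point must obj_put(), never free() */
            obj_put(o);
            return 0;
    }
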
1792 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1793 +index e174e853f8a40..285b766e4e704 100644
1794 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1795 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1796 +@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
1797 + spin_unlock(&host->target_lock);
1798 +
1799 + /*
1800 +- * Wait for tl_err and target port removal tasks.
1801 ++ * srp_queue_remove_work() queues a call to
1802 ++ * srp_remove_target(). The latter function cancels
1803 ++ * target->tl_err_work so waiting for the remove works to
1804 ++ * finish is sufficient.
1805 + */
1806 +- flush_workqueue(system_long_wq);
1807 + flush_workqueue(srp_remove_wq);
1808 +
1809 + kfree(host);
1810 +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
1811 +index fc0bed14bfb10..02ca6e5fa0dc7 100644
1812 +--- a/drivers/mtd/mtdcore.c
1813 ++++ b/drivers/mtd/mtdcore.c
1814 +@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
1815 + config.stride = 1;
1816 + config.read_only = true;
1817 + config.root_only = true;
1818 ++ config.ignore_wp = true;
1819 + config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
1820 + config.priv = mtd;
1821 +
1822 +@@ -830,6 +831,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
1823 + config.owner = THIS_MODULE;
1824 + config.type = NVMEM_TYPE_OTP;
1825 + config.root_only = true;
1826 ++ config.ignore_wp = true;
1827 + config.reg_read = reg_read;
1828 + config.size = size;
1829 + config.of_node = np;
1830 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1831 +index 125dafe1db7ee..4ce596daeaae3 100644
1832 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1833 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1834 +@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
1835 + MODULE_FIRMWARE(FW_FILE_NAME_E1);
1836 + MODULE_FIRMWARE(FW_FILE_NAME_E1H);
1837 + MODULE_FIRMWARE(FW_FILE_NAME_E2);
1838 ++MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
1839 ++MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
1840 ++MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
1841 +
1842 + int bnx2x_num_queues;
1843 + module_param_named(num_queues, bnx2x_num_queues, int, 0444);
1844 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1845 +index 7eaf74e5b2929..fab8dd73fa84c 100644
1846 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1847 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1848 +@@ -4719,8 +4719,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
1849 + return rc;
1850 +
1851 + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
1852 +- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
1853 +- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
1854 ++ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
1855 ++ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
1856 ++ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
1857 ++ }
1858 + req->mask = cpu_to_le32(vnic->rx_mask);
1859 + return hwrm_req_send_silent(bp, req);
1860 + }
1861 +@@ -7774,6 +7776,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
1862 + return 0;
1863 + }
1864 +
1865 ++static void bnxt_remap_fw_health_regs(struct bnxt *bp)
1866 ++{
1867 ++ if (!bp->fw_health)
1868 ++ return;
1869 ++
1870 ++ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
1871 ++ bp->fw_health->status_reliable = true;
1872 ++ bp->fw_health->resets_reliable = true;
1873 ++ } else {
1874 ++ bnxt_try_map_fw_health_reg(bp);
1875 ++ }
1876 ++}
1877 ++
1878 + static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
1879 + {
1880 + struct bnxt_fw_health *fw_health = bp->fw_health;
1881 +@@ -8623,6 +8638,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
1882 + vnic->uc_filter_count = 1;
1883 +
1884 + vnic->rx_mask = 0;
1885 ++ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
1886 ++ goto skip_rx_mask;
1887 ++
1888 + if (bp->dev->flags & IFF_BROADCAST)
1889 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
1890 +
1891 +@@ -8632,7 +8650,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
1892 + if (bp->dev->flags & IFF_ALLMULTI) {
1893 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1894 + vnic->mc_list_count = 0;
1895 +- } else {
1896 ++ } else if (bp->dev->flags & IFF_MULTICAST) {
1897 + u32 mask = 0;
1898 +
1899 + bnxt_mc_list_updated(bp, &mask);
1900 +@@ -8643,6 +8661,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
1901 + if (rc)
1902 + goto err_out;
1903 +
1904 ++skip_rx_mask:
1905 + rc = bnxt_hwrm_set_coal(bp);
1906 + if (rc)
1907 + netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
1908 +@@ -9830,8 +9849,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
1909 + resc_reinit = true;
1910 + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
1911 + fw_reset = true;
1912 +- else if (bp->fw_health && !bp->fw_health->status_reliable)
1913 +- bnxt_try_map_fw_health_reg(bp);
1914 ++ else
1915 ++ bnxt_remap_fw_health_regs(bp);
1916 +
1917 + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
1918 + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
1919 +@@ -10310,13 +10329,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
1920 + goto half_open_err;
1921 + }
1922 +
1923 +- rc = bnxt_alloc_mem(bp, false);
1924 ++ rc = bnxt_alloc_mem(bp, true);
1925 + if (rc) {
1926 + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
1927 + goto half_open_err;
1928 + }
1929 +- rc = bnxt_init_nic(bp, false);
1930 ++ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
1931 ++ rc = bnxt_init_nic(bp, true);
1932 + if (rc) {
1933 ++ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
1934 + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
1935 + goto half_open_err;
1936 + }
1937 +@@ -10324,7 +10345,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
1938 +
1939 + half_open_err:
1940 + bnxt_free_skbs(bp);
1941 +- bnxt_free_mem(bp, false);
1942 ++ bnxt_free_mem(bp, true);
1943 + dev_close(bp->dev);
1944 + return rc;
1945 + }
1946 +@@ -10334,9 +10355,10 @@ half_open_err:
1947 + */
1948 + void bnxt_half_close_nic(struct bnxt *bp)
1949 + {
1950 +- bnxt_hwrm_resource_free(bp, false, false);
1951 ++ bnxt_hwrm_resource_free(bp, false, true);
1952 + bnxt_free_skbs(bp);
1953 +- bnxt_free_mem(bp, false);
1954 ++ bnxt_free_mem(bp, true);
1955 ++ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
1956 + }
1957 +
1958 + void bnxt_reenable_sriov(struct bnxt *bp)
1959 +@@ -10752,7 +10774,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
1960 + if (dev->flags & IFF_ALLMULTI) {
1961 + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1962 + vnic->mc_list_count = 0;
1963 +- } else {
1964 ++ } else if (dev->flags & IFF_MULTICAST) {
1965 + mc_update = bnxt_mc_list_updated(bp, &mask);
1966 + }
1967 +
1968 +@@ -10820,9 +10842,10 @@ skip_uc:
1969 + !bnxt_promisc_ok(bp))
1970 + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
1971 + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
1972 +- if (rc && vnic->mc_list_count) {
1973 ++ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
1974 + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
1975 + rc);
1976 ++ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
1977 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
1978 + vnic->mc_list_count = 0;
1979 + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
1980 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1981 +index 6bacd5fae6ba5..2846d14756671 100644
1982 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1983 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1984 +@@ -1919,6 +1919,7 @@ struct bnxt {
1985 + #define BNXT_STATE_RECOVER 12
1986 + #define BNXT_STATE_FW_NON_FATAL_COND 13
1987 + #define BNXT_STATE_FW_ACTIVATE_RESET 14
1988 ++#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
1989 +
1990 + #define BNXT_NO_FW_ACCESS(bp) \
1991 + (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
1992 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1993 +index 951c4c569a9b3..61e0373079316 100644
1994 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1995 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
1996 +@@ -366,6 +366,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
1997 + }
1998 + }
1999 +
2000 ++/* Live patch status in NVM */
2001 ++#define BNXT_LIVEPATCH_NOT_INSTALLED 0
2002 ++#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
2003 ++#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
2004 ++#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
2005 ++ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
2006 ++#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
2007 ++
2008 ++#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
2009 ++
2010 + static int
2011 + bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
2012 + {
2013 +@@ -373,8 +383,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
2014 + struct hwrm_fw_livepatch_query_input *query_req;
2015 + struct hwrm_fw_livepatch_output *patch_resp;
2016 + struct hwrm_fw_livepatch_input *patch_req;
2017 ++ u16 flags, live_patch_state;
2018 ++ bool activated = false;
2019 + u32 installed = 0;
2020 +- u16 flags;
2021 + u8 target;
2022 + int rc;
2023 +
2024 +@@ -393,7 +404,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
2025 + hwrm_req_drop(bp, query_req);
2026 + return rc;
2027 + }
2028 +- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
2029 + patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
2030 + patch_resp = hwrm_req_hold(bp, patch_req);
2031 +
2032 +@@ -406,12 +416,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
2033 + }
2034 +
2035 + flags = le16_to_cpu(query_resp->status_flags);
2036 +- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
2037 ++ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
2038 ++
2039 ++ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
2040 + continue;
2041 +- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
2042 +- !strncmp(query_resp->active_ver, query_resp->install_ver,
2043 +- sizeof(query_resp->active_ver)))
2044 ++
2045 ++ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
2046 ++ activated = true;
2047 + continue;
2048 ++ }
2049 ++
2050 ++ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
2051 ++ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
2052 ++ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
2053 ++ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
2054 +
2055 + patch_req->fw_target = target;
2056 + rc = hwrm_req_send(bp, patch_req);
2057 +@@ -423,8 +441,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
2058 + }
2059 +
2060 + if (!rc && !installed) {
2061 +- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
2062 +- rc = -ENOENT;
2063 ++ if (activated) {
2064 ++ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
2065 ++ rc = -EEXIST;
2066 ++ } else {
2067 ++ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
2068 ++ rc = -ENOENT;
2069 ++ }
2070 + }
2071 + hwrm_req_drop(bp, query_req);
2072 + hwrm_req_drop(bp, patch_req);
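
The livepatch fix decodes the two status flags (INSTALL and ACTIVE) into four states: neither set means nothing to do, INSTALL alone means activate, ACTIVE alone means the patch was removed from NVM but still runs and should be deactivated, and both set means it is already activated. A compact decoder with illustrative flag values (the real ones are the FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_* constants aliased in the hunk above):

    #include <stdio.h>

    #define FLAG_INSTALL 0x1 /* illustrative values, not the firmware's */
    #define FLAG_ACTIVE  0x2
    #define STATE_MASK   (FLAG_INSTALL | FLAG_ACTIVE)

    static const char *state_name(unsigned int flags)
    {
            switch (flags & STATE_MASK) {
            case 0:                          return "not installed";
            case FLAG_INSTALL:               return "installed: activate";
            case FLAG_ACTIVE:                return "removed: deactivate";
            case FLAG_INSTALL | FLAG_ACTIVE: return "already activated";
            }
            return "unreachable";
    }

    int main(void)
    {
            for (unsigned int f = 0; f < 4; f++)
                    printf("flags 0x%x -> %s\n", f, state_name(f));
            return 0;
    }
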
2073 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2074 +index 7307df49c1313..f147ad5a65315 100644
2075 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2076 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2077 +@@ -25,6 +25,7 @@
2078 + #include "bnxt_hsi.h"
2079 + #include "bnxt.h"
2080 + #include "bnxt_hwrm.h"
2081 ++#include "bnxt_ulp.h"
2082 + #include "bnxt_xdp.h"
2083 + #include "bnxt_ptp.h"
2084 + #include "bnxt_ethtool.h"
2085 +@@ -1944,6 +1945,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
2086 + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
2087 + fec->active_fec |= ETHTOOL_FEC_LLRS;
2088 + break;
2089 ++ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
2090 ++ fec->active_fec |= ETHTOOL_FEC_OFF;
2091 ++ break;
2092 + }
2093 + return 0;
2094 + }
2095 +@@ -3429,7 +3433,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
2096 + if (!skb)
2097 + return -ENOMEM;
2098 + data = skb_put(skb, pkt_size);
2099 +- eth_broadcast_addr(data);
2100 ++ ether_addr_copy(&data[i], bp->dev->dev_addr);
2101 + i += ETH_ALEN;
2102 + ether_addr_copy(&data[i], bp->dev->dev_addr);
2103 + i += ETH_ALEN;
2104 +@@ -3523,9 +3527,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2105 + if (!offline) {
2106 + bnxt_run_fw_tests(bp, test_mask, &test_results);
2107 + } else {
2108 +- rc = bnxt_close_nic(bp, false, false);
2109 +- if (rc)
2110 ++ bnxt_ulp_stop(bp);
2111 ++ rc = bnxt_close_nic(bp, true, false);
2112 ++ if (rc) {
2113 ++ bnxt_ulp_start(bp, rc);
2114 + return;
2115 ++ }
2116 + bnxt_run_fw_tests(bp, test_mask, &test_results);
2117 +
2118 + buf[BNXT_MACLPBK_TEST_IDX] = 1;
2119 +@@ -3535,6 +3542,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2120 + if (rc) {
2121 + bnxt_hwrm_mac_loopback(bp, false);
2122 + etest->flags |= ETH_TEST_FL_FAILED;
2123 ++ bnxt_ulp_start(bp, rc);
2124 + return;
2125 + }
2126 + if (bnxt_run_loopback(bp))
2127 +@@ -3560,7 +3568,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2128 + }
2129 + bnxt_hwrm_phy_loopback(bp, false, false);
2130 + bnxt_half_close_nic(bp);
2131 +- rc = bnxt_open_nic(bp, false, true);
2132 ++ rc = bnxt_open_nic(bp, true, true);
2133 ++ bnxt_ulp_start(bp, rc);
2134 + }
2135 + if (rc || bnxt_test_irq(bp)) {
2136 + buf[BNXT_IRQ_TEST_IDX] = 1;
2137 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
2138 +index 8171f4912fa01..3a0eeb3737767 100644
2139 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
2140 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
2141 +@@ -595,18 +595,24 @@ timeout_abort:
2142 +
2143 + /* Last byte of resp contains valid bit */
2144 + valid = ((u8 *)ctx->resp) + len - 1;
2145 +- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
2146 ++ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
2147 + /* make sure we read from updated DMA memory */
2148 + dma_rmb();
2149 + if (*valid)
2150 + break;
2151 +- usleep_range(1, 5);
2152 ++ if (j < 10) {
2153 ++ udelay(1);
2154 ++ j++;
2155 ++ } else {
2156 ++ usleep_range(20, 30);
2157 ++ j += 20;
2158 ++ }
2159 + }
2160 +
2161 + if (j >= HWRM_VALID_BIT_DELAY_USEC) {
2162 + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
2163 + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
2164 +- hwrm_total_timeout(i),
2165 ++ hwrm_total_timeout(i) + j,
2166 + le16_to_cpu(ctx->req->req_type),
2167 + le16_to_cpu(ctx->req->seq_id), len,
2168 + *valid);
2169 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
2170 +index 9a9fc4e8041b6..380ef69afb51b 100644
2171 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
2172 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
2173 +@@ -94,7 +94,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
2174 + }
2175 +
2176 +
2177 +-#define HWRM_VALID_BIT_DELAY_USEC 150
2178 ++#define HWRM_VALID_BIT_DELAY_USEC 50000
2179 +
2180 + static inline bool bnxt_cfa_hwrm_message(u16 req_type)
2181 + {
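
The HWRM wait loop now polls in two phases: 1 us busy-wait steps for the first 10 us, then 20-30 us sleeps, with j accumulating elapsed time against a budget raised from 150 us to 50 ms. A userspace model of the loop, with usleep() standing in for the kernel's udelay()/usleep_range():

    #include <stdio.h>
    #include <unistd.h>

    #define HWRM_VALID_BIT_DELAY_USEC 50000 /* raised from 150 us */

    static unsigned int poll_valid(const volatile unsigned char *valid)
    {
            unsigned int j;

            for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
                    if (*valid)
                            break;
                    if (j < 10) {       /* first 10 us: tight 1 us steps */
                            usleep(1);  /* udelay(1) in the driver */
                            j++;
                    } else {            /* then back off */
                            usleep(25); /* usleep_range(20, 30) */
                            j += 20;
                    }
            }
            return j;
    }

    int main(void)
    {
            volatile unsigned char valid = 1; /* pretend the DMA write landed */

            printf("polled for ~%u us\n", poll_valid(&valid));
            return 0;
    }

Counting elapsed microseconds in j rather than loop iterations is what keeps the timeout message's hwrm_total_timeout(i) + j arithmetic honest.
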
2182 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2183 +index d5d33325a413e..1c6bc69197a53 100644
2184 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2185 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2186 +@@ -5919,10 +5919,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
2187 + be64_to_cpu(session_token));
2188 + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2189 + H_SESSION_ERR_DETECTED, session_token, 0, 0);
2190 +- if (rc)
2191 ++ if (rc) {
2192 + netdev_err(netdev,
2193 + "H_VIOCTL initiated failover failed, rc %ld\n",
2194 + rc);
2195 ++ goto last_resort;
2196 ++ }
2197 ++
2198 ++ return count;
2199 +
2200 + last_resort:
2201 + netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
2202 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2203 +index d3af1457fa0dc..1eddb99c4e9e1 100644
2204 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2205 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2206 +@@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
2207 + /* There is no need to reset BW when mqprio mode is on. */
2208 + if (pf->flags & I40E_FLAG_TC_MQPRIO)
2209 + return 0;
2210 +-
2211 +- if (!vsi->mqprio_qopt.qopt.hw) {
2212 +- if (pf->flags & I40E_FLAG_DCB_ENABLED)
2213 +- goto skip_reset;
2214 +-
2215 +- if (IS_ENABLED(CONFIG_I40E_DCB) &&
2216 +- i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
2217 +- goto skip_reset;
2218 +-
2219 ++ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
2220 + ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
2221 + if (ret)
2222 + dev_info(&pf->pdev->dev,
2223 +@@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
2224 + vsi->seid);
2225 + return ret;
2226 + }
2227 +-
2228 +-skip_reset:
2229 + memset(&bw_data, 0, sizeof(bw_data));
2230 + bw_data.tc_valid_bits = enabled_tc;
2231 + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2232 +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
2233 +index b3e1fc6a0a8eb..b067dd9c71e78 100644
2234 +--- a/drivers/net/ethernet/intel/ice/ice.h
2235 ++++ b/drivers/net/ethernet/intel/ice/ice.h
2236 +@@ -280,7 +280,6 @@ enum ice_pf_state {
2237 + ICE_VFLR_EVENT_PENDING,
2238 + ICE_FLTR_OVERFLOW_PROMISC,
2239 + ICE_VF_DIS,
2240 +- ICE_VF_DEINIT_IN_PROGRESS,
2241 + ICE_CFG_BUSY,
2242 + ICE_SERVICE_SCHED,
2243 + ICE_SERVICE_DIS,
2244 +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
2245 +index e9a0159cb8b92..ec8c980f73421 100644
2246 +--- a/drivers/net/ethernet/intel/ice/ice_common.c
2247 ++++ b/drivers/net/ethernet/intel/ice/ice_common.c
2248 +@@ -3319,7 +3319,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2249 +
2250 + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
2251 + !ice_fw_supports_report_dflt_cfg(hw)) {
2252 +- struct ice_link_default_override_tlv tlv;
2253 ++ struct ice_link_default_override_tlv tlv = { 0 };
2254 +
2255 + status = ice_get_link_default_override(&tlv, pi);
2256 + if (status)
2257 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
2258 +index 5b4be432b60ce..8ee778aaa8000 100644
2259 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
2260 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
2261 +@@ -1772,7 +1772,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
2262 + * reset, so print the event prior to reset.
2263 + */
2264 + ice_print_vf_rx_mdd_event(vf);
2265 ++ mutex_lock(&pf->vf[i].cfg_lock);
2266 + ice_reset_vf(&pf->vf[i], false);
2267 ++ mutex_unlock(&pf->vf[i].cfg_lock);
2268 + }
2269 + }
2270 + }
2271 +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
2272 +index 442b031b0edc0..fdb9c4b367588 100644
2273 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
2274 ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
2275 +@@ -1121,9 +1121,12 @@ exit:
2276 + static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
2277 + {
2278 + struct timespec64 now, then;
2279 ++ int ret;
2280 +
2281 + then = ns_to_timespec64(delta);
2282 +- ice_ptp_gettimex64(info, &now, NULL);
2283 ++ ret = ice_ptp_gettimex64(info, &now, NULL);
2284 ++ if (ret)
2285 ++ return ret;
2286 + now = timespec64_add(now, then);
2287 +
2288 + return ice_ptp_settime64(info, (const struct timespec64 *)&now);
2289 +diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
2290 +index 25cca5c4ae575..275a99f62b285 100644
2291 +--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
2292 ++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
2293 +@@ -711,7 +711,7 @@ ice_tc_set_port(struct flow_match_ports match,
2294 + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
2295 + else
2296 + fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
2297 +- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
2298 ++
2299 + headers->l4_key.dst_port = match.key->dst;
2300 + headers->l4_mask.dst_port = match.mask->dst;
2301 + }
2302 +@@ -720,7 +720,7 @@ ice_tc_set_port(struct flow_match_ports match,
2303 + fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
2304 + else
2305 + fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
2306 +- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
2307 ++
2308 + headers->l4_key.src_port = match.key->src;
2309 + headers->l4_mask.src_port = match.mask->src;
2310 + }
2311 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2312 +index 6427e7ec93de6..a12cc305c4619 100644
2313 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2314 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2315 +@@ -617,8 +617,6 @@ void ice_free_vfs(struct ice_pf *pf)
2316 + struct ice_hw *hw = &pf->hw;
2317 + unsigned int tmp, i;
2318 +
2319 +- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
2320 +-
2321 + if (!pf->vf)
2322 + return;
2323 +
2324 +@@ -636,22 +634,26 @@ void ice_free_vfs(struct ice_pf *pf)
2325 + else
2326 + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
2327 +
2328 +- /* Avoid wait time by stopping all VFs at the same time */
2329 +- ice_for_each_vf(pf, i)
2330 +- ice_dis_vf_qs(&pf->vf[i]);
2331 +-
2332 + tmp = pf->num_alloc_vfs;
2333 + pf->num_qps_per_vf = 0;
2334 + pf->num_alloc_vfs = 0;
2335 + for (i = 0; i < tmp; i++) {
2336 +- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
2337 ++ struct ice_vf *vf = &pf->vf[i];
2338 ++
2339 ++ mutex_lock(&vf->cfg_lock);
2340 ++
2341 ++ ice_dis_vf_qs(vf);
2342 ++
2343 ++ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2344 + /* disable VF qp mappings and set VF disable state */
2345 +- ice_dis_vf_mappings(&pf->vf[i]);
2346 +- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
2347 +- ice_free_vf_res(&pf->vf[i]);
2348 ++ ice_dis_vf_mappings(vf);
2349 ++ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2350 ++ ice_free_vf_res(vf);
2351 + }
2352 +
2353 +- mutex_destroy(&pf->vf[i].cfg_lock);
2354 ++ mutex_unlock(&vf->cfg_lock);
2355 ++
2356 ++ mutex_destroy(&vf->cfg_lock);
2357 + }
2358 +
2359 + if (ice_sriov_free_msix_res(pf))
2360 +@@ -687,7 +689,6 @@ void ice_free_vfs(struct ice_pf *pf)
2361 + i);
2362 +
2363 + clear_bit(ICE_VF_DIS, pf->state);
2364 +- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
2365 + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
2366 + }
2367 +
2368 +@@ -1613,6 +1614,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
2369 + ice_for_each_vf(pf, v) {
2370 + vf = &pf->vf[v];
2371 +
2372 ++ mutex_lock(&vf->cfg_lock);
2373 ++
2374 + vf->driver_caps = 0;
2375 + ice_vc_set_default_allowlist(vf);
2376 +
2377 +@@ -1627,6 +1630,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
2378 + ice_vf_pre_vsi_rebuild(vf);
2379 + ice_vf_rebuild_vsi(vf);
2380 + ice_vf_post_vsi_rebuild(vf);
2381 ++
2382 ++ mutex_unlock(&vf->cfg_lock);
2383 + }
2384 +
2385 + if (ice_is_eswitch_mode_switchdev(pf))
2386 +@@ -1677,6 +1682,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
2387 + u32 reg;
2388 + int i;
2389 +
2390 ++ lockdep_assert_held(&vf->cfg_lock);
2391 ++
2392 + dev = ice_pf_to_dev(pf);
2393 +
2394 + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2395 +@@ -2176,9 +2183,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
2396 + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2397 + /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2398 + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2399 +- if (reg & BIT(bit_idx))
2400 ++ if (reg & BIT(bit_idx)) {
2401 + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
2402 ++ mutex_lock(&vf->cfg_lock);
2403 + ice_reset_vf(vf, true);
2404 ++ mutex_unlock(&vf->cfg_lock);
2405 ++ }
2406 + }
2407 + }
2408 +
2409 +@@ -2255,7 +2265,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2410 + if (!vf)
2411 + return;
2412 +
2413 ++ mutex_lock(&vf->cfg_lock);
2414 + ice_vc_reset_vf(vf);
2415 ++ mutex_unlock(&vf->cfg_lock);
2416 + }
2417 +
2418 + /**
2419 +@@ -4651,10 +4663,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2420 + struct device *dev;
2421 + int err = 0;
2422 +
2423 +- /* if de-init is underway, don't process messages from VF */
2424 +- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
2425 +- return;
2426 +-
2427 + dev = ice_pf_to_dev(pf);
2428 + if (ice_validate_vf_id(pf, vf_id)) {
2429 + err = -EINVAL;
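
The ice series drops the global ICE_VF_DEINIT_IN_PROGRESS flag and instead takes each VF's cfg_lock around every reset and teardown path (plus a lockdep assertion inside ice_reset_vf()), so a message handler can never observe a half-torn-down VF. Reduced pthreads model of the rule:

    #include <pthread.h>
    #include <stdio.h>

    struct vf {
            pthread_mutex_t cfg_lock;
            int configured;
    };

    static void reset_vf(struct vf *vf)
    {
            pthread_mutex_lock(&vf->cfg_lock);
            vf->configured = 0; /* teardown ... */
            vf->configured = 1; /* ... and rebuild, atomic to other holders */
            pthread_mutex_unlock(&vf->cfg_lock);
    }

    int main(void)
    {
            struct vf vf = { PTHREAD_MUTEX_INITIALIZER, 1 };

            reset_vf(&vf);
            printf("vf configured: %d\n", vf.configured);
            return 0;
    }
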
2430 +diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
2431 +index bb14fa2241a36..0636783f7bc03 100644
2432 +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
2433 ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
2434 +@@ -2700,6 +2700,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2435 +
2436 + static struct platform_device *port_platdev[3];
2437 +
2438 ++static void mv643xx_eth_shared_of_remove(void)
2439 ++{
2440 ++ int n;
2441 ++
2442 ++ for (n = 0; n < 3; n++) {
2443 ++ platform_device_del(port_platdev[n]);
2444 ++ port_platdev[n] = NULL;
2445 ++ }
2446 ++}
2447 ++
2448 + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2449 + struct device_node *pnp)
2450 + {
2451 +@@ -2736,7 +2746,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2452 + return -EINVAL;
2453 + }
2454 +
2455 +- of_get_mac_address(pnp, ppd.mac_addr);
2456 ++ ret = of_get_mac_address(pnp, ppd.mac_addr);
2457 ++ if (ret)
2458 ++ return ret;
2459 +
2460 + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2461 + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2462 +@@ -2800,21 +2812,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2463 + ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2464 + if (ret) {
2465 + of_node_put(pnp);
2466 ++ mv643xx_eth_shared_of_remove();
2467 + return ret;
2468 + }
2469 + }
2470 + return 0;
2471 + }
2472 +
2473 +-static void mv643xx_eth_shared_of_remove(void)
2474 +-{
2475 +- int n;
2476 +-
2477 +- for (n = 0; n < 3; n++) {
2478 +- platform_device_del(port_platdev[n]);
2479 +- port_platdev[n] = NULL;
2480 +- }
2481 +-}
2482 + #else
2483 + static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2484 + {
2485 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2486 +index 60952b33b5688..d2333310b56fe 100644
2487 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2488 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
2489 +@@ -60,37 +60,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
2490 + void *headers_v)
2491 + {
2492 + struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2493 +- struct flow_match_enc_keyid enc_keyid;
2494 + struct flow_match_mpls match;
2495 + void *misc2_c;
2496 + void *misc2_v;
2497 +
2498 +- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2499 +- misc_parameters_2);
2500 +- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2501 +- misc_parameters_2);
2502 +-
2503 +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
2504 +- return 0;
2505 +-
2506 +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
2507 +- return 0;
2508 +-
2509 +- flow_rule_match_enc_keyid(rule, &enc_keyid);
2510 +-
2511 +- if (!enc_keyid.mask->keyid)
2512 +- return 0;
2513 +-
2514 + if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
2515 + !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
2516 + return -EOPNOTSUPP;
2517 +
2518 ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
2519 ++ return -EOPNOTSUPP;
2520 ++
2521 ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
2522 ++ return 0;
2523 ++
2524 + flow_rule_match_mpls(rule, &match);
2525 +
2526 + /* Only support matching the first LSE */
2527 + if (match.mask->used_lses != 1)
2528 + return -EOPNOTSUPP;
2529 +
2530 ++ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2531 ++ misc_parameters_2);
2532 ++ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2533 ++ misc_parameters_2);
2534 ++
2535 + MLX5_SET(fte_match_set_misc2, misc2_c,
2536 + outer_first_mpls_over_udp.mpls_label,
2537 + match.mask->ls[0].mpls_label);
2538 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2539 +index c2ea5fad48ddf..58c72142804a5 100644
2540 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2541 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2542 +@@ -1752,7 +1752,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
2543 + if (size_read < 0) {
2544 + netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
2545 + __func__, size_read);
2546 +- return 0;
2547 ++ return size_read;
2548 + }
2549 +
2550 + i += size_read;
2551 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2552 +index bf25d0aa74c3b..ea0968ea88d6a 100644
2553 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2554 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2555 +@@ -1348,7 +1348,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
2556 + }
2557 +
2558 + /* True when explicitly set via priv flag, or XDP prog is loaded */
2559 +- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
2560 ++ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
2561 ++ get_cqe_tls_offload(cqe))
2562 + goto csum_unnecessary;
2563 +
2564 + /* CQE csum doesn't cover padding octets in short ethernet
2565 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2566 +index 8c9163d2c6468..08a75654f5f18 100644
2567 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2568 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2569 +@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
2570 + netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
2571 + buf[count] = st.st_func(priv);
2572 + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
2573 ++ count++;
2574 + }
2575 +
2576 + mutex_unlock(&priv->state_lock);
2577 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2578 +index eae37934cdf70..308733cbaf775 100644
2579 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2580 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2581 +@@ -3427,6 +3427,18 @@ actions_match_supported(struct mlx5e_priv *priv,
2582 + return false;
2583 + }
2584 +
2585 ++ if (!(~actions &
2586 ++ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
2587 ++ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
2588 ++ return false;
2589 ++ }
2590 ++
2591 ++ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
2592 ++ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
2593 ++ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
2594 ++ return false;
2595 ++ }
2596 ++
2597 + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
2598 + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
2599 + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
2600 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2601 +index ccb66428aeb5b..52b973e244189 100644
2602 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2603 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2604 +@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2605 + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2606 + return false;
2607 +
2608 +- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2609 +- mlx5_ecpf_vport_exists(esw->dev))
2610 +- return false;
2611 +-
2612 + return true;
2613 + }
2614 +
2615 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2616 +index 386ab9a2d490f..4f6b010726998 100644
2617 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2618 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2619 +@@ -2073,6 +2073,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2620 + fte->node.del_hw_func = NULL;
2621 + up_write_ref_node(&fte->node, false);
2622 + tree_put_node(&fte->node, false);
2623 ++ } else {
2624 ++ up_write_ref_node(&fte->node, false);
2625 + }
2626 + kfree(handle);
2627 + }
2628 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2629 +index df58cba37930a..1e8ec4f236b28 100644
2630 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2631 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
2632 +@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
2633 +
2634 + u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
2635 + {
2636 ++ if (!mlx5_chains_prios_supported(chains))
2637 ++ return 1;
2638 ++
2639 + if (mlx5_chains_ignore_flow_level_supported(chains))
2640 + return UINT_MAX;
2641 +
2642 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2643 +index 6e381111f1d2f..c3861c69521c2 100644
2644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2645 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2646 +@@ -510,7 +510,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
2647 +
2648 + /* Check log_max_qp from HCA caps to set in current profile */
2649 + if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
2650 +- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
2651 ++ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
2652 + } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
2653 + mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
2654 + prof->log_max_qp,
2655 +@@ -1796,10 +1796,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
2656 + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
2657 + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
2658 + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
2659 ++ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
2660 + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2661 + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
2662 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2663 + { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
2664 ++ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
2665 + { 0, }
2666 + };
2667 +
2668 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2669 +index 7f6fd9c5e371b..e289cfdbce075 100644
2670 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2671 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
2672 +@@ -4,7 +4,6 @@
2673 + #include "dr_types.h"
2674 +
2675 + #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
2676 +-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
2677 +
2678 + struct mlx5dr_icm_pool {
2679 + enum mlx5dr_icm_type icm_type;
2680 +@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
2681 + kvfree(icm_mr);
2682 + }
2683 +
2684 +-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
2685 ++static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
2686 + {
2687 +- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
2688 +- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
2689 +- if (!chunk->ste_arr)
2690 +- return -ENOMEM;
2691 +-
2692 +- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
2693 +- DR_STE_SIZE_REDUCED, GFP_KERNEL);
2694 +- if (!chunk->hw_ste_arr)
2695 +- goto out_free_ste_arr;
2696 +-
2697 +- chunk->miss_list = kvmalloc(chunk->num_of_entries *
2698 +- sizeof(chunk->miss_list[0]), GFP_KERNEL);
2699 +- if (!chunk->miss_list)
2700 +- goto out_free_hw_ste_arr;
2701 ++ /* We support only one type of STE size, both for ConnectX-5 and later
2702 ++ * devices. Once the support for match STE which has a larger tag is
2703 ++ * added (32B instead of 16B), the STE size for devices later than
2704 ++ * ConnectX-5 needs to account for that.
2705 ++ */
2706 ++ return DR_STE_SIZE_REDUCED;
2707 ++}
2708 +
2709 +- return 0;
2710 ++static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
2711 ++{
2712 ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
2713 ++ int index = offset / DR_STE_SIZE;
2714 +
2715 +-out_free_hw_ste_arr:
2716 +- kvfree(chunk->hw_ste_arr);
2717 +-out_free_ste_arr:
2718 +- kvfree(chunk->ste_arr);
2719 +- return -ENOMEM;
2720 ++ chunk->ste_arr = &buddy->ste_arr[index];
2721 ++ chunk->miss_list = &buddy->miss_list[index];
2722 ++ chunk->hw_ste_arr = buddy->hw_ste_arr +
2723 ++ index * dr_icm_buddy_get_ste_size(buddy);
2724 + }
2725 +
2726 + static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
2727 + {
2728 +- kvfree(chunk->miss_list);
2729 +- kvfree(chunk->hw_ste_arr);
2730 +- kvfree(chunk->ste_arr);
2731 ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
2732 ++
2733 ++ memset(chunk->hw_ste_arr, 0,
2734 ++ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
2735 ++ memset(chunk->ste_arr, 0,
2736 ++ chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
2737 + }
2738 +
2739 + static enum mlx5dr_icm_type
2740 +@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
2741 + kvfree(chunk);
2742 + }
2743 +
2744 ++static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
2745 ++{
2746 ++ int num_of_entries =
2747 ++ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
2748 ++
2749 ++ buddy->ste_arr = kvcalloc(num_of_entries,
2750 ++ sizeof(struct mlx5dr_ste), GFP_KERNEL);
2751 ++ if (!buddy->ste_arr)
2752 ++ return -ENOMEM;
2753 ++
2754 ++ /* Preallocate full STE size on non-ConnectX-5 devices since
2755 ++ * we need to support both full and reduced with the same cache.
2756 ++ */
2757 ++ buddy->hw_ste_arr = kvcalloc(num_of_entries,
2758 ++ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
2759 ++ if (!buddy->hw_ste_arr)
2760 ++ goto free_ste_arr;
2761 ++
2762 ++ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
2763 ++ if (!buddy->miss_list)
2764 ++ goto free_hw_ste_arr;
2765 ++
2766 ++ return 0;
2767 ++
2768 ++free_hw_ste_arr:
2769 ++ kvfree(buddy->hw_ste_arr);
2770 ++free_ste_arr:
2771 ++ kvfree(buddy->ste_arr);
2772 ++ return -ENOMEM;
2773 ++}
2774 ++
2775 ++static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
2776 ++{
2777 ++ kvfree(buddy->ste_arr);
2778 ++ kvfree(buddy->hw_ste_arr);
2779 ++ kvfree(buddy->miss_list);
2780 ++}
2781 ++
2782 + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
2783 + {
2784 + struct mlx5dr_icm_buddy_mem *buddy;
2785 +@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
2786 + buddy->icm_mr = icm_mr;
2787 + buddy->pool = pool;
2788 +
2789 ++ if (pool->icm_type == DR_ICM_TYPE_STE) {
2790 ++ /* Reduce allocations by preallocating and reusing the STE structures */
2791 ++ if (dr_icm_buddy_init_ste_cache(buddy))
2792 ++ goto err_cleanup_buddy;
2793 ++ }
2794 ++
2795 + /* add it to the -start- of the list in order to search in it first */
2796 + list_add(&buddy->list_node, &pool->buddy_mem_list);
2797 +
2798 + return 0;
2799 +
2800 ++err_cleanup_buddy:
2801 ++ mlx5dr_buddy_cleanup(buddy);
2802 + err_free_buddy:
2803 + kvfree(buddy);
2804 + free_mr:
2805 +@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
2806 +
2807 + mlx5dr_buddy_cleanup(buddy);
2808 +
2809 ++ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
2810 ++ dr_icm_buddy_cleanup_ste_cache(buddy);
2811 ++
2812 + kvfree(buddy);
2813 + }
2814 +
2815 +@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
2816 + chunk->byte_size =
2817 + mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
2818 + chunk->seg = seg;
2819 ++ chunk->buddy_mem = buddy_mem_pool;
2820 +
2821 +- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
2822 +- mlx5dr_err(pool->dmn,
2823 +- "Failed to init ste arrays (order: %d)\n",
2824 +- chunk_size);
2825 +- goto out_free_chunk;
2826 +- }
2827 ++ if (pool->icm_type == DR_ICM_TYPE_STE)
2828 ++ dr_icm_chunk_ste_init(chunk, offset);
2829 +
2830 + buddy_mem_pool->used_memory += chunk->byte_size;
2831 +- chunk->buddy_mem = buddy_mem_pool;
2832 + INIT_LIST_HEAD(&chunk->chunk_list);
2833 +
2834 + /* chunk now is part of the used_list */
2835 + list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
2836 +
2837 + return chunk;
2838 +-
2839 +-out_free_chunk:
2840 +- kvfree(chunk);
2841 +- return NULL;
2842 + }
2843 +
2844 + static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
2845 + {
2846 +- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
2847 +- return true;
2848 ++ int allow_hot_size;
2849 ++
2850 ++ /* sync when hot memory reaches half of the pool size */
2851 ++ allow_hot_size =
2852 ++ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
2853 ++ pool->icm_type) / 2;
2854 +
2855 +- return false;
2856 ++ return pool->hot_memory_size > allow_hot_size;
2857 + }
2858 +
2859 + static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
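
The ICM pool no longer syncs at a fixed 64 MB of hot (freed but not yet reclaimed) memory; the threshold becomes half the pool's maximum size as derived from max_log_chunk_sz. Back-of-the-envelope sketch, assuming a 64-byte STE entry (the real conversion is mlx5dr_icm_pool_chunk_size_to_byte()):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_log_chunk_sz = 21; /* example: 2^21 entries */
            unsigned int log_entry_size = 6;    /* 64-byte STE, assumed */
            unsigned long long pool_bytes =
                    1ULL << (max_log_chunk_sz + log_entry_size);

            printf("sync once hot memory exceeds %llu bytes\n", pool_bytes / 2);
            return 0;
    }
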
2860 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2861 +index 3d0cdc36a91ab..01213045a8a84 100644
2862 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2863 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
2864 +@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
2865 + return (spec->dmac_47_16 || spec->dmac_15_0);
2866 + }
2867 +
2868 +-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
2869 +-{
2870 +- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
2871 +- spec->src_ip_63_32 || spec->src_ip_31_0);
2872 +-}
2873 +-
2874 +-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
2875 +-{
2876 +- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
2877 +- spec->dst_ip_63_32 || spec->dst_ip_31_0);
2878 +-}
2879 +-
2880 + static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
2881 + {
2882 + return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
2883 +@@ -480,11 +468,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
2884 + &mask, inner, rx);
2885 +
2886 + if (outer_ipv == DR_RULE_IPV6) {
2887 +- if (dr_mask_is_dst_addr_set(&mask.outer))
2888 ++ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
2889 + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
2890 + &mask, inner, rx);
2891 +
2892 +- if (dr_mask_is_src_addr_set(&mask.outer))
2893 ++ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
2894 + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
2895 + &mask, inner, rx);
2896 +
2897 +@@ -580,11 +568,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
2898 + &mask, inner, rx);
2899 +
2900 + if (inner_ipv == DR_RULE_IPV6) {
2901 +- if (dr_mask_is_dst_addr_set(&mask.inner))
2902 ++ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
2903 + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
2904 + &mask, inner, rx);
2905 +
2906 +- if (dr_mask_is_src_addr_set(&mask.inner))
2907 ++ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
2908 + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
2909 + &mask, inner, rx);
2910 +
2911 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2912 +index 219a5474a8a46..7e711b2037b5b 100644
2913 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2914 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
2915 +@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
2916 + used_hw_action_num);
2917 + }
2918 +
2919 ++static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
2920 ++ struct mlx5dr_match_spec *spec)
2921 ++{
2922 ++ if (spec->ip_version) {
2923 ++ if (spec->ip_version != 0xf) {
2924 ++ mlx5dr_err(dmn,
2925 ++ "Partial ip_version mask with src/dst IP is not supported\n");
2926 ++ return -EINVAL;
2927 ++ }
2928 ++ } else if (spec->ethertype != 0xffff &&
2929 ++ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
2930 ++ mlx5dr_err(dmn,
2931 ++ "Partial/no ethertype mask with src/dst IP is not supported\n");
2932 ++ return -EINVAL;
2933 ++ }
2934 ++
2935 ++ return 0;
2936 ++}
2937 ++
2938 + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
2939 + u8 match_criteria,
2940 + struct mlx5dr_match_param *mask,
2941 + struct mlx5dr_match_param *value)
2942 + {
2943 +- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
2944 ++ if (value)
2945 ++ return 0;
2946 ++
2947 ++ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
2948 + if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
2949 + mlx5dr_err(dmn,
2950 + "Partial mask source_port is not supported\n");
2951 +@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
2952 + }
2953 + }
2954 +
2955 ++ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
2956 ++ dr_ste_build_pre_check_spec(dmn, &mask->outer))
2957 ++ return -EINVAL;
2958 ++
2959 ++ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
2960 ++ dr_ste_build_pre_check_spec(dmn, &mask->inner))
2961 ++ return -EINVAL;
2962 ++
2963 + return 0;
2964 + }
2965 +
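The new dr_ste_build_pre_check_spec() above rejects a mask that matches on source or destination IP unless the L3 type is pinned down by a full ip_version mask (0xf) or a full ethertype mask (0xffff). Below is a minimal userspace sketch of that rule; struct match_spec, ip_mask_set() and pre_check_spec() are simplified stand-ins for the driver's mlx5dr types, not the real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for mlx5dr_match_spec; only the fields the check uses. */
struct match_spec {
	uint8_t  ip_version;   /* 4-bit mask in hardware, 0xf == fully masked */
	uint16_t ethertype;    /* 0xffff == fully masked */
	uint32_t src_ip[4];    /* 128-bit source address mask */
	uint32_t dst_ip[4];    /* 128-bit destination address mask */
};

static bool ip_mask_set(const uint32_t ip[4])
{
	return ip[0] || ip[1] || ip[2] || ip[3];
}

/* Mirrors the pre-check added in dr_ste_build_pre_check_spec():
 * matching on src/dst IP is only allowed together with a *full*
 * ip_version mask (0xf) or a *full* ethertype mask (0xffff),
 * because a partial L3-type mask makes the IP match ambiguous.
 */
static int pre_check_spec(const struct match_spec *spec)
{
	if (spec->ip_version) {
		if (spec->ip_version != 0xf)
			return -1; /* partial ip_version mask with IP match */
	} else if (spec->ethertype != 0xffff &&
		   (ip_mask_set(spec->src_ip) || ip_mask_set(spec->dst_ip))) {
		return -1; /* no/partial ethertype mask with IP match */
	}
	return 0;
}

int main(void)
{
	struct match_spec bad  = { .ethertype = 0x0800, .dst_ip = { 0, 0, 0, ~0u } };
	struct match_spec good = { .ip_version = 0xf,   .dst_ip = { 0, 0, 0, ~0u } };

	printf("bad: %d, good: %d\n", pre_check_spec(&bad), pre_check_spec(&good));
	return 0;
}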
2966 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2967 +index 2333c2439c287..5f98db648e865 100644
2968 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2969 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
2970 +@@ -739,6 +739,16 @@ struct mlx5dr_match_param {
2971 + (_misc3)->icmpv4_code || \
2972 + (_misc3)->icmpv4_header_data)
2973 +
2974 ++#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
2975 ++ (_spec)->src_ip_95_64 || \
2976 ++ (_spec)->src_ip_63_32 || \
2977 ++ (_spec)->src_ip_31_0)
2978 ++
2979 ++#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
2980 ++ (_spec)->dst_ip_95_64 || \
2981 ++ (_spec)->dst_ip_63_32 || \
2982 ++ (_spec)->dst_ip_31_0)
2983 ++
2984 + struct mlx5dr_esw_caps {
2985 + u64 drop_icm_address_rx;
2986 + u64 drop_icm_address_tx;
2987 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2988 +index 2632d5ae9bc0e..ac4651235b34c 100644
2989 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2990 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2991 +@@ -222,7 +222,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
2992 + dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
2993 + }
2994 +
2995 +-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
2996 ++/* We want to support a rule with 32 destinations, which means we need to
2997 ++ * account for 32 destinations plus usually a counter plus one more action
2998 ++ * for a multi-destination flow table.
2999 ++ */
3000 ++#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
3001 + static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
3002 + struct mlx5_flow_table *ft,
3003 + struct mlx5_flow_group *group,
3004 +@@ -392,9 +396,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
3005 + enum mlx5_flow_destination_type type = dst->dest_attr.type;
3006 + u32 id;
3007 +
3008 +- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
3009 +- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
3010 +- err = -ENOSPC;
3011 ++ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
3012 ++ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3013 ++ err = -EOPNOTSUPP;
3014 + goto free_actions;
3015 + }
3016 +
3017 +@@ -464,8 +468,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
3018 + MLX5_FLOW_DESTINATION_TYPE_COUNTER)
3019 + continue;
3020 +
3021 +- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3022 +- err = -ENOSPC;
3023 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
3024 ++ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3025 ++ err = -EOPNOTSUPP;
3026 + goto free_actions;
3027 + }
3028 +
3029 +@@ -485,14 +490,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
3030 + params.match_sz = match_sz;
3031 + params.match_buf = (u64 *)fte->val;
3032 + if (num_term_actions == 1) {
3033 +- if (term_actions->reformat)
3034 ++ if (term_actions->reformat) {
3035 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3036 ++ err = -EOPNOTSUPP;
3037 ++ goto free_actions;
3038 ++ }
3039 + actions[num_actions++] = term_actions->reformat;
3040 ++ }
3041 +
3042 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3043 ++ err = -EOPNOTSUPP;
3044 ++ goto free_actions;
3045 ++ }
3046 + actions[num_actions++] = term_actions->dest;
3047 + } else if (num_term_actions > 1) {
3048 + bool ignore_flow_level =
3049 + !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
3050 +
3051 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
3052 ++ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
3053 ++ err = -EOPNOTSUPP;
3054 ++ goto free_actions;
3055 ++ }
3056 + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
3057 + term_actions,
3058 + num_term_actions,
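The fs_dr changes above turn one up-front capacity test into a check before every append to the fixed-size actions[] array, and report the overflow as -EOPNOTSUPP (rule shape not supported) rather than -ENOSPC. A hedged sketch of that append pattern; push_action() and the constant here are illustrative, not driver API:

#include <errno.h>
#include <stddef.h>

#define ACTION_MAX 34   /* 32 destinations + a counter + a multi-dest table */

/* Hypothetical helper showing the pattern the patch applies at every
 * insertion point: never write actions[num] without first proving
 * num < ACTION_MAX, and report the overflow as "unsupported rule"
 * (-EOPNOTSUPP) rather than -ENOSPC.
 */
static int push_action(void *actions[], size_t *num, void *action)
{
	if (*num == ACTION_MAX)
		return -EOPNOTSUPP;
	actions[(*num)++] = action;
	return 0;
}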
3059 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
3060 +index c7c93131b762b..dfa223415fe24 100644
3061 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
3062 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
3063 +@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
3064 + * sync_ste command sets them free.
3065 + */
3066 + struct list_head hot_list;
3067 ++
3068 ++ /* Memory optimisation */
3069 ++ struct mlx5dr_ste *ste_arr;
3070 ++ struct list_head *miss_list;
3071 ++ u8 *hw_ste_arr;
3072 + };
3073 +
3074 + int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
3075 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3076 +index 0a326e04e6923..cb43651ea9ba8 100644
3077 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3078 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3079 +@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
3080 + int port, bool mod)
3081 + {
3082 + struct nfp_flower_priv *priv = app->priv;
3083 +- int ida_idx = NFP_MAX_MAC_INDEX, err;
3084 + struct nfp_tun_offloaded_mac *entry;
3085 ++ int ida_idx = -1, err;
3086 + u16 nfp_mac_idx = 0;
3087 +
3088 + entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
3089 +@@ -997,7 +997,7 @@ err_remove_hash:
3090 + err_free_entry:
3091 + kfree(entry);
3092 + err_free_ida:
3093 +- if (ida_idx != NFP_MAX_MAC_INDEX)
3094 ++ if (ida_idx != -1)
3095 + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
3096 +
3097 + return err;
3098 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
3099 +index e7065c9a8e389..e8be35b1b6c96 100644
3100 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
3101 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
3102 +@@ -1427,6 +1427,8 @@ static int temac_probe(struct platform_device *pdev)
3103 + lp->indirect_lock = devm_kmalloc(&pdev->dev,
3104 + sizeof(*lp->indirect_lock),
3105 + GFP_KERNEL);
3106 ++ if (!lp->indirect_lock)
3107 ++ return -ENOMEM;
3108 + spin_lock_init(lp->indirect_lock);
3109 + }
3110 +
3111 +diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
3112 +index 5f4cd24a0241d..4eba5a91075c0 100644
3113 +--- a/drivers/net/mdio/mdio-ipq4019.c
3114 ++++ b/drivers/net/mdio/mdio-ipq4019.c
3115 +@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
3116 + if (ret)
3117 + return ret;
3118 +
3119 +- return clk_prepare_enable(priv->mdio_clk);
3120 ++ ret = clk_prepare_enable(priv->mdio_clk);
3121 ++ if (ret == 0)
3122 ++ mdelay(10);
3123 ++
3124 ++ return ret;
3125 + }
3126 +
3127 + static int ipq4019_mdio_probe(struct platform_device *pdev)
3128 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
3129 +index eb3817d70f2b8..9b4dfa3001d6e 100644
3130 +--- a/drivers/net/usb/cdc_ether.c
3131 ++++ b/drivers/net/usb/cdc_ether.c
3132 +@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
3133 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3134 + .bInterfaceProtocol = USB_CDC_PROTO_NONE
3135 +
3136 ++#define ZAURUS_FAKE_INTERFACE \
3137 ++ .bInterfaceClass = USB_CLASS_COMM, \
3138 ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
3139 ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
3140 ++
3141 + /* SA-1100 based Sharp Zaurus ("collie"), or compatible;
3142 + * wire-incompatible with true CDC Ethernet implementations.
3143 + * (And, it seems, needlessly so...)
3144 +@@ -636,6 +641,13 @@ static const struct usb_device_id products[] = {
3145 + .idProduct = 0x9032, /* SL-6000 */
3146 + ZAURUS_MASTER_INTERFACE,
3147 + .driver_info = 0,
3148 ++}, {
3149 ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3150 ++ | USB_DEVICE_ID_MATCH_DEVICE,
3151 ++ .idVendor = 0x04DD,
3152 ++ .idProduct = 0x9032, /* SL-6000 */
3153 ++ ZAURUS_FAKE_INTERFACE,
3154 ++ .driver_info = 0,
3155 + }, {
3156 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3157 + | USB_DEVICE_ID_MATCH_DEVICE,
3158 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
3159 +index e303b522efb50..15f91d691bba3 100644
3160 +--- a/drivers/net/usb/cdc_ncm.c
3161 ++++ b/drivers/net/usb/cdc_ncm.c
3162 +@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
3163 + {
3164 + struct sk_buff *skb;
3165 + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
3166 +- int len;
3167 ++ unsigned int len;
3168 + int nframes;
3169 + int x;
3170 +- int offset;
3171 ++ unsigned int offset;
3172 + union {
3173 + struct usb_cdc_ncm_ndp16 *ndp16;
3174 + struct usb_cdc_ncm_ndp32 *ndp32;
3175 +@@ -1790,8 +1790,8 @@ next_ndp:
3176 + break;
3177 + }
3178 +
3179 +- /* sanity checking */
3180 +- if (((offset + len) > skb_in->len) ||
3181 ++ /* sanity checking - watch out for integer wrap */
3182 ++ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
3183 + (len > ctx->rx_max) || (len < ETH_HLEN)) {
3184 + netif_dbg(dev, rx_err, dev->net,
3185 + "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
3186 +diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
3187 +index b658510cc9a42..5a53e63d33a60 100644
3188 +--- a/drivers/net/usb/sr9700.c
3189 ++++ b/drivers/net/usb/sr9700.c
3190 +@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
3191 + /* ignore the CRC length */
3192 + len = (skb->data[1] | (skb->data[2] << 8)) - 4;
3193 +
3194 +- if (len > ETH_FRAME_LEN)
3195 ++ if (len > ETH_FRAME_LEN || len > skb->len)
3196 + return 0;
3197 +
3198 + /* the last packet of current skb */
3199 +diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
3200 +index 8e717a0b559b3..7984f2157d222 100644
3201 +--- a/drivers/net/usb/zaurus.c
3202 ++++ b/drivers/net/usb/zaurus.c
3203 +@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
3204 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3205 + .bInterfaceProtocol = USB_CDC_PROTO_NONE
3206 +
3207 ++#define ZAURUS_FAKE_INTERFACE \
3208 ++ .bInterfaceClass = USB_CLASS_COMM, \
3209 ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
3210 ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
3211 ++
3212 + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
3213 + {
3214 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3215 +@@ -313,6 +318,13 @@ static const struct usb_device_id products [] = {
3216 + .idProduct = 0x9032, /* SL-6000 */
3217 + ZAURUS_MASTER_INTERFACE,
3218 + .driver_info = ZAURUS_PXA_INFO,
3219 ++}, {
3220 ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3221 ++ | USB_DEVICE_ID_MATCH_DEVICE,
3222 ++ .idVendor = 0x04DD,
3223 ++ .idProduct = 0x9032, /* SL-6000 */
3224 ++ ZAURUS_FAKE_INTERFACE,
3225 ++ .driver_info = (unsigned long)&bogus_mdlm_info,
3226 + }, {
3227 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3228 + | USB_DEVICE_ID_MATCH_DEVICE,
3229 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3230 +index 352766aa3122e..5785f6abf1945 100644
3231 +--- a/drivers/nvme/host/core.c
3232 ++++ b/drivers/nvme/host/core.c
3233 +@@ -1936,7 +1936,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
3234 + if (blk_queue_is_zoned(ns->queue)) {
3235 + ret = nvme_revalidate_zones(ns);
3236 + if (ret && !nvme_first_scan(ns->disk))
3237 +- goto out;
3238 ++ return ret;
3239 + }
3240 +
3241 + if (nvme_ns_head_multipath(ns->head)) {
3242 +@@ -1951,16 +1951,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
3243 + return 0;
3244 +
3245 + out_unfreeze:
3246 +- blk_mq_unfreeze_queue(ns->disk->queue);
3247 +-out:
3248 + /*
3249 + * If probing fails due an unsupported feature, hide the block device,
3250 + * but still allow other access.
3251 + */
3252 + if (ret == -ENODEV) {
3253 + ns->disk->flags |= GENHD_FL_HIDDEN;
3254 ++ set_bit(NVME_NS_READY, &ns->flags);
3255 + ret = 0;
3256 + }
3257 ++ blk_mq_unfreeze_queue(ns->disk->queue);
3258 + return ret;
3259 + }
3260 +
3261 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
3262 +index 23a38dcf0fc4d..9fd1602b539d9 100644
3263 +--- a/drivers/nvmem/core.c
3264 ++++ b/drivers/nvmem/core.c
3265 +@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
3266 +
3267 + if (config->wp_gpio)
3268 + nvmem->wp_gpio = config->wp_gpio;
3269 +- else
3270 ++ else if (!config->ignore_wp)
3271 + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
3272 + GPIOD_OUT_HIGH);
3273 + if (IS_ERR(nvmem->wp_gpio)) {
3274 +diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
3275 +index 357e9a293edf7..2a3bf82aa4e26 100644
3276 +--- a/drivers/pci/controller/pci-mvebu.c
3277 ++++ b/drivers/pci/controller/pci-mvebu.c
3278 +@@ -1288,7 +1288,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
3279 + * indirectly via kernel emulated PCI bridge driver.
3280 + */
3281 + mvebu_pcie_setup_hw(port);
3282 +- mvebu_pcie_set_local_dev_nr(port, 0);
3283 ++ mvebu_pcie_set_local_dev_nr(port, 1);
3284 ++ mvebu_pcie_set_local_bus_nr(port, 0);
3285 + }
3286 +
3287 + pcie->nports = i;
3288 +diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
3289 +index 49e32684dbb25..ecab6bf63dc6d 100644
3290 +--- a/drivers/pinctrl/pinctrl-k210.c
3291 ++++ b/drivers/pinctrl/pinctrl-k210.c
3292 +@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
3293 + {
3294 + int i;
3295 +
3296 +- for (i = K210_PC_DRIVE_MAX; i; i--) {
3297 ++ for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
3298 + if (k210_pinconf_drive_strength[i] <= max_strength_ua)
3299 + return i;
3300 + }
3301 +@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
3302 + case PIN_CONFIG_BIAS_PULL_UP:
3303 + if (!arg)
3304 + return -EINVAL;
3305 +- val |= K210_PC_PD;
3306 ++ val |= K210_PC_PU;
3307 + break;
3308 + case PIN_CONFIG_DRIVE_STRENGTH:
3309 + arg *= 1000;
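Two one-line fixes above: the pull-up case was setting the pull-down bit (K210_PC_PD instead of K210_PC_PU), and the drive-strength lookup loop never tested index 0, so the weakest drive level was unreachable. A sketch of the loop bug; the microamp values and get_drive() are made up for illustration:

#include <stdio.h>

/* Illustrative table; the real values live in k210_pinconf_drive_strength[]. */
static const int drive_ua[] = { 11200, 16800, 22300, 27800, 33300, 38700,
				44100, 49800 };
#define DRIVE_MAX 7

/* Old loop: "for (i = DRIVE_MAX; i; i--)" never tests i == 0, so a
 * request below drive_ua[1] could not fall back to the weakest level.
 * The fixed loop includes index 0.
 */
static int get_drive(int max_strength_ua)
{
	int i;

	for (i = DRIVE_MAX; i >= 0; i--)
		if (drive_ua[i] <= max_strength_ua)
			return i;
	return -22; /* -EINVAL */
}

int main(void)
{
	printf("%d\n", get_drive(12000)); /* 0 with the fix, -EINVAL before */
	return 0;
}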
3310 +diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
3311 +index abac3eec565e8..444ec81ba02d7 100644
3312 +--- a/drivers/platform/surface/surface3_power.c
3313 ++++ b/drivers/platform/surface/surface3_power.c
3314 +@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
3315 + }
3316 + bix->last_full_charg_capacity = ret;
3317 +
3318 +- /* get serial number */
3319 ++ /*
3320 ++	 * Get the serial number. On some devices (with an unofficial
3321 ++	 * replacement battery?), reading any of the serial number range
3322 ++	 * addresses gets nacked; in this case, just leave the serial number empty.
3323 ++ */
3324 + ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
3325 + sizeof(buf), buf);
3326 +- if (ret != sizeof(buf)) {
3327 ++ if (ret == -EREMOTEIO) {
3328 ++ /* no serial number available */
3329 ++ } else if (ret != sizeof(buf)) {
3330 + dev_err(&client->dev, "Error reading serial no: %d\n", ret);
3331 + return ret;
3332 ++ } else {
3333 ++ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
3334 + }
3335 +- snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
3336 +
3337 + /* get cycle count */
3338 + ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
3339 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
3340 +index cfa222c9bd5e7..78f31b61a2aac 100644
3341 +--- a/drivers/spi/spi-zynq-qspi.c
3342 ++++ b/drivers/spi/spi-zynq-qspi.c
3343 +@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
3344 +
3345 + if (op->dummy.nbytes) {
3346 + tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
3347 ++ if (!tmpbuf)
3348 ++ return -ENOMEM;
3349 ++
3350 + memset(tmpbuf, 0xff, op->dummy.nbytes);
3351 + reinit_completion(&xqspi->data_completion);
3352 + xqspi->txbuf = tmpbuf;
3353 +diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
3354 +index abe9395a0aefd..861a154144e66 100644
3355 +--- a/drivers/staging/fbtft/fb_st7789v.c
3356 ++++ b/drivers/staging/fbtft/fb_st7789v.c
3357 +@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
3358 + {
3359 + int rc;
3360 +
3361 ++ par->fbtftops.reset(par);
3362 ++
3363 + rc = init_tearing_effect_line(par);
3364 + if (rc)
3365 + return rc;
3366 +diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3367 +index 8502b7d8df896..68f61a7389303 100644
3368 +--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3369 ++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
3370 +@@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle,
3371 + thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
3372 + thermal_prop[4] = NULL;
3373 + kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
3374 ++ kfree(thermal_prop[0]);
3375 ++ kfree(thermal_prop[1]);
3376 ++ kfree(thermal_prop[2]);
3377 ++ kfree(thermal_prop[3]);
3378 + }
3379 +
3380 + static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
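The int3400 fix above plugs a memory leak: kasprintf() allocates, kobject_uevent_env() copies its environment strings, and the originals were never freed, so every thermal notification leaked four small allocations. A userspace analogue of the ownership rule, with asprintf() standing in for kasprintf() and make_prop() a hypothetical helper:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* kasprintf()-style allocation: the caller owns the string. The notify
 * path built four of these per event and never freed them; the fix
 * frees each one after kobject_uevent_env(), which copies its input.
 */
static char *make_prop(const char *fmt, int v)
{
	char *s = NULL;

	if (asprintf(&s, fmt, v) < 0)
		return NULL;
	return s;
}

int main(void)
{
	char *prop = make_prop("EVENT=%d", 3);

	if (prop) {
		puts(prop);	/* the consumer copies what it needs... */
		free(prop);	/* ...so the producer must free its copy */
	}
	return 0;
}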
3381 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
3382 +index 9c5211f2ea84c..2ec9eeaabac94 100644
3383 +--- a/drivers/tty/n_gsm.c
3384 ++++ b/drivers/tty/n_gsm.c
3385 +@@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
3386 + modembits |= MDM_RTR;
3387 + if (dlci->modem_tx & TIOCM_RI)
3388 + modembits |= MDM_IC;
3389 +- if (dlci->modem_tx & TIOCM_CD)
3390 ++ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
3391 + modembits |= MDM_DV;
3392 + return modembits;
3393 + }
3394 +@@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
3395 + * gsm_print_packet - display a frame for debug
3396 + * @hdr: header to print before decode
3397 + * @addr: address EA from the frame
3398 +- * @cr: C/R bit from the frame
3399 ++ * @cr: C/R bit seen as initiator
3400 + * @control: control including PF bit
3401 + * @data: following data bytes
3402 + * @dlen: length of data
3403 +@@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
3404 + * gsm_send - send a control frame
3405 + * @gsm: our GSM mux
3406 + * @addr: address for control frame
3407 +- * @cr: command/response bit
3408 ++ * @cr: command/response bit seen as initiator
3409 + * @control: control byte including PF bit
3410 + *
3411 + * Format up and transmit a control frame. These do not go via the
3412 +@@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
3413 + int len;
3414 + u8 cbuf[10];
3415 + u8 ibuf[3];
3416 ++ int ocr;
3417 ++
3418 ++ /* toggle C/R coding if not initiator */
3419 ++ ocr = cr ^ (gsm->initiator ? 0 : 1);
3420 +
3421 + switch (gsm->encoding) {
3422 + case 0:
3423 + cbuf[0] = GSM0_SOF;
3424 +- cbuf[1] = (addr << 2) | (cr << 1) | EA;
3425 ++ cbuf[1] = (addr << 2) | (ocr << 1) | EA;
3426 + cbuf[2] = control;
3427 + cbuf[3] = EA; /* Length of data = 0 */
3428 + cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
3429 +@@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
3430 + case 1:
3431 + case 2:
3432 + /* Control frame + packing (but not frame stuffing) in mode 1 */
3433 +- ibuf[0] = (addr << 2) | (cr << 1) | EA;
3434 ++ ibuf[0] = (addr << 2) | (ocr << 1) | EA;
3435 + ibuf[1] = control;
3436 + ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
3437 + /* Stuffing may double the size worst case */
3438 +@@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
3439 +
3440 + static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
3441 + {
3442 +- gsm_send(gsm, addr, 1, control);
3443 ++ gsm_send(gsm, addr, 0, control);
3444 + }
3445 +
3446 + /**
3447 +@@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
3448 + * @tty: virtual tty bound to the DLCI
3449 + * @dlci: DLCI to affect
3450 + * @modem: modem bits (full EA)
3451 +- * @clen: command length
3452 ++ * @slen: number of signal octets
3453 + *
3454 + * Used when a modem control message or line state inline in adaption
3455 + * layer 2 is processed. Sort out the local modem state and throttles
3456 + */
3457 +
3458 + static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
3459 +- u32 modem, int clen)
3460 ++ u32 modem, int slen)
3461 + {
3462 + int mlines = 0;
3463 + u8 brk = 0;
3464 + int fc;
3465 +
3466 +- /* The modem status command can either contain one octet (v.24 signals)
3467 +- or two octets (v.24 signals + break signals). The length field will
3468 +- either be 2 or 3 respectively. This is specified in section
3469 +- 5.4.6.3.7 of the 27.010 mux spec. */
3470 ++ /* The modem status command can either contain one octet (V.24 signals)
3471 ++ * or two octets (V.24 signals + break signals). This is specified in
3472 ++ * section 5.4.6.3.7 of the 07.10 mux spec.
3473 ++ */
3474 +
3475 +- if (clen == 2)
3476 ++ if (slen == 1)
3477 + modem = modem & 0x7f;
3478 + else {
3479 + brk = modem & 0x7f;
3480 +@@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
3481 + unsigned int brk = 0;
3482 + struct gsm_dlci *dlci;
3483 + int len = clen;
3484 ++ int slen;
3485 + const u8 *dp = data;
3486 + struct tty_struct *tty;
3487 +
3488 +@@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
3489 + return;
3490 + dlci = gsm->dlci[addr];
3491 +
3492 ++ slen = len;
3493 + while (gsm_read_ea(&modem, *dp++) == 0) {
3494 + len--;
3495 + if (len == 0)
3496 +@@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
3497 + modem |= (brk & 0x7f);
3498 + }
3499 + tty = tty_port_tty_get(&dlci->port);
3500 +- gsm_process_modem(tty, dlci, modem, clen);
3501 ++ gsm_process_modem(tty, dlci, modem, slen);
3502 + if (tty) {
3503 + tty_wakeup(tty);
3504 + tty_kref_put(tty);
3505 +@@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
3506 + if (dlci->addr != 0) {
3507 + tty_port_tty_hangup(&dlci->port, false);
3508 + kfifo_reset(&dlci->fifo);
3509 ++ /* Ensure that gsmtty_open() can return. */
3510 ++ tty_port_set_initialized(&dlci->port, 0);
3511 ++ wake_up_interruptible(&dlci->port.open_wait);
3512 + } else
3513 + dlci->gsm->dead = true;
3514 + /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
3515 +@@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t)
3516 + dlci->mode = DLCI_MODE_ADM;
3517 + gsm_dlci_open(dlci);
3518 + } else {
3519 +- gsm_dlci_close(dlci);
3520 ++ gsm_dlci_begin_close(dlci); /* prevent half open link */
3521 + }
3522 +
3523 + break;
3524 +@@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
3525 + struct tty_struct *tty;
3526 + unsigned int modem = 0;
3527 + int len = clen;
3528 ++ int slen = 0;
3529 +
3530 + if (debug & 16)
3531 + pr_debug("%d bytes for tty\n", len);
3532 +@@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
3533 + case 2: /* Asynchronous serial with line state in each frame */
3534 + while (gsm_read_ea(&modem, *data++) == 0) {
3535 + len--;
3536 ++ slen++;
3537 + if (len == 0)
3538 + return;
3539 + }
3540 ++ slen++;
3541 + tty = tty_port_tty_get(port);
3542 + if (tty) {
3543 +- gsm_process_modem(tty, dlci, modem, clen);
3544 ++ gsm_process_modem(tty, dlci, modem, slen);
3545 + tty_kref_put(tty);
3546 + }
3547 + fallthrough;
3548 +@@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
3549 + gsm_destroy_network(dlci);
3550 + mutex_unlock(&dlci->mutex);
3551 +
3552 +- tty_hangup(tty);
3553 ++ /* We cannot use tty_hangup() because in tty_kref_put() the tty
3554 ++ * driver assumes that the hangup queue is free and reuses it to
3555 ++ * queue release_one_tty() -> NULL pointer panic in
3556 ++ * process_one_work().
3557 ++ */
3558 ++ tty_vhangup(tty);
3559 +
3560 + tty_port_tty_set(&dlci->port, NULL);
3561 + tty_kref_put(tty);
3562 +@@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm)
3563 + goto invalid;
3564 +
3565 + cr = gsm->address & 1; /* C/R bit */
3566 ++ cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
3567 +
3568 + gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
3569 +
3570 +- cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
3571 + dlci = gsm->dlci[address];
3572 +
3573 + switch (gsm->control) {
3574 +@@ -3237,9 +3254,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
3575 + if (dlci->state == DLCI_CLOSED)
3576 + return;
3577 + if (C_CRTSCTS(tty))
3578 +- dlci->modem_tx &= ~TIOCM_DTR;
3579 ++ dlci->modem_tx &= ~TIOCM_RTS;
3580 + dlci->throttled = true;
3581 +- /* Send an MSC with DTR cleared */
3582 ++ /* Send an MSC with RTS cleared */
3583 + gsmtty_modem_update(dlci, 0);
3584 + }
3585 +
3586 +@@ -3249,9 +3266,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
3587 + if (dlci->state == DLCI_CLOSED)
3588 + return;
3589 + if (C_CRTSCTS(tty))
3590 +- dlci->modem_tx |= TIOCM_DTR;
3591 ++ dlci->modem_tx |= TIOCM_RTS;
3592 + dlci->throttled = false;
3593 +- /* Send an MSC with DTR set */
3594 ++ /* Send an MSC with RTS set */
3595 + gsmtty_modem_update(dlci, 0);
3596 + }
3597 +
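Several of the n_gsm changes above revolve around one 3GPP TS 27.010 detail: the C/R (command/response) bit is defined from the initiator's point of view, so the responder side has to flip it when encoding frames and normalize it when decoding (gsm_send() now derives ocr, and gsm_queue() flips the received bit before dispatch). A toy sketch of the toggle; encode_cr() is an illustrative name:

#include <stdio.h>

/* In 27.010 the C/R bit is defined from the initiator's side: the
 * responder sends commands with C/R = 0 and responses with C/R = 1.
 * Flipping with XOR lets one code path serve both roles.
 */
static unsigned int encode_cr(unsigned int cr, int initiator)
{
	return cr ^ (initiator ? 0 : 1);
}

int main(void)
{
	/* a command (cr=1) on the wire: 1 as initiator, 0 as responder */
	printf("%u %u\n", encode_cr(1, 1), encode_cr(1, 0));
	return 0;
}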
3598 +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
3599 +index 64e7e6c8145f8..38d1c0748533c 100644
3600 +--- a/drivers/tty/serial/sc16is7xx.c
3601 ++++ b/drivers/tty/serial/sc16is7xx.c
3602 +@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
3603 + static void sc16is7xx_tx_proc(struct kthread_work *ws)
3604 + {
3605 + struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
3606 ++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
3607 +
3608 + if ((port->rs485.flags & SER_RS485_ENABLED) &&
3609 + (port->rs485.delay_rts_before_send > 0))
3610 + msleep(port->rs485.delay_rts_before_send);
3611 +
3612 ++ mutex_lock(&s->efr_lock);
3613 + sc16is7xx_handle_tx(port);
3614 ++ mutex_unlock(&s->efr_lock);
3615 + }
3616 +
3617 + static void sc16is7xx_reconf_rs485(struct uart_port *port)
3618 +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
3619 +index 37185eb66ae4c..f76c30083fbc9 100644
3620 +--- a/drivers/usb/dwc2/core.h
3621 ++++ b/drivers/usb/dwc2/core.h
3622 +@@ -1416,6 +1416,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
3623 + void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
3624 + int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
3625 + #define dwc2_is_device_connected(hsotg) (hsotg->connected)
3626 ++#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
3627 + int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
3628 + int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
3629 + int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
3630 +@@ -1452,6 +1453,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
3631 + int testmode)
3632 + { return 0; }
3633 + #define dwc2_is_device_connected(hsotg) (0)
3634 ++#define dwc2_is_device_enabled(hsotg) (0)
3635 + static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
3636 + { return 0; }
3637 + static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
3638 +diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
3639 +index aa6eb76f64ddc..36f2c38416e5e 100644
3640 +--- a/drivers/usb/dwc2/drd.c
3641 ++++ b/drivers/usb/dwc2/drd.c
3642 +@@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
3643 + already = dwc2_ovr_avalid(hsotg, true);
3644 + } else if (role == USB_ROLE_DEVICE) {
3645 + already = dwc2_ovr_bvalid(hsotg, true);
3646 +- /* This clear DCTL.SFTDISCON bit */
3647 +- dwc2_hsotg_core_connect(hsotg);
3648 ++ if (dwc2_is_device_enabled(hsotg)) {
3649 ++ /* This clear DCTL.SFTDISCON bit */
3650 ++ dwc2_hsotg_core_connect(hsotg);
3651 ++ }
3652 + } else {
3653 + if (dwc2_is_device_mode(hsotg)) {
3654 + if (!dwc2_ovr_bvalid(hsotg, false))
3655 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3656 +index 7ff8fc8f79a9b..1ecedbb1684c8 100644
3657 +--- a/drivers/usb/dwc3/dwc3-pci.c
3658 ++++ b/drivers/usb/dwc3/dwc3-pci.c
3659 +@@ -85,8 +85,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
3660 + static struct gpiod_lookup_table platform_bytcr_gpios = {
3661 + .dev_id = "0000:00:16.0",
3662 + .table = {
3663 +- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
3664 +- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
3665 ++ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
3666 ++ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
3667 + {}
3668 + },
3669 + };
3670 +@@ -119,6 +119,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
3671 + {}
3672 + };
3673 +
3674 ++static const struct property_entry dwc3_pci_intel_byt_properties[] = {
3675 ++ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
3676 ++ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
3677 ++ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
3678 ++ {}
3679 ++};
3680 ++
3681 + static const struct property_entry dwc3_pci_mrfld_properties[] = {
3682 + PROPERTY_ENTRY_STRING("dr_mode", "otg"),
3683 + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
3684 +@@ -161,6 +168,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
3685 + .properties = dwc3_pci_intel_properties,
3686 + };
3687 +
3688 ++static const struct software_node dwc3_pci_intel_byt_swnode = {
3689 ++ .properties = dwc3_pci_intel_byt_properties,
3690 ++};
3691 ++
3692 + static const struct software_node dwc3_pci_intel_mrfld_swnode = {
3693 + .properties = dwc3_pci_mrfld_properties,
3694 + };
3695 +@@ -344,7 +355,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
3696 + (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3697 +
3698 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
3699 +- (kernel_ulong_t) &dwc3_pci_intel_swnode, },
3700 ++ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
3701 +
3702 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
3703 + (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
3704 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3705 +index 7aab9116b0256..0566a841dca25 100644
3706 +--- a/drivers/usb/dwc3/gadget.c
3707 ++++ b/drivers/usb/dwc3/gadget.c
3708 +@@ -4131,9 +4131,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
3709 + unsigned long flags;
3710 + irqreturn_t ret = IRQ_NONE;
3711 +
3712 ++ local_bh_disable();
3713 + spin_lock_irqsave(&dwc->lock, flags);
3714 + ret = dwc3_process_event_buf(evt);
3715 + spin_unlock_irqrestore(&dwc->lock, flags);
3716 ++ local_bh_enable();
3717 +
3718 + return ret;
3719 + }
3720 +diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
3721 +index d9ed651f06ac3..0f14c5291af07 100644
3722 +--- a/drivers/usb/gadget/function/rndis.c
3723 ++++ b/drivers/usb/gadget/function/rndis.c
3724 +@@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
3725 + params->resp_avail = resp_avail;
3726 + params->v = v;
3727 + INIT_LIST_HEAD(&params->resp_queue);
3728 ++ spin_lock_init(&params->resp_lock);
3729 + pr_debug("%s: configNr = %d\n", __func__, i);
3730 +
3731 + return params;
3732 +@@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
3733 + {
3734 + rndis_resp_t *r, *n;
3735 +
3736 ++ spin_lock(&params->resp_lock);
3737 + list_for_each_entry_safe(r, n, &params->resp_queue, list) {
3738 + if (r->buf == buf) {
3739 + list_del(&r->list);
3740 + kfree(r);
3741 + }
3742 + }
3743 ++ spin_unlock(&params->resp_lock);
3744 + }
3745 + EXPORT_SYMBOL_GPL(rndis_free_response);
3746 +
3747 +@@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
3748 +
3749 + if (!length) return NULL;
3750 +
3751 ++ spin_lock(&params->resp_lock);
3752 + list_for_each_entry_safe(r, n, &params->resp_queue, list) {
3753 + if (!r->send) {
3754 + r->send = 1;
3755 + *length = r->length;
3756 ++ spin_unlock(&params->resp_lock);
3757 + return r->buf;
3758 + }
3759 + }
3760 +
3761 ++ spin_unlock(&params->resp_lock);
3762 + return NULL;
3763 + }
3764 + EXPORT_SYMBOL_GPL(rndis_get_next_response);
3765 +@@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
3766 + r->length = length;
3767 + r->send = 0;
3768 +
3769 ++ spin_lock(&params->resp_lock);
3770 + list_add_tail(&r->list, &params->resp_queue);
3771 ++ spin_unlock(&params->resp_lock);
3772 + return r;
3773 + }
3774 +
3775 +diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
3776 +index f6167f7fea82b..6206b8b7490f6 100644
3777 +--- a/drivers/usb/gadget/function/rndis.h
3778 ++++ b/drivers/usb/gadget/function/rndis.h
3779 +@@ -174,6 +174,7 @@ typedef struct rndis_params {
3780 + void (*resp_avail)(void *v);
3781 + void *v;
3782 + struct list_head resp_queue;
3783 ++ spinlock_t resp_lock;
3784 + } rndis_params;
3785 +
3786 + /* RNDIS Message parser and other useless functions */
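The rndis fix above adds resp_lock and takes it around every traversal of resp_queue, which was previously walked and modified from different contexts with no serialization. A minimal pthread analogue of the pattern, assuming a hand-rolled singly linked list in place of the kernel's list_head (struct resp, add_response() and free_response() are stand-ins):

#include <pthread.h>
#include <stdlib.h>

/* Minimal analogue of the rndis fix: every reader and writer of the
 * shared response list takes the same lock, including the "free by
 * buffer pointer" walk that deletes entries mid-traversal.
 */
struct resp {
	struct resp *next;
	void *buf;
};

static struct resp *resp_queue;
static pthread_mutex_t resp_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_response(struct resp *r)
{
	pthread_mutex_lock(&resp_lock);
	r->next = resp_queue;
	resp_queue = r;
	pthread_mutex_unlock(&resp_lock);
}

static void free_response(void *buf)
{
	struct resp **pp;

	pthread_mutex_lock(&resp_lock);
	for (pp = &resp_queue; *pp; ) {
		if ((*pp)->buf == buf) {
			struct resp *dead = *pp;

			*pp = dead->next;	/* unlink before freeing */
			free(dead);
		} else {
			pp = &(*pp)->next;
		}
	}
	pthread_mutex_unlock(&resp_lock);
}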
3787 +diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
3788 +index 857159dd5ae05..540824534e962 100644
3789 +--- a/drivers/usb/gadget/udc/udc-xilinx.c
3790 ++++ b/drivers/usb/gadget/udc/udc-xilinx.c
3791 +@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
3792 + break;
3793 + case USB_RECIP_ENDPOINT:
3794 + epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
3795 ++ if (epnum >= XUSB_MAX_ENDPOINTS)
3796 ++ goto stall;
3797 + target_ep = &udc->ep[epnum];
3798 + epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
3799 + halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
3800 +@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
3801 + case USB_RECIP_ENDPOINT:
3802 + if (!udc->setup.wValue) {
3803 + endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
3804 ++ if (endpoint >= XUSB_MAX_ENDPOINTS) {
3805 ++ xudc_ep0_stall(udc);
3806 ++ return;
3807 ++ }
3808 + target_ep = &udc->ep[endpoint];
3809 + outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
3810 + outinbit = outinbit >> 7;
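Both xudc hunks above add the same guard: the endpoint number comes from host-controlled wIndex, and masking with USB_ENDPOINT_NUMBER_MASK still allows values past the end of udc->ep[], so out-of-range requests now stall ep0 instead of indexing out of bounds. A hedged sketch; the constants and struct here are stand-ins, not the driver's definitions:

#include <stddef.h>

#define MAX_ENDPOINTS 8           /* stands in for XUSB_MAX_ENDPOINTS */
#define ENDPOINT_NUMBER_MASK 0x0f /* stands in for USB_ENDPOINT_NUMBER_MASK */

struct ep { int halted; };
static struct ep eps[MAX_ENDPOINTS];

/* The host fully controls wIndex; masking alone still yields values up
 * to 15, so an explicit bound check is needed before the array access.
 */
static struct ep *lookup_ep(unsigned int windex)
{
	unsigned int num = windex & ENDPOINT_NUMBER_MASK;

	if (num >= MAX_ENDPOINTS)
		return NULL;	/* caller stalls ep0 instead */
	return &eps[num];
}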
3811 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3812 +index f5b1bcc875ded..d7c0bf494d930 100644
3813 +--- a/drivers/usb/host/xhci.c
3814 ++++ b/drivers/usb/host/xhci.c
3815 +@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3816 + int retval = 0;
3817 + bool comp_timer_running = false;
3818 + bool pending_portevent = false;
3819 ++ bool reinit_xhc = false;
3820 +
3821 + if (!hcd->state)
3822 + return 0;
3823 +@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3824 + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
3825 +
3826 + spin_lock_irq(&xhci->lock);
3827 +- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
3828 +- hibernated = true;
3829 +
3830 +- if (!hibernated) {
3831 ++ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
3832 ++ reinit_xhc = true;
3833 ++
3834 ++ if (!reinit_xhc) {
3835 + /*
3836 + * Some controllers might lose power during suspend, so wait
3837 + * for controller not ready bit to clear, just as in xHC init.
3838 +@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3839 + spin_unlock_irq(&xhci->lock);
3840 + return -ETIMEDOUT;
3841 + }
3842 +- temp = readl(&xhci->op_regs->status);
3843 + }
3844 +
3845 +- /* If restore operation fails, re-initialize the HC during resume */
3846 +- if ((temp & STS_SRE) || hibernated) {
3847 ++ temp = readl(&xhci->op_regs->status);
3848 +
3849 ++ /* re-initialize the HC on Restore Error, or Host Controller Error */
3850 ++ if (temp & (STS_SRE | STS_HCE)) {
3851 ++ reinit_xhc = true;
3852 ++ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
3853 ++ }
3854 ++
3855 ++ if (reinit_xhc) {
3856 + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
3857 + !(xhci_all_ports_seen_u0(xhci))) {
3858 + del_timer_sync(&xhci->comp_mode_recovery_timer);
3859 +@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
3860 + struct urb_priv *urb_priv;
3861 + int num_tds;
3862 +
3863 +- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
3864 +- true, true, __func__) <= 0)
3865 ++ if (!urb)
3866 + return -EINVAL;
3867 ++ ret = xhci_check_args(hcd, urb->dev, urb->ep,
3868 ++ true, true, __func__);
3869 ++ if (ret <= 0)
3870 ++ return ret ? ret : -EINVAL;
3871 +
3872 + slot_id = urb->dev->slot_id;
3873 + ep_index = xhci_get_endpoint_index(&urb->ep->desc);
3874 +@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3875 + return -EINVAL;
3876 + ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3877 + if (ret <= 0)
3878 +- return -EINVAL;
3879 ++ return ret ? ret : -EINVAL;
3880 + if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3881 + xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3882 + " descriptor for ep 0x%x does not support streams\n",
3883 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3884 +index 58cba8ee0277a..2798fca712612 100644
3885 +--- a/drivers/usb/serial/ch341.c
3886 ++++ b/drivers/usb/serial/ch341.c
3887 +@@ -81,7 +81,6 @@
3888 + #define CH341_QUIRK_SIMULATE_BREAK BIT(1)
3889 +
3890 + static const struct usb_device_id id_table[] = {
3891 +- { USB_DEVICE(0x1a86, 0x5512) },
3892 + { USB_DEVICE(0x1a86, 0x5523) },
3893 + { USB_DEVICE(0x1a86, 0x7522) },
3894 + { USB_DEVICE(0x1a86, 0x7523) },
3895 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3896 +index 962e9943fc20e..e7755d9cfc61a 100644
3897 +--- a/drivers/usb/serial/option.c
3898 ++++ b/drivers/usb/serial/option.c
3899 +@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
3900 +
3901 + #define DELL_PRODUCT_5821E 0x81d7
3902 + #define DELL_PRODUCT_5821E_ESIM 0x81e0
3903 ++#define DELL_PRODUCT_5829E_ESIM 0x81e4
3904 ++#define DELL_PRODUCT_5829E 0x81e6
3905 +
3906 + #define KYOCERA_VENDOR_ID 0x0c88
3907 + #define KYOCERA_PRODUCT_KPC650 0x17da
3908 +@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
3909 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3910 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
3911 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3912 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
3913 ++ .driver_info = RSVD(0) | RSVD(6) },
3914 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
3915 ++ .driver_info = RSVD(0) | RSVD(6) },
3916 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
3917 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
3918 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
3919 +@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
3920 + .driver_info = NCTRL(2) },
3921 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
3922 + .driver_info = NCTRL(2) },
3923 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
3924 ++ .driver_info = NCTRL(2) },
3925 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
3926 ++ .driver_info = NCTRL(2) },
3927 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
3928 + .driver_info = NCTRL(0) | ZLP },
3929 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
3930 + .driver_info = NCTRL(0) | ZLP },
3931 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
3932 ++ .driver_info = NCTRL(0) | ZLP },
3933 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
3934 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
3935 + .driver_info = RSVD(1) },
3936 +diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
3937 +index 6d27a5b5e3cac..7ffcda94d323a 100644
3938 +--- a/drivers/usb/typec/tipd/core.c
3939 ++++ b/drivers/usb/typec/tipd/core.c
3940 +@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client)
3941 +
3942 + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
3943 + if (ret < 0)
3944 +- return ret;
3945 ++ goto err_clear_mask;
3946 + trace_tps6598x_status(status);
3947 +
3948 + ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
3949 + if (ret < 0)
3950 +- return ret;
3951 ++ goto err_clear_mask;
3952 +
3953 + /*
3954 + * This fwnode has a "compatible" property, but is never populated as a
3955 +@@ -855,7 +855,8 @@ err_role_put:
3956 + usb_role_switch_put(tps->role_sw);
3957 + err_fwnode_put:
3958 + fwnode_handle_put(fwnode);
3959 +-
3960 ++err_clear_mask:
3961 ++ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
3962 + return ret;
3963 + }
3964 +
3965 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
3966 +index d6ca1c7ad513f..37f0b4274113c 100644
3967 +--- a/drivers/vhost/vsock.c
3968 ++++ b/drivers/vhost/vsock.c
3969 +@@ -629,16 +629,18 @@ err:
3970 + return ret;
3971 + }
3972 +
3973 +-static int vhost_vsock_stop(struct vhost_vsock *vsock)
3974 ++static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
3975 + {
3976 + size_t i;
3977 +- int ret;
3978 ++ int ret = 0;
3979 +
3980 + mutex_lock(&vsock->dev.mutex);
3981 +
3982 +- ret = vhost_dev_check_owner(&vsock->dev);
3983 +- if (ret)
3984 +- goto err;
3985 ++ if (check_owner) {
3986 ++ ret = vhost_dev_check_owner(&vsock->dev);
3987 ++ if (ret)
3988 ++ goto err;
3989 ++ }
3990 +
3991 + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
3992 + struct vhost_virtqueue *vq = &vsock->vqs[i];
3993 +@@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
3994 + * inefficient. Room for improvement here. */
3995 + vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
3996 +
3997 +- vhost_vsock_stop(vsock);
3998 ++ /* Don't check the owner, because we are in the release path, so we
3999 ++ * need to stop the vsock device in any case.
4000 ++ * vhost_vsock_stop() can not fail in this case, so we don't need to
4001 ++ * check the return code.
4002 ++ */
4003 ++ vhost_vsock_stop(vsock, false);
4004 + vhost_vsock_flush(vsock);
4005 + vhost_dev_stop(&vsock->dev);
4006 +
4007 +@@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
4008 + if (start)
4009 + return vhost_vsock_start(vsock);
4010 + else
4011 +- return vhost_vsock_stop(vsock);
4012 ++ return vhost_vsock_stop(vsock, true);
4013 + case VHOST_GET_FEATURES:
4014 + features = VHOST_VSOCK_FEATURES;
4015 + if (copy_to_user(argp, &features, sizeof(features)))
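The vhost/vsock change threads a check_owner flag through vhost_vsock_stop(): the ioctl path still verifies ownership, but the release path has no owner left to check and must stop the device unconditionally. A condensed sketch of the split; vsock_stop() and dev_check_owner() are stand-ins:

#include <stdbool.h>

static int dev_check_owner(void) { return 0; /* stand-in for the real check */ }

/* The ioctl path must verify the caller owns the device, but the
 * release path has no owner to check and must always succeed. A
 * boolean parameter lets both share one stop routine.
 */
static int vsock_stop(bool check_owner)
{
	if (check_owner) {
		int ret = dev_check_owner();

		if (ret)
			return ret;
	}
	/* ... tear down the virtqueues ... */
	return 0;
}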
4016 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4017 +index 5fe5eccb3c874..269094176b8b3 100644
4018 +--- a/fs/btrfs/ctree.h
4019 ++++ b/fs/btrfs/ctree.h
4020 +@@ -3315,7 +3315,7 @@ void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
4021 + int __init btrfs_auto_defrag_init(void);
4022 + void __cold btrfs_auto_defrag_exit(void);
4023 + int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
4024 +- struct btrfs_inode *inode);
4025 ++ struct btrfs_inode *inode, u32 extent_thresh);
4026 + int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
4027 + void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
4028 + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
4029 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
4030 +index 11204dbbe0530..a0179cc62913b 100644
4031 +--- a/fs/btrfs/file.c
4032 ++++ b/fs/btrfs/file.c
4033 +@@ -50,11 +50,14 @@ struct inode_defrag {
4034 + /* root objectid */
4035 + u64 root;
4036 +
4037 +- /* last offset we were able to defrag */
4038 +- u64 last_offset;
4039 +-
4040 +- /* if we've wrapped around back to zero once already */
4041 +- int cycled;
4042 ++ /*
4043 ++ * The extent size threshold for autodefrag.
4044 ++ *
4045 ++ * This value is different for compressed/non-compressed extents,
4046 ++ * thus needs to be passed from higher layer.
4047 ++ * (aka, inode_should_defrag())
4048 ++ */
4049 ++ u32 extent_thresh;
4050 + };
4051 +
4052 + static int __compare_inode_defrag(struct inode_defrag *defrag1,
4053 +@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
4054 + */
4055 + if (defrag->transid < entry->transid)
4056 + entry->transid = defrag->transid;
4057 +- if (defrag->last_offset > entry->last_offset)
4058 +- entry->last_offset = defrag->last_offset;
4059 ++ entry->extent_thresh = min(defrag->extent_thresh,
4060 ++ entry->extent_thresh);
4061 + return -EEXIST;
4062 + }
4063 + }
4064 +@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
4065 + * enabled
4066 + */
4067 + int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
4068 +- struct btrfs_inode *inode)
4069 ++ struct btrfs_inode *inode, u32 extent_thresh)
4070 + {
4071 + struct btrfs_root *root = inode->root;
4072 + struct btrfs_fs_info *fs_info = root->fs_info;
4073 +@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
4074 + defrag->ino = btrfs_ino(inode);
4075 + defrag->transid = transid;
4076 + defrag->root = root->root_key.objectid;
4077 ++ defrag->extent_thresh = extent_thresh;
4078 +
4079 + spin_lock(&fs_info->defrag_inodes_lock);
4080 + if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
4081 +@@ -178,34 +182,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
4082 + return 0;
4083 + }
4084 +
4085 +-/*
4086 +- * Requeue the defrag object. If there is a defrag object that points to
4087 +- * the same inode in the tree, we will merge them together (by
4088 +- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
4089 +- */
4090 +-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
4091 +- struct inode_defrag *defrag)
4092 +-{
4093 +- struct btrfs_fs_info *fs_info = inode->root->fs_info;
4094 +- int ret;
4095 +-
4096 +- if (!__need_auto_defrag(fs_info))
4097 +- goto out;
4098 +-
4099 +- /*
4100 +- * Here we don't check the IN_DEFRAG flag, because we need merge
4101 +- * them together.
4102 +- */
4103 +- spin_lock(&fs_info->defrag_inodes_lock);
4104 +- ret = __btrfs_add_inode_defrag(inode, defrag);
4105 +- spin_unlock(&fs_info->defrag_inodes_lock);
4106 +- if (ret)
4107 +- goto out;
4108 +- return;
4109 +-out:
4110 +- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
4111 +-}
4112 +-
4113 + /*
4114 + * pick the defragable inode that we want, if it doesn't exist, we will get
4115 + * the next one.
4116 +@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
4117 + struct btrfs_root *inode_root;
4118 + struct inode *inode;
4119 + struct btrfs_ioctl_defrag_range_args range;
4120 +- int num_defrag;
4121 +- int ret;
4122 ++ int ret = 0;
4123 ++ u64 cur = 0;
4124 ++
4125 ++again:
4126 ++ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
4127 ++ goto cleanup;
4128 ++ if (!__need_auto_defrag(fs_info))
4129 ++ goto cleanup;
4130 +
4131 + /* get the inode */
4132 + inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
4133 +@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
4134 + goto cleanup;
4135 + }
4136 +
4137 ++ if (cur >= i_size_read(inode)) {
4138 ++ iput(inode);
4139 ++ goto cleanup;
4140 ++ }
4141 ++
4142 + /* do a chunk of defrag */
4143 + clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
4144 + memset(&range, 0, sizeof(range));
4145 + range.len = (u64)-1;
4146 +- range.start = defrag->last_offset;
4147 ++ range.start = cur;
4148 ++ range.extent_thresh = defrag->extent_thresh;
4149 +
4150 + sb_start_write(fs_info->sb);
4151 +- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
4152 ++ ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
4153 + BTRFS_DEFRAG_BATCH);
4154 + sb_end_write(fs_info->sb);
4155 +- /*
4156 +- * if we filled the whole defrag batch, there
4157 +- * must be more work to do. Queue this defrag
4158 +- * again
4159 +- */
4160 +- if (num_defrag == BTRFS_DEFRAG_BATCH) {
4161 +- defrag->last_offset = range.start;
4162 +- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
4163 +- } else if (defrag->last_offset && !defrag->cycled) {
4164 +- /*
4165 +- * we didn't fill our defrag batch, but
4166 +- * we didn't start at zero. Make sure we loop
4167 +- * around to the start of the file.
4168 +- */
4169 +- defrag->last_offset = 0;
4170 +- defrag->cycled = 1;
4171 +- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
4172 +- } else {
4173 +- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
4174 +- }
4175 +-
4176 + iput(inode);
4177 +- return 0;
4178 ++
4179 ++ if (ret < 0)
4180 ++ goto cleanup;
4181 ++
4182 ++ cur = max(cur + fs_info->sectorsize, range.start);
4183 ++ goto again;
4184 ++
4185 + cleanup:
4186 + kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
4187 + return ret;
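The autodefrag rework above replaces the requeue machinery (last_offset/cycled plus btrfs_requeue_inode_defrag()) with a forward-only cursor inside __btrfs_run_defrag_inode(): each btrfs_defrag_file() pass reports where it stopped via range.start, and the loop advances by at least one sector until EOF or an error. A schematic of that loop shape, with scan_one() as a stand-in for one defrag pass:

#include <stdint.h>

/* Stand-in for one btrfs_defrag_file() pass; a real pass sets *resume
 * to the offset where scanning actually stopped. */
static void scan_one(uint64_t cur, uint64_t *resume)
{
	*resume = cur + 256 * 1024;	/* pretend one 256K cluster was done */
}

/* Forward-only defrag loop: no requeue, no wrap-around. Progress is
 * guaranteed because the cursor moves by at least one sector even if a
 * pass reports no forward movement.
 */
static void defrag_all(uint64_t isize, uint64_t sectorsize)
{
	uint64_t cur = 0;

	while (cur < isize) {
		uint64_t resume = cur;

		scan_one(cur, &resume);
		cur = (cur + sectorsize > resume) ? cur + sectorsize : resume;
	}
}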
4188 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4189 +index 39a6745434613..3be5735372ee8 100644
4190 +--- a/fs/btrfs/inode.c
4191 ++++ b/fs/btrfs/inode.c
4192 +@@ -561,12 +561,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
4193 + }
4194 +
4195 + static inline void inode_should_defrag(struct btrfs_inode *inode,
4196 +- u64 start, u64 end, u64 num_bytes, u64 small_write)
4197 ++ u64 start, u64 end, u64 num_bytes, u32 small_write)
4198 + {
4199 + /* If this is a small write inside eof, kick off a defrag */
4200 + if (num_bytes < small_write &&
4201 + (start > 0 || end + 1 < inode->disk_i_size))
4202 +- btrfs_add_inode_defrag(NULL, inode);
4203 ++ btrfs_add_inode_defrag(NULL, inode, small_write);
4204 + }
4205 +
4206 + /*
4207 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4208 +index cec7163bc8730..541a4fbfd79ec 100644
4209 +--- a/fs/btrfs/ioctl.c
4210 ++++ b/fs/btrfs/ioctl.c
4211 +@@ -1020,23 +1020,37 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
4212 + return em;
4213 + }
4214 +
4215 ++static u32 get_extent_max_capacity(const struct extent_map *em)
4216 ++{
4217 ++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4218 ++ return BTRFS_MAX_COMPRESSED;
4219 ++ return BTRFS_MAX_EXTENT_SIZE;
4220 ++}
4221 ++
4222 + static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
4223 + bool locked)
4224 + {
4225 + struct extent_map *next;
4226 +- bool ret = true;
4227 ++ bool ret = false;
4228 +
4229 + /* this is the last extent */
4230 + if (em->start + em->len >= i_size_read(inode))
4231 + return false;
4232 +
4233 + next = defrag_lookup_extent(inode, em->start + em->len, locked);
4234 ++ /* No more em or hole */
4235 + if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
4236 +- ret = false;
4237 +- else if ((em->block_start + em->block_len == next->block_start) &&
4238 +- (em->block_len > SZ_128K && next->block_len > SZ_128K))
4239 +- ret = false;
4240 +-
4241 ++ goto out;
4242 ++ if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
4243 ++ goto out;
4244 ++ /*
4245 ++ * If the next extent is at its max capacity, defragging current extent
4246 ++ * makes no sense, as the total number of extents won't change.
4247 ++ */
4248 ++ if (next->len >= get_extent_max_capacity(em))
4249 ++ goto out;
4250 ++ ret = true;
4251 ++out:
4252 + free_extent_map(next);
4253 + return ret;
4254 + }
4255 +@@ -1160,8 +1174,10 @@ struct defrag_target_range {
4256 + static int defrag_collect_targets(struct btrfs_inode *inode,
4257 + u64 start, u64 len, u32 extent_thresh,
4258 + u64 newer_than, bool do_compress,
4259 +- bool locked, struct list_head *target_list)
4260 ++ bool locked, struct list_head *target_list,
4261 ++ u64 *last_scanned_ret)
4262 + {
4263 ++ bool last_is_target = false;
4264 + u64 cur = start;
4265 + int ret = 0;
4266 +
4267 +@@ -1171,6 +1187,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
4268 + bool next_mergeable = true;
4269 + u64 range_len;
4270 +
4271 ++ last_is_target = false;
4272 + em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
4273 + if (!em)
4274 + break;
4275 +@@ -1228,6 +1245,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
4276 + if (range_len >= extent_thresh)
4277 + goto next;
4278 +
4279 ++ /*
4280 ++		 * Skip extents already at their max capacity; this is mostly
4281 ++		 * for compressed extents, whose max capacity is only 128K.
4282 ++ */
4283 ++ if (em->len >= get_extent_max_capacity(em))
4284 ++ goto next;
4285 ++
4286 + next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
4287 + locked);
4288 + if (!next_mergeable) {
4289 +@@ -1246,6 +1270,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
4290 + }
4291 +
4292 + add:
4293 ++ last_is_target = true;
4294 + range_len = min(extent_map_end(em), start + len) - cur;
4295 + /*
4296 + * This one is a good target, check if it can be merged into
4297 +@@ -1289,6 +1314,17 @@ next:
4298 + kfree(entry);
4299 + }
4300 + }
4301 ++ if (!ret && last_scanned_ret) {
4302 ++ /*
4303 ++ * If the last extent is not a target, the caller can skip to
4304 ++ * the end of that extent.
4305 ++		 * Otherwise, we can only go to the end of the specified range.
4306 ++ */
4307 ++ if (!last_is_target)
4308 ++ *last_scanned_ret = max(cur, *last_scanned_ret);
4309 ++ else
4310 ++ *last_scanned_ret = max(start + len, *last_scanned_ret);
4311 ++ }
4312 + return ret;
4313 + }
4314 +
4315 +@@ -1347,7 +1383,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
4316 + }
4317 +
4318 + static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
4319 +- u32 extent_thresh, u64 newer_than, bool do_compress)
4320 ++ u32 extent_thresh, u64 newer_than, bool do_compress,
4321 ++ u64 *last_scanned_ret)
4322 + {
4323 + struct extent_state *cached_state = NULL;
4324 + struct defrag_target_range *entry;
4325 +@@ -1393,7 +1430,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
4326 + */
4327 + ret = defrag_collect_targets(inode, start, len, extent_thresh,
4328 + newer_than, do_compress, true,
4329 +- &target_list);
4330 ++ &target_list, last_scanned_ret);
4331 + if (ret < 0)
4332 + goto unlock_extent;
4333 +
4334 +@@ -1428,7 +1465,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
4335 + u64 start, u32 len, u32 extent_thresh,
4336 + u64 newer_than, bool do_compress,
4337 + unsigned long *sectors_defragged,
4338 +- unsigned long max_sectors)
4339 ++ unsigned long max_sectors,
4340 ++ u64 *last_scanned_ret)
4341 + {
4342 + const u32 sectorsize = inode->root->fs_info->sectorsize;
4343 + struct defrag_target_range *entry;
4344 +@@ -1439,7 +1477,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
4345 + BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
4346 + ret = defrag_collect_targets(inode, start, len, extent_thresh,
4347 + newer_than, do_compress, false,
4348 +- &target_list);
4349 ++ &target_list, NULL);
4350 + if (ret < 0)
4351 + goto out;
4352 +
4353 +@@ -1456,6 +1494,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
4354 + range_len = min_t(u32, range_len,
4355 + (max_sectors - *sectors_defragged) * sectorsize);
4356 +
4357 ++ /*
4358 ++ * If defrag_one_range() has updated last_scanned_ret,
4359 ++ * our range may already be invalid (e.g. hole punched).
4360 ++ * Skip if our range is before last_scanned_ret, as there is
4361 ++ * no need to defrag the range anymore.
4362 ++ */
4363 ++ if (entry->start + range_len <= *last_scanned_ret)
4364 ++ continue;
4365 ++
4366 + if (ra)
4367 + page_cache_sync_readahead(inode->vfs_inode.i_mapping,
4368 + ra, NULL, entry->start >> PAGE_SHIFT,
4369 +@@ -1468,7 +1515,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
4370 + * accounting.
4371 + */
4372 + ret = defrag_one_range(inode, entry->start, range_len,
4373 +- extent_thresh, newer_than, do_compress);
4374 ++ extent_thresh, newer_than, do_compress,
4375 ++ last_scanned_ret);
4376 + if (ret < 0)
4377 + break;
4378 + *sectors_defragged += range_len >>
4379 +@@ -1479,6 +1527,8 @@ out:
4380 + list_del_init(&entry->list);
4381 + kfree(entry);
4382 + }
4383 ++ if (ret >= 0)
4384 ++ *last_scanned_ret = max(*last_scanned_ret, start + len);
4385 + return ret;
4386 + }
4387 +
4388 +@@ -1564,6 +1614,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
4389 +
4390 + while (cur < last_byte) {
4391 + const unsigned long prev_sectors_defragged = sectors_defragged;
4392 ++ u64 last_scanned = cur;
4393 + u64 cluster_end;
4394 +
4395 + /* The cluster size 256K should always be page aligned */
4396 +@@ -1593,8 +1644,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
4397 + BTRFS_I(inode)->defrag_compress = compress_type;
4398 + ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
4399 + cluster_end + 1 - cur, extent_thresh,
4400 +- newer_than, do_compress,
4401 +- &sectors_defragged, max_to_defrag);
4402 ++ newer_than, do_compress, &sectors_defragged,
4403 ++ max_to_defrag, &last_scanned);
4404 +
4405 + if (sectors_defragged > prev_sectors_defragged)
4406 + balance_dirty_pages_ratelimited(inode->i_mapping);
4407 +@@ -1602,7 +1653,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
4408 + btrfs_inode_unlock(inode, 0);
4409 + if (ret < 0)
4410 + break;
4411 +- cur = cluster_end + 1;
4412 ++ cur = max(cluster_end + 1, last_scanned);
4413 + if (ret > 0) {
4414 + ret = 0;
4415 + break;
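
The defrag hunks above thread a last_scanned cursor out of the cluster pass so the outer loop can jump past extents that were already examined instead of rescanning them. A minimal userspace sketch of that loop behaviour, with defrag_one_cluster_sketch() as a hypothetical stand-in for the kernel function:

#include <stdio.h>

#define CLUSTER_SIZE 256

/*
 * Stand-in for the patched defrag_one_cluster(): it reports how far it
 * actually scanned, which may run past the cluster when the last extent
 * examined extends beyond it.
 */
static unsigned long defrag_one_cluster_sketch(unsigned long start,
					       unsigned long len)
{
	/* pretend the tail extent reaches 100 bytes past this cluster */
	return start + len + 100;
}

int main(void)
{
	unsigned long cur = 0, last_byte = 1024;

	while (cur < last_byte) {
		unsigned long cluster_end = cur + CLUSTER_SIZE - 1;
		unsigned long last_scanned =
			defrag_one_cluster_sketch(cur, CLUSTER_SIZE);
		/* cur = max(cluster_end + 1, last_scanned), as in the patch */
		unsigned long next = cluster_end + 1 > last_scanned ?
				     cluster_end + 1 : last_scanned;

		printf("cluster [%lu, %lu] -> resume at %lu\n",
		       cur, cluster_end, next);
		cur = next;
	}
	return 0;
}
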
4416 +diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
4417 +index 0fb90cbe76697..e6e28a9c79877 100644
4418 +--- a/fs/btrfs/lzo.c
4419 ++++ b/fs/btrfs/lzo.c
4420 +@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
4421 + kunmap(cur_page);
4422 + cur_in += LZO_LEN;
4423 +
4424 ++ if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
4425 ++ /*
4426 ++	 * seg_len shouldn't be larger than what we have allocated
4427 ++	 * for workspace->cbuf
4428 ++ */
4429 ++ btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
4430 ++ seg_len);
4431 ++ ret = -EIO;
4432 ++ goto out;
4433 ++ }
4434 ++
4435 + /* Copy the compressed segment payload into workspace */
4436 + copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
4437 +
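
The lzo hunk is a standard untrusted-length check: a segment length read from disk is validated against the fixed workspace buffer before anything is copied into it. A self-contained sketch of the same pattern, where CBUF_SIZE stands in for lzo1x_worst_compress(PAGE_SIZE):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CBUF_SIZE 4096	/* stand-in for the allocated workspace size */

/* Reject a length field read from untrusted input before memcpy(). */
static int copy_segment(uint8_t *cbuf, const uint8_t *src, uint32_t seg_len)
{
	if (seg_len > CBUF_SIZE) {
		fprintf(stderr, "unexpectedly large segment len %u\n",
			seg_len);
		return -1;	/* the kernel returns -EIO here */
	}
	memcpy(cbuf, src, seg_len);
	return 0;
}

int main(void)
{
	static uint8_t cbuf[CBUF_SIZE], src[CBUF_SIZE];

	printf("ok: %d\n", copy_segment(cbuf, src, 100));
	printf("rejected: %d\n", copy_segment(cbuf, src, 1u << 20));
	return 0;
}
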
4438 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
4439 +index 7733e8ac0a698..51382d2be3d44 100644
4440 +--- a/fs/btrfs/tree-checker.c
4441 ++++ b/fs/btrfs/tree-checker.c
4442 +@@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf,
4443 + struct btrfs_key *key, int slot)
4444 + {
4445 + struct btrfs_dev_item *ditem;
4446 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
4447 +
4448 + if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
4449 + dev_item_err(leaf, slot,
4450 +@@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf,
4451 + key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
4452 + return -EUCLEAN;
4453 + }
4454 ++
4455 ++ if (unlikely(item_size != sizeof(*ditem))) {
4456 ++ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
4457 ++ item_size, sizeof(*ditem));
4458 ++ return -EUCLEAN;
4459 ++ }
4460 ++
4461 + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
4462 + if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
4463 + dev_item_err(leaf, slot,
4464 +@@ -1007,6 +1015,7 @@ static int check_inode_item(struct extent_buffer *leaf,
4465 + struct btrfs_inode_item *iitem;
4466 + u64 super_gen = btrfs_super_generation(fs_info->super_copy);
4467 + u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
4468 ++ const u32 item_size = btrfs_item_size_nr(leaf, slot);
4469 + u32 mode;
4470 + int ret;
4471 + u32 flags;
4472 +@@ -1016,6 +1025,12 @@ static int check_inode_item(struct extent_buffer *leaf,
4473 + if (unlikely(ret < 0))
4474 + return ret;
4475 +
4476 ++ if (unlikely(item_size != sizeof(*iitem))) {
4477 ++ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
4478 ++ item_size, sizeof(*iitem));
4479 ++ return -EUCLEAN;
4480 ++ }
4481 ++
4482 + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
4483 +
4484 + /* Here we use super block generation + 1 to handle log tree */
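
Both tree-checker hunks apply the same rule: validate the recorded item size before casting the raw leaf bytes to a structure. A userspace sketch of that pattern with a simplified item (the real struct btrfs_dev_item has many more fields):

#include <stdint.h>
#include <stdio.h>

struct dev_item { uint64_t devid; uint64_t total_bytes; };

static int check_item(const void *buf, uint32_t item_size)
{
	if (item_size != sizeof(struct dev_item)) {
		fprintf(stderr, "invalid item size: has %u expect %zu\n",
			item_size, sizeof(struct dev_item));
		return -1;	/* would be -EUCLEAN in the kernel */
	}
	/* only now is it safe to interpret the bytes as the struct */
	const struct dev_item *di = buf;

	printf("devid=%llu\n", (unsigned long long)di->devid);
	return 0;
}

int main(void)
{
	struct dev_item di = { .devid = 7, .total_bytes = 1 << 20 };

	check_item(&di, sizeof(di));	/* accepted */
	check_item(&di, 8);		/* rejected: truncated item */
	return 0;
}
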
4485 +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
4486 +index d3cd2a94d1e8c..d1f9d26322027 100644
4487 +--- a/fs/configfs/dir.c
4488 ++++ b/fs/configfs/dir.c
4489 +@@ -34,6 +34,14 @@
4490 + */
4491 + DEFINE_SPINLOCK(configfs_dirent_lock);
4492 +
4493 ++/*
4494 ++ * All of link_obj/unlink_obj/link_group/unlink_group require that
4495 ++ * subsys->su_mutex is held.
4496 ++ * But the parent configfs_subsystem is NULL when the config_item is the
4497 ++ * root, so use this mutex when operating on the root config_item.
4498 ++ */
4499 ++static DEFINE_MUTEX(configfs_subsystem_mutex);
4500 ++
4501 + static void configfs_d_iput(struct dentry * dentry,
4502 + struct inode * inode)
4503 + {
4504 +@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
4505 + group->cg_item.ci_name = group->cg_item.ci_namebuf;
4506 +
4507 + sd = root->d_fsdata;
4508 ++ mutex_lock(&configfs_subsystem_mutex);
4509 + link_group(to_config_group(sd->s_element), group);
4510 ++ mutex_unlock(&configfs_subsystem_mutex);
4511 +
4512 + inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
4513 +
4514 +@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
4515 + inode_unlock(d_inode(root));
4516 +
4517 + if (err) {
4518 ++ mutex_lock(&configfs_subsystem_mutex);
4519 + unlink_group(group);
4520 ++ mutex_unlock(&configfs_subsystem_mutex);
4521 + configfs_release_fs();
4522 + }
4523 + put_fragment(frag);
4524 +@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
4525 +
4526 + dput(dentry);
4527 +
4528 ++ mutex_lock(&configfs_subsystem_mutex);
4529 + unlink_group(group);
4530 ++ mutex_unlock(&configfs_subsystem_mutex);
4531 + configfs_release_fs();
4532 + }
4533 +
4534 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4535 +index a92f276f21d9c..db724482cd117 100644
4536 +--- a/fs/io_uring.c
4537 ++++ b/fs/io_uring.c
4538 +@@ -4477,6 +4477,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4539 + } else {
4540 + list_add_tail(&buf->list, &(*head)->list);
4541 + }
4542 ++ cond_resched();
4543 + }
4544 +
4545 + return i ? i : -ENOMEM;
4546 +@@ -7633,7 +7634,7 @@ static int io_run_task_work_sig(void)
4547 + /* when returns >0, the caller should retry */
4548 + static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
4549 + struct io_wait_queue *iowq,
4550 +- signed long *timeout)
4551 ++ ktime_t timeout)
4552 + {
4553 + int ret;
4554 +
4555 +@@ -7645,8 +7646,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
4556 + if (test_bit(0, &ctx->check_cq_overflow))
4557 + return 1;
4558 +
4559 +- *timeout = schedule_timeout(*timeout);
4560 +- return !*timeout ? -ETIME : 1;
4561 ++ if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
4562 ++ return -ETIME;
4563 ++ return 1;
4564 + }
4565 +
4566 + /*
4567 +@@ -7659,7 +7661,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
4568 + {
4569 + struct io_wait_queue iowq;
4570 + struct io_rings *rings = ctx->rings;
4571 +- signed long timeout = MAX_SCHEDULE_TIMEOUT;
4572 ++ ktime_t timeout = KTIME_MAX;
4573 + int ret;
4574 +
4575 + do {
4576 +@@ -7675,7 +7677,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
4577 +
4578 + if (get_timespec64(&ts, uts))
4579 + return -EFAULT;
4580 +- timeout = timespec64_to_jiffies(&ts);
4581 ++ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
4582 + }
4583 +
4584 + if (sig) {
4585 +@@ -7707,7 +7709,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
4586 + }
4587 + prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
4588 + TASK_INTERRUPTIBLE);
4589 +- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
4590 ++ ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
4591 + finish_wait(&ctx->cq_wait, &iowq.wq);
4592 + cond_resched();
4593 + } while (ret > 0);
4594 +@@ -7864,7 +7866,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
4595 + ret = wait_for_completion_interruptible(&data->done);
4596 + if (!ret) {
4597 + mutex_lock(&ctx->uring_lock);
4598 +- break;
4599 ++ if (atomic_read(&data->refs) > 0) {
4600 ++ /*
4601 ++ * it has been revived by another thread while
4602 ++ * we were unlocked
4603 ++ */
4604 ++ mutex_unlock(&ctx->uring_lock);
4605 ++ } else {
4606 ++ break;
4607 ++ }
4608 + }
4609 +
4610 + atomic_inc(&data->refs);
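
The io_uring change converts the caller's relative timespec into one absolute ktime deadline up front, so each restart of the wait loop measures against the same fixed point instead of re-arming a relative jiffies timeout. A rough userspace analogue using CLOCK_MONOTONIC:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t rel_ns = 50 * 1000000ull;	/* caller asked for 50 ms */
	/* Convert once to an absolute deadline, as the patch does with
	 * ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()). */
	uint64_t deadline = now_ns() + rel_ns;

	/* The wait can be restarted any number of times without drift:
	 * the remaining time always derives from one fixed deadline. */
	for (int i = 0; i < 3; i++) {
		uint64_t t = now_ns();
		uint64_t remaining = t < deadline ? deadline - t : 0;

		printf("restart %d: %llu ns remaining\n", i,
		       (unsigned long long)remaining);
	}
	return 0;
}
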
4611 +diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
4612 +index 3616839c5c4b6..f2625a372a3ae 100644
4613 +--- a/fs/tracefs/inode.c
4614 ++++ b/fs/tracefs/inode.c
4615 +@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
4616 + if (!gid_valid(gid))
4617 + return -EINVAL;
4618 + opts->gid = gid;
4619 +- set_gid(tracefs_mount->mnt_root, gid);
4620 + break;
4621 + case Opt_mode:
4622 + if (match_octal(&args[0], &option))
4623 +@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb)
4624 + inode->i_mode |= opts->mode;
4625 +
4626 + inode->i_uid = opts->uid;
4627 +- inode->i_gid = opts->gid;
4628 ++
4629 ++ /* Set all the group ids to the mount option */
4630 ++ set_gid(sb->s_root, opts->gid);
4631 +
4632 + return 0;
4633 + }
4634 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
4635 +index 29b9b199c56bb..7078938ba235c 100644
4636 +--- a/include/linux/bpf.h
4637 ++++ b/include/linux/bpf.h
4638 +@@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
4639 + static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
4640 + {
4641 + if (unlikely(map_value_has_spin_lock(map)))
4642 +- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
4643 +- (struct bpf_spin_lock){};
4644 ++ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
4645 + if (unlikely(map_value_has_timer(map)))
4646 +- *(struct bpf_timer *)(dst + map->timer_off) =
4647 +- (struct bpf_timer){};
4648 ++ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
4649 + }
4650 +
4651 + /* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
4652 +@@ -224,7 +222,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
4653 + if (unlikely(map_value_has_spin_lock(map))) {
4654 + s_off = map->spin_lock_off;
4655 + s_sz = sizeof(struct bpf_spin_lock);
4656 +- } else if (unlikely(map_value_has_timer(map))) {
4657 ++ }
4658 ++ if (unlikely(map_value_has_timer(map))) {
4659 + t_off = map->timer_off;
4660 + t_sz = sizeof(struct bpf_timer);
4661 + }
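
The bpf.h hunks swap compound-literal assignment for memset() when zeroing the embedded spin lock and timer, and make the two checks independent ifs since a map value can contain both. A small sketch of byte-exact zeroing at an offset inside a larger value (struct names here are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct lock { int owner; };	/* stands in for struct bpf_spin_lock */
struct value { char a[3]; struct lock l; char b[5]; };

int main(void)
{
	struct value v;

	memset(&v, 0xff, sizeof(v));	/* dirty map value */
	/* Zero only the embedded lock, leaving the surrounding bytes
	 * untouched, in the style of
	 * memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock)). */
	memset((char *)&v + offsetof(struct value, l), 0,
	       sizeof(struct lock));

	printf("a[0]=%02x owner=%d b[0]=%02x\n",
	       (unsigned char)v.a[0], v.l.owner, (unsigned char)v.b[0]);
	return 0;
}
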
4662 +diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
4663 +index 98efb7b5660d9..c9a3ac9efeaa9 100644
4664 +--- a/include/linux/nvmem-provider.h
4665 ++++ b/include/linux/nvmem-provider.h
4666 +@@ -70,7 +70,8 @@ struct nvmem_keepout {
4667 + * @word_size: Minimum read/write access granularity.
4668 + * @stride: Minimum read/write access stride.
4669 + * @priv: User context passed to read/write callbacks.
4670 +- * @wp-gpio: Write protect pin
4671 ++ * @wp-gpio: Write protect pin
4672 ++ * @ignore_wp: Write Protect pin is managed by the provider.
4673 + *
4674 + * Note: A default "nvmem<id>" name will be assigned to the device if
4675 + * no name is specified in its configuration. In such case "<id>" is
4676 +@@ -92,6 +93,7 @@ struct nvmem_config {
4677 + enum nvmem_type type;
4678 + bool read_only;
4679 + bool root_only;
4680 ++ bool ignore_wp;
4681 + struct device_node *of_node;
4682 + bool no_of_node;
4683 + nvmem_reg_read_t reg_read;
4684 +diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
4685 +index 584d94be9c8b0..18a717fe62eb0 100644
4686 +--- a/include/linux/skmsg.h
4687 ++++ b/include/linux/skmsg.h
4688 +@@ -507,12 +507,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
4689 + return !!psock->saved_data_ready;
4690 + }
4691 +
4692 +-static inline bool sk_is_tcp(const struct sock *sk)
4693 +-{
4694 +- return sk->sk_type == SOCK_STREAM &&
4695 +- sk->sk_protocol == IPPROTO_TCP;
4696 +-}
4697 +-
4698 + static inline bool sk_is_udp(const struct sock *sk)
4699 + {
4700 + return sk->sk_type == SOCK_DGRAM &&
4701 +diff --git a/include/linux/slab.h b/include/linux/slab.h
4702 +index 181045148b065..79c2ff9256d04 100644
4703 +--- a/include/linux/slab.h
4704 ++++ b/include/linux/slab.h
4705 +@@ -669,8 +669,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
4706 + * allocator where we care about the real place the memory allocation
4707 + * request comes from.
4708 + */
4709 +-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
4710 +- __alloc_size(1);
4711 ++extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
4712 + #define kmalloc_track_caller(size, flags) \
4713 + __kmalloc_track_caller(size, flags, _RET_IP_)
4714 +
4715 +diff --git a/include/net/checksum.h b/include/net/checksum.h
4716 +index 5b96d5bd6e545..d3b5d368a0caa 100644
4717 +--- a/include/net/checksum.h
4718 ++++ b/include/net/checksum.h
4719 +@@ -22,7 +22,7 @@
4720 + #include <asm/checksum.h>
4721 +
4722 + #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
4723 +-static inline
4724 ++static __always_inline
4725 + __wsum csum_and_copy_from_user (const void __user *src, void *dst,
4726 + int len)
4727 + {
4728 +@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
4729 + #endif
4730 +
4731 + #ifndef HAVE_CSUM_COPY_USER
4732 +-static __inline__ __wsum csum_and_copy_to_user
4733 ++static __always_inline __wsum csum_and_copy_to_user
4734 + (const void *src, void __user *dst, int len)
4735 + {
4736 + __wsum sum = csum_partial(src, len, ~0U);
4737 +@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
4738 + #endif
4739 +
4740 + #ifndef _HAVE_ARCH_CSUM_AND_COPY
4741 +-static inline __wsum
4742 ++static __always_inline __wsum
4743 + csum_partial_copy_nocheck(const void *src, void *dst, int len)
4744 + {
4745 + memcpy(dst, src, len);
4746 +@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
4747 + #endif
4748 +
4749 + #ifndef HAVE_ARCH_CSUM_ADD
4750 +-static inline __wsum csum_add(__wsum csum, __wsum addend)
4751 ++static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
4752 + {
4753 + u32 res = (__force u32)csum;
4754 + res += (__force u32)addend;
4755 +@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
4756 + }
4757 + #endif
4758 +
4759 +-static inline __wsum csum_sub(__wsum csum, __wsum addend)
4760 ++static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
4761 + {
4762 + return csum_add(csum, ~addend);
4763 + }
4764 +
4765 +-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
4766 ++static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
4767 + {
4768 + u16 res = (__force u16)csum;
4769 +
4770 +@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
4771 + return (__force __sum16)(res + (res < (__force u16)addend));
4772 + }
4773 +
4774 +-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
4775 ++static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
4776 + {
4777 + return csum16_add(csum, ~addend);
4778 + }
4779 +
4780 +-static inline __wsum csum_shift(__wsum sum, int offset)
4781 ++static __always_inline __wsum csum_shift(__wsum sum, int offset)
4782 + {
4783 + /* rotate sum to align it with a 16b boundary */
4784 + if (offset & 1)
4785 +@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset)
4786 + return sum;
4787 + }
4788 +
4789 +-static inline __wsum
4790 ++static __always_inline __wsum
4791 + csum_block_add(__wsum csum, __wsum csum2, int offset)
4792 + {
4793 + return csum_add(csum, csum_shift(csum2, offset));
4794 + }
4795 +
4796 +-static inline __wsum
4797 ++static __always_inline __wsum
4798 + csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
4799 + {
4800 + return csum_block_add(csum, csum2, offset);
4801 + }
4802 +
4803 +-static inline __wsum
4804 ++static __always_inline __wsum
4805 + csum_block_sub(__wsum csum, __wsum csum2, int offset)
4806 + {
4807 + return csum_block_add(csum, ~csum2, offset);
4808 + }
4809 +
4810 +-static inline __wsum csum_unfold(__sum16 n)
4811 ++static __always_inline __wsum csum_unfold(__sum16 n)
4812 + {
4813 + return (__force __wsum)n;
4814 + }
4815 +
4816 +-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
4817 ++static __always_inline
4818 ++__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
4819 + {
4820 + return csum_partial(buff, len, sum);
4821 + }
4822 +
4823 + #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
4824 +
4825 +-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
4826 ++static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
4827 + {
4828 + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
4829 + }
4830 +
4831 +-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
4832 ++static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
4833 + {
4834 + __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
4835 +
4836 +@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
4837 + * m : old value of a 16bit field
4838 + * m' : new value of a 16bit field
4839 + */
4840 +-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
4841 ++static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
4842 + {
4843 + *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
4844 + }
4845 +
4846 ++static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
4847 ++{
4848 ++ *csum = csum_add(csum_sub(*csum, old), new);
4849 ++}
4850 ++
4851 + struct sk_buff;
4852 + void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
4853 + __be32 from, __be32 to, bool pseudohdr);
4854 +@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
4855 + void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
4856 + __wsum diff, bool pseudohdr);
4857 +
4858 +-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
4859 +- __be16 from, __be16 to,
4860 +- bool pseudohdr)
4861 ++static __always_inline
4862 ++void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
4863 ++ __be16 from, __be16 to, bool pseudohdr)
4864 + {
4865 + inet_proto_csum_replace4(sum, skb, (__force __be32)from,
4866 + (__force __be32)to, pseudohdr);
4867 + }
4868 +
4869 +-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
4870 +- int start, int offset)
4871 ++static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
4872 ++ int start, int offset)
4873 + {
4874 + __sum16 *psum = (__sum16 *)(ptr + offset);
4875 + __wsum delta;
4876 +@@ -175,7 +181,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
4877 + return delta;
4878 + }
4879 +
4880 +-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
4881 ++static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
4882 + {
4883 + *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
4884 + }
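
The new csum_replace() helper relies on the usual ones-complement identities: subtraction is addition of the complement, and folding the end-around carry back in keeps the sum valid. The arithmetic can be checked in plain C with 32-bit sums mirroring the __wsum helpers:

#include <stdint.h>
#include <stdio.h>

/* Ones-complement add with end-around carry, mirroring csum_add(). */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* fold the carry back in */
}

static uint32_t csum_sub(uint32_t csum, uint32_t addend)
{
	return csum_add(csum, ~addend);	/* ~addend is -addend here */
}

/* The helper the patch introduces: swap `old` for `new` in a running sum. */
static void csum_replace(uint32_t *csum, uint32_t old, uint32_t new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

int main(void)
{
	uint32_t data[3] = { 0x11111111, 0x22222222, 0x33333333 };
	uint32_t sum = 0, check = 0;

	for (int i = 0; i < 3; i++)
		sum = csum_add(sum, data[i]);

	/* rewrite one word and patch the checksum incrementally */
	csum_replace(&sum, data[1], 0xdeadbeef);
	data[1] = 0xdeadbeef;

	for (int i = 0; i < 3; i++)
		check = csum_add(check, data[i]);

	/* both values agree: 22f20334 */
	printf("incremental=%08x full=%08x\n", sum, check);
	return 0;
}
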
4885 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
4886 +index a0d9e0b47ab8f..1dbddde8364ab 100644
4887 +--- a/include/net/netfilter/nf_tables.h
4888 ++++ b/include/net/netfilter/nf_tables.h
4889 +@@ -889,9 +889,9 @@ struct nft_expr_ops {
4890 + int (*offload)(struct nft_offload_ctx *ctx,
4891 + struct nft_flow_rule *flow,
4892 + const struct nft_expr *expr);
4893 ++ bool (*offload_action)(const struct nft_expr *expr);
4894 + void (*offload_stats)(struct nft_expr *expr,
4895 + const struct flow_stats *stats);
4896 +- u32 offload_flags;
4897 + const struct nft_expr_type *type;
4898 + void *data;
4899 + };
4900 +diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
4901 +index f9d95ff82df83..7971478439580 100644
4902 +--- a/include/net/netfilter/nf_tables_offload.h
4903 ++++ b/include/net/netfilter/nf_tables_offload.h
4904 +@@ -67,8 +67,6 @@ struct nft_flow_rule {
4905 + struct flow_rule *rule;
4906 + };
4907 +
4908 +-#define NFT_OFFLOAD_F_ACTION (1 << 0)
4909 +-
4910 + void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
4911 + enum flow_dissector_key_id addr_type);
4912 +
4913 +diff --git a/include/net/sock.h b/include/net/sock.h
4914 +index d47e9658da285..cd69595949614 100644
4915 +--- a/include/net/sock.h
4916 ++++ b/include/net/sock.h
4917 +@@ -504,7 +504,7 @@ struct sock {
4918 + u16 sk_tsflags;
4919 + int sk_bind_phc;
4920 + u8 sk_shutdown;
4921 +- u32 sk_tskey;
4922 ++ atomic_t sk_tskey;
4923 + atomic_t sk_zckey;
4924 +
4925 + u8 sk_clockid;
4926 +@@ -2636,7 +2636,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
4927 + __sock_tx_timestamp(tsflags, tx_flags);
4928 + if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
4929 + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
4930 +- *tskey = sk->sk_tskey++;
4931 ++ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
4932 + }
4933 + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
4934 + *tx_flags |= SKBTX_WIFI_STATUS;
4935 +@@ -2654,6 +2654,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
4936 + &skb_shinfo(skb)->tskey);
4937 + }
4938 +
4939 ++static inline bool sk_is_tcp(const struct sock *sk)
4940 ++{
4941 ++ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
4942 ++}
4943 ++
4944 + /**
4945 + * sk_eat_skb - Release a skb if it is no longer needed
4946 + * @sk: socket to eat this skb from
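
sk_tskey becomes an atomic_t because several senders can allocate timestamp keys on the same socket concurrently; with a plain u32 increment, two racers can read the same old value and hand out duplicate keys. A C11 analogue of atomic_inc_return(&sk->sk_tskey) - 1 (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint tskey;	/* models the socket's sk_tskey */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		atomic_fetch_add(&tskey, 1);	/* returns the old value */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* A non-atomic counter could lose updates under this race;
	 * the atomic version always reaches exactly 400000. */
	printf("final key counter: %u (expect 400000)\n",
	       atomic_load(&tskey));
	return 0;
}
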
4947 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
4948 +index d2ff8ba7ae58f..c9da250fee38c 100644
4949 +--- a/kernel/bpf/btf.c
4950 ++++ b/kernel/bpf/btf.c
4951 +@@ -5564,12 +5564,53 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
4952 + #endif
4953 + };
4954 +
4955 ++/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
4956 ++static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log,
4957 ++ const struct btf *btf,
4958 ++ const struct btf_type *t, int rec)
4959 ++{
4960 ++ const struct btf_type *member_type;
4961 ++ const struct btf_member *member;
4962 ++ u32 i;
4963 ++
4964 ++ if (!btf_type_is_struct(t))
4965 ++ return false;
4966 ++
4967 ++ for_each_member(i, t, member) {
4968 ++ const struct btf_array *array;
4969 ++
4970 ++ member_type = btf_type_skip_modifiers(btf, member->type, NULL);
4971 ++ if (btf_type_is_struct(member_type)) {
4972 ++ if (rec >= 3) {
4973 ++ bpf_log(log, "max struct nesting depth exceeded\n");
4974 ++ return false;
4975 ++ }
4976 ++ if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1))
4977 ++ return false;
4978 ++ continue;
4979 ++ }
4980 ++ if (btf_type_is_array(member_type)) {
4981 ++ array = btf_type_array(member_type);
4982 ++ if (!array->nelems)
4983 ++ return false;
4984 ++ member_type = btf_type_skip_modifiers(btf, array->type, NULL);
4985 ++ if (!btf_type_is_scalar(member_type))
4986 ++ return false;
4987 ++ continue;
4988 ++ }
4989 ++ if (!btf_type_is_scalar(member_type))
4990 ++ return false;
4991 ++ }
4992 ++ return true;
4993 ++}
4994 ++
4995 + static int btf_check_func_arg_match(struct bpf_verifier_env *env,
4996 + const struct btf *btf, u32 func_id,
4997 + struct bpf_reg_state *regs,
4998 + bool ptr_to_mem_ok)
4999 + {
5000 + struct bpf_verifier_log *log = &env->log;
5001 ++ bool is_kfunc = btf_is_kernel(btf);
5002 + const char *func_name, *ref_tname;
5003 + const struct btf_type *t, *ref_t;
5004 + const struct btf_param *args;
5005 +@@ -5622,7 +5663,21 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5006 +
5007 + ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
5008 + ref_tname = btf_name_by_offset(btf, ref_t->name_off);
5009 +- if (btf_is_kernel(btf)) {
5010 ++ if (btf_get_prog_ctx_type(log, btf, t,
5011 ++ env->prog->type, i)) {
5012 ++ /* If function expects ctx type in BTF check that caller
5013 ++ * is passing PTR_TO_CTX.
5014 ++ */
5015 ++ if (reg->type != PTR_TO_CTX) {
5016 ++ bpf_log(log,
5017 ++ "arg#%d expected pointer to ctx, but got %s\n",
5018 ++ i, btf_type_str(t));
5019 ++ return -EINVAL;
5020 ++ }
5021 ++ if (check_ctx_reg(env, reg, regno))
5022 ++ return -EINVAL;
5023 ++ } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
5024 ++ (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
5025 + const struct btf_type *reg_ref_t;
5026 + const struct btf *reg_btf;
5027 + const char *reg_ref_tname;
5028 +@@ -5638,14 +5693,9 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5029 + if (reg->type == PTR_TO_BTF_ID) {
5030 + reg_btf = reg->btf;
5031 + reg_ref_id = reg->btf_id;
5032 +- } else if (reg2btf_ids[reg->type]) {
5033 +- reg_btf = btf_vmlinux;
5034 +- reg_ref_id = *reg2btf_ids[reg->type];
5035 + } else {
5036 +- bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d is not a pointer to btf_id\n",
5037 +- func_name, i,
5038 +- btf_type_str(ref_t), ref_tname, regno);
5039 +- return -EINVAL;
5040 ++ reg_btf = btf_vmlinux;
5041 ++ reg_ref_id = *reg2btf_ids[base_type(reg->type)];
5042 + }
5043 +
5044 + reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
5045 +@@ -5661,23 +5711,24 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5046 + reg_ref_tname);
5047 + return -EINVAL;
5048 + }
5049 +- } else if (btf_get_prog_ctx_type(log, btf, t,
5050 +- env->prog->type, i)) {
5051 +- /* If function expects ctx type in BTF check that caller
5052 +- * is passing PTR_TO_CTX.
5053 +- */
5054 +- if (reg->type != PTR_TO_CTX) {
5055 +- bpf_log(log,
5056 +- "arg#%d expected pointer to ctx, but got %s\n",
5057 +- i, btf_type_str(t));
5058 +- return -EINVAL;
5059 +- }
5060 +- if (check_ctx_reg(env, reg, regno))
5061 +- return -EINVAL;
5062 + } else if (ptr_to_mem_ok) {
5063 + const struct btf_type *resolve_ret;
5064 + u32 type_size;
5065 +
5066 ++ if (is_kfunc) {
5067 ++ /* Permit pointer to mem, but only when argument
5068 ++ * type is pointer to scalar, or struct composed
5069 ++ * (recursively) of scalars.
5070 ++ */
5071 ++ if (!btf_type_is_scalar(ref_t) &&
5072 ++ !__btf_type_is_scalar_struct(log, btf, ref_t, 0)) {
5073 ++ bpf_log(log,
5074 ++ "arg#%d pointer type %s %s must point to scalar or struct with scalar\n",
5075 ++ i, btf_type_str(ref_t), ref_tname);
5076 ++ return -EINVAL;
5077 ++ }
5078 ++ }
5079 ++
5080 + resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
5081 + if (IS_ERR(resolve_ret)) {
5082 + bpf_log(log,
5083 +@@ -5690,6 +5741,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5084 + if (check_mem_reg(env, reg, regno, type_size))
5085 + return -EINVAL;
5086 + } else {
5087 ++ bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
5088 ++ is_kfunc ? "kernel " : "", func_name, func_id);
5089 + return -EINVAL;
5090 + }
5091 + }
5092 +@@ -5739,7 +5792,7 @@ int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
5093 + const struct btf *btf, u32 func_id,
5094 + struct bpf_reg_state *regs)
5095 + {
5096 +- return btf_check_func_arg_match(env, btf, func_id, regs, false);
5097 ++ return btf_check_func_arg_match(env, btf, func_id, regs, true);
5098 + }
5099 +
5100 + /* Convert BTF of a function into bpf_reg_state if possible
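
__btf_type_is_scalar_struct() walks struct members recursively, accepting scalars, non-empty scalar arrays, and nested structs up to a small depth. A toy model of the same walk over a hand-built type tree (the enum kind/struct type definitions here are illustrative, not the kernel's BTF representation):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum kind { SCALAR, ARRAY, STRUCT };

struct type {
	enum kind kind;
	const struct type *elem;		/* ARRAY element type */
	size_t nelems;				/* ARRAY length */
	const struct type *const *members;	/* STRUCT members */
	size_t nmembers;
};

static bool is_scalar_struct(const struct type *t, int rec)
{
	if (t->kind != STRUCT)
		return false;
	for (size_t i = 0; i < t->nmembers; i++) {
		const struct type *m = t->members[i];

		if (m->kind == STRUCT) {
			/* same depth limit as the patch: rec >= 3 fails */
			if (rec >= 3 || !is_scalar_struct(m, rec + 1))
				return false;
		} else if (m->kind == ARRAY) {
			if (!m->nelems || m->elem->kind != SCALAR)
				return false;
		} else if (m->kind != SCALAR) {
			return false;
		}
	}
	return true;
}

int main(void)
{
	static const struct type u32t = { .kind = SCALAR };
	static const struct type arr = {
		.kind = ARRAY, .elem = &u32t, .nelems = 4 };
	static const struct type *inner_m[] = { &u32t, &arr };
	static const struct type inner = {
		.kind = STRUCT, .members = inner_m, .nmembers = 2 };
	static const struct type *outer_m[] = { &u32t, &inner };
	static const struct type outer = {
		.kind = STRUCT, .members = outer_m, .nmembers = 2 };

	printf("outer ok: %d\n", is_scalar_struct(&outer, 0));	/* 1 */
	return 0;
}
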
5101 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
5102 +index 4c6c2c2137458..d2914cb9b7d18 100644
5103 +--- a/kernel/bpf/syscall.c
5104 ++++ b/kernel/bpf/syscall.c
5105 +@@ -1354,6 +1354,7 @@ int generic_map_delete_batch(struct bpf_map *map,
5106 + maybe_wait_bpf_programs(map);
5107 + if (err)
5108 + break;
5109 ++ cond_resched();
5110 + }
5111 + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
5112 + err = -EFAULT;
5113 +@@ -1411,6 +1412,7 @@ int generic_map_update_batch(struct bpf_map *map,
5114 +
5115 + if (err)
5116 + break;
5117 ++ cond_resched();
5118 + }
5119 +
5120 + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
5121 +@@ -1508,6 +1510,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
5122 + swap(prev_key, key);
5123 + retry = MAP_LOOKUP_RETRIES;
5124 + cp++;
5125 ++ cond_resched();
5126 + }
5127 +
5128 + if (err == -EFAULT)
5129 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
5130 +index 0e877dbcfeea9..afc6c0e9c966e 100644
5131 +--- a/kernel/cgroup/cgroup-v1.c
5132 ++++ b/kernel/cgroup/cgroup-v1.c
5133 +@@ -546,6 +546,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
5134 + char *buf, size_t nbytes, loff_t off)
5135 + {
5136 + struct cgroup *cgrp;
5137 ++ struct cgroup_file_ctx *ctx;
5138 +
5139 + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
5140 +
5141 +@@ -553,8 +554,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
5142 + * Release agent gets called with all capabilities,
5143 + * require capabilities to set release agent.
5144 + */
5145 +- if ((of->file->f_cred->user_ns != &init_user_ns) ||
5146 +- !capable(CAP_SYS_ADMIN))
5147 ++ ctx = of->priv;
5148 ++ if ((ctx->ns->user_ns != &init_user_ns) ||
5149 ++ !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
5150 + return -EPERM;
5151 +
5152 + cgrp = cgroup_kn_lock_live(of->kn, false);
5153 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
5154 +index d729cbd2445af..df62527f5e0b1 100644
5155 +--- a/kernel/cgroup/cpuset.c
5156 ++++ b/kernel/cgroup/cpuset.c
5157 +@@ -2269,6 +2269,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
5158 + cgroup_taskset_first(tset, &css);
5159 + cs = css_cs(css);
5160 +
5161 ++ cpus_read_lock();
5162 + percpu_down_write(&cpuset_rwsem);
5163 +
5164 + guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
5165 +@@ -2322,6 +2323,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
5166 + wake_up(&cpuset_attach_wq);
5167 +
5168 + percpu_up_write(&cpuset_rwsem);
5169 ++ cpus_read_unlock();
5170 + }
5171 +
5172 + /* The various types of files and directories in a cpuset file system */
5173 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
5174 +index 3d5c07239a2a8..67c7979c40c0b 100644
5175 +--- a/kernel/trace/trace_events_trigger.c
5176 ++++ b/kernel/trace/trace_events_trigger.c
5177 +@@ -955,6 +955,16 @@ traceon_trigger(struct event_trigger_data *data,
5178 + struct trace_buffer *buffer, void *rec,
5179 + struct ring_buffer_event *event)
5180 + {
5181 ++ struct trace_event_file *file = data->private_data;
5182 ++
5183 ++ if (file) {
5184 ++ if (tracer_tracing_is_on(file->tr))
5185 ++ return;
5186 ++
5187 ++ tracer_tracing_on(file->tr);
5188 ++ return;
5189 ++ }
5190 ++
5191 + if (tracing_is_on())
5192 + return;
5193 +
5194 +@@ -966,8 +976,15 @@ traceon_count_trigger(struct event_trigger_data *data,
5195 + struct trace_buffer *buffer, void *rec,
5196 + struct ring_buffer_event *event)
5197 + {
5198 +- if (tracing_is_on())
5199 +- return;
5200 ++ struct trace_event_file *file = data->private_data;
5201 ++
5202 ++ if (file) {
5203 ++ if (tracer_tracing_is_on(file->tr))
5204 ++ return;
5205 ++ } else {
5206 ++ if (tracing_is_on())
5207 ++ return;
5208 ++ }
5209 +
5210 + if (!data->count)
5211 + return;
5212 +@@ -975,7 +992,10 @@ traceon_count_trigger(struct event_trigger_data *data,
5213 + if (data->count != -1)
5214 + (data->count)--;
5215 +
5216 +- tracing_on();
5217 ++ if (file)
5218 ++ tracer_tracing_on(file->tr);
5219 ++ else
5220 ++ tracing_on();
5221 + }
5222 +
5223 + static void
5224 +@@ -983,6 +1003,16 @@ traceoff_trigger(struct event_trigger_data *data,
5225 + struct trace_buffer *buffer, void *rec,
5226 + struct ring_buffer_event *event)
5227 + {
5228 ++ struct trace_event_file *file = data->private_data;
5229 ++
5230 ++ if (file) {
5231 ++ if (!tracer_tracing_is_on(file->tr))
5232 ++ return;
5233 ++
5234 ++ tracer_tracing_off(file->tr);
5235 ++ return;
5236 ++ }
5237 ++
5238 + if (!tracing_is_on())
5239 + return;
5240 +
5241 +@@ -994,8 +1024,15 @@ traceoff_count_trigger(struct event_trigger_data *data,
5242 + struct trace_buffer *buffer, void *rec,
5243 + struct ring_buffer_event *event)
5244 + {
5245 +- if (!tracing_is_on())
5246 +- return;
5247 ++ struct trace_event_file *file = data->private_data;
5248 ++
5249 ++ if (file) {
5250 ++ if (!tracer_tracing_is_on(file->tr))
5251 ++ return;
5252 ++ } else {
5253 ++ if (!tracing_is_on())
5254 ++ return;
5255 ++ }
5256 +
5257 + if (!data->count)
5258 + return;
5259 +@@ -1003,7 +1040,10 @@ traceoff_count_trigger(struct event_trigger_data *data,
5260 + if (data->count != -1)
5261 + (data->count)--;
5262 +
5263 +- tracing_off();
5264 ++ if (file)
5265 ++ tracer_tracing_off(file->tr);
5266 ++ else
5267 ++ tracing_off();
5268 + }
5269 +
5270 + static int
5271 +@@ -1200,7 +1240,12 @@ stacktrace_trigger(struct event_trigger_data *data,
5272 + struct trace_buffer *buffer, void *rec,
5273 + struct ring_buffer_event *event)
5274 + {
5275 +- trace_dump_stack(STACK_SKIP);
5276 ++ struct trace_event_file *file = data->private_data;
5277 ++
5278 ++ if (file)
5279 ++ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
5280 ++ else
5281 ++ trace_dump_stack(STACK_SKIP);
5282 + }
5283 +
5284 + static void
5285 +diff --git a/mm/filemap.c b/mm/filemap.c
5286 +index 39c4c46c61337..56b437eb85547 100644
5287 +--- a/mm/filemap.c
5288 ++++ b/mm/filemap.c
5289 +@@ -2365,8 +2365,12 @@ static void filemap_get_read_batch(struct address_space *mapping,
5290 + break;
5291 + if (PageReadahead(head))
5292 + break;
5293 +- xas.xa_index = head->index + thp_nr_pages(head) - 1;
5294 +- xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
5295 ++ if (PageHead(head)) {
5296 ++ xas_set(&xas, head->index + thp_nr_pages(head));
5297 ++ /* Handle wrap correctly */
5298 ++ if (xas.xa_index - 1 >= max)
5299 ++ break;
5300 ++ }
5301 + continue;
5302 + put_page:
5303 + put_page(head);
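
The filemap fix moves the cursor with xas_set() and then guards against index wraparound: with an unsigned index, `xas.xa_index - 1 >= max` catches both running past max and wrapping to zero, since 0 - 1 wraps to the largest value. The trick in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long max = 100;
	unsigned long next_index[] = { 50, 101, 0 /* wrapped past the top */ };

	for (int i = 0; i < 3; i++) {
		unsigned long next = next_index[i];
		/* 0 - 1 wraps to ULONG_MAX, so a wrapped cursor also stops */
		int stop = (next - 1) >= max;

		printf("next=%lu -> %s\n", next, stop ? "stop" : "continue");
	}
	return 0;
}
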
5304 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5305 +index a1baa198519a2..221239db6389a 100644
5306 +--- a/mm/hugetlb.c
5307 ++++ b/mm/hugetlb.c
5308 +@@ -4159,10 +4159,10 @@ static int __init hugepages_setup(char *s)
5309 + pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
5310 + return 0;
5311 + }
5312 ++ if (tmp >= nr_online_nodes)
5313 ++ goto invalid;
5314 + node = tmp;
5315 + p += count + 1;
5316 +- if (node < 0 || node >= nr_online_nodes)
5317 +- goto invalid;
5318 + /* Parse hugepages */
5319 + if (sscanf(p, "%lu%n", &tmp, &count) != 1)
5320 + goto invalid;
5321 +@@ -4851,14 +4851,13 @@ again:
5322 + }
5323 +
5324 + static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5325 +- unsigned long new_addr, pte_t *src_pte)
5326 ++ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
5327 + {
5328 + struct hstate *h = hstate_vma(vma);
5329 + struct mm_struct *mm = vma->vm_mm;
5330 +- pte_t *dst_pte, pte;
5331 + spinlock_t *src_ptl, *dst_ptl;
5332 ++ pte_t pte;
5333 +
5334 +- dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h));
5335 + dst_ptl = huge_pte_lock(h, mm, dst_pte);
5336 + src_ptl = huge_pte_lockptr(h, mm, src_pte);
5337 +
5338 +@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
5339 + if (!dst_pte)
5340 + break;
5341 +
5342 +- move_huge_pte(vma, old_addr, new_addr, src_pte);
5343 ++ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
5344 + }
5345 + flush_tlb_range(vma, old_end - len, old_end);
5346 + mmu_notifier_invalidate_range_end(&range);
5347 +diff --git a/mm/memblock.c b/mm/memblock.c
5348 +index 1018e50566f35..b12a364f2766f 100644
5349 +--- a/mm/memblock.c
5350 ++++ b/mm/memblock.c
5351 +@@ -366,14 +366,20 @@ void __init memblock_discard(void)
5352 + addr = __pa(memblock.reserved.regions);
5353 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
5354 + memblock.reserved.max);
5355 +- memblock_free_late(addr, size);
5356 ++ if (memblock_reserved_in_slab)
5357 ++ kfree(memblock.reserved.regions);
5358 ++ else
5359 ++ memblock_free_late(addr, size);
5360 + }
5361 +
5362 + if (memblock.memory.regions != memblock_memory_init_regions) {
5363 + addr = __pa(memblock.memory.regions);
5364 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
5365 + memblock.memory.max);
5366 +- memblock_free_late(addr, size);
5367 ++ if (memblock_memory_in_slab)
5368 ++ kfree(memblock.memory.regions);
5369 ++ else
5370 ++ memblock_free_late(addr, size);
5371 + }
5372 +
5373 + memblock_memory = NULL;
5374 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
5375 +index a271688780a2c..307ee1174a6e2 100644
5376 +--- a/net/can/j1939/transport.c
5377 ++++ b/net/can/j1939/transport.c
5378 +@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
5379 + /* set the end-packet for broadcast */
5380 + session->pkt.last = session->pkt.total;
5381 +
5382 +- skcb->tskey = session->sk->sk_tskey++;
5383 ++ skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
5384 + session->tskey = skcb->tskey;
5385 +
5386 + return session;
5387 +diff --git a/net/core/filter.c b/net/core/filter.c
5388 +index 22bed067284fb..d4cdf11656b3f 100644
5389 +--- a/net/core/filter.c
5390 ++++ b/net/core/filter.c
5391 +@@ -2711,6 +2711,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
5392 + if (unlikely(flags))
5393 + return -EINVAL;
5394 +
5395 ++ if (unlikely(len == 0))
5396 ++ return 0;
5397 ++
5398 + /* First find the starting scatterlist element */
5399 + i = msg->sg.start;
5400 + do {
5401 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5402 +index 909db87d7383d..f78969d8d8160 100644
5403 +--- a/net/core/skbuff.c
5404 ++++ b/net/core/skbuff.c
5405 +@@ -2254,7 +2254,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
5406 + /* Free pulled out fragments. */
5407 + while ((list = skb_shinfo(skb)->frag_list) != insp) {
5408 + skb_shinfo(skb)->frag_list = list->next;
5409 +- kfree_skb(list);
5410 ++ consume_skb(list);
5411 + }
5412 + /* And insert new clone at head. */
5413 + if (clone) {
5414 +@@ -4849,9 +4849,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
5415 + serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
5416 + if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
5417 + serr->ee.ee_data = skb_shinfo(skb)->tskey;
5418 +- if (sk->sk_protocol == IPPROTO_TCP &&
5419 +- sk->sk_type == SOCK_STREAM)
5420 +- serr->ee.ee_data -= sk->sk_tskey;
5421 ++ if (sk_is_tcp(sk))
5422 ++ serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
5423 + }
5424 +
5425 + err = sock_queue_err_skb(sk, skb);
5426 +@@ -4919,8 +4918,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
5427 + if (tsonly) {
5428 + #ifdef CONFIG_INET
5429 + if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
5430 +- sk->sk_protocol == IPPROTO_TCP &&
5431 +- sk->sk_type == SOCK_STREAM) {
5432 ++ sk_is_tcp(sk)) {
5433 + skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
5434 + ack_skb);
5435 + opt_stats = true;
5436 +@@ -6227,7 +6225,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
5437 + /* Free pulled out fragments. */
5438 + while ((list = shinfo->frag_list) != insp) {
5439 + shinfo->frag_list = list->next;
5440 +- kfree_skb(list);
5441 ++ consume_skb(list);
5442 + }
5443 + /* And insert new clone at head. */
5444 + if (clone) {
5445 +diff --git a/net/core/sock.c b/net/core/sock.c
5446 +index 7de234693a3bf..6613a864f7f5a 100644
5447 +--- a/net/core/sock.c
5448 ++++ b/net/core/sock.c
5449 +@@ -874,14 +874,13 @@ int sock_set_timestamping(struct sock *sk, int optname,
5450 +
5451 + if (val & SOF_TIMESTAMPING_OPT_ID &&
5452 + !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
5453 +- if (sk->sk_protocol == IPPROTO_TCP &&
5454 +- sk->sk_type == SOCK_STREAM) {
5455 ++ if (sk_is_tcp(sk)) {
5456 + if ((1 << sk->sk_state) &
5457 + (TCPF_CLOSE | TCPF_LISTEN))
5458 + return -EINVAL;
5459 +- sk->sk_tskey = tcp_sk(sk)->snd_una;
5460 ++ atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
5461 + } else {
5462 +- sk->sk_tskey = 0;
5463 ++ atomic_set(&sk->sk_tskey, 0);
5464 + }
5465 + }
5466 +
5467 +@@ -1372,8 +1371,7 @@ set_sndbuf:
5468 +
5469 + case SO_ZEROCOPY:
5470 + if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
5471 +- if (!((sk->sk_type == SOCK_STREAM &&
5472 +- sk->sk_protocol == IPPROTO_TCP) ||
5473 ++ if (!(sk_is_tcp(sk) ||
5474 + (sk->sk_type == SOCK_DGRAM &&
5475 + sk->sk_protocol == IPPROTO_UDP)))
5476 + ret = -ENOTSUPP;
5477 +diff --git a/net/dsa/master.c b/net/dsa/master.c
5478 +index e8e19857621bd..b0ab3cbeff3ca 100644
5479 +--- a/net/dsa/master.c
5480 ++++ b/net/dsa/master.c
5481 +@@ -260,11 +260,16 @@ static void dsa_netdev_ops_set(struct net_device *dev,
5482 + dev->dsa_ptr->netdev_ops = ops;
5483 + }
5484 +
5485 ++/* Keep the master always promiscuous if the tagging protocol requires that
5486 ++ * (garbles MAC DA) or if it doesn't support unicast filtering, in which case
5487 ++ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
5488 ++ * anyway.
5489 ++ */
5490 + static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
5491 + {
5492 + const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
5493 +
5494 +- if (!ops->promisc_on_master)
5495 ++ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
5496 + return;
5497 +
5498 + rtnl_lock();
5499 +diff --git a/net/dsa/port.c b/net/dsa/port.c
5500 +index f6f12ad2b5251..6cc353b77681f 100644
5501 +--- a/net/dsa/port.c
5502 ++++ b/net/dsa/port.c
5503 +@@ -777,9 +777,15 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
5504 + struct dsa_port *cpu_dp = dp->cpu_dp;
5505 + int err;
5506 +
5507 +- err = dev_uc_add(cpu_dp->master, addr);
5508 +- if (err)
5509 +- return err;
5510 ++ /* Avoid a call to __dev_set_promiscuity() on the master, which
5511 ++	 * requires rtnl_lock(), since we can't guarantee that it is held here,
5512 ++ * and we can't take it either.
5513 ++ */
5514 ++ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
5515 ++ err = dev_uc_add(cpu_dp->master, addr);
5516 ++ if (err)
5517 ++ return err;
5518 ++ }
5519 +
5520 + return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
5521 + }
5522 +@@ -796,9 +802,11 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
5523 + struct dsa_port *cpu_dp = dp->cpu_dp;
5524 + int err;
5525 +
5526 +- err = dev_uc_del(cpu_dp->master, addr);
5527 +- if (err)
5528 +- return err;
5529 ++ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
5530 ++ err = dev_uc_del(cpu_dp->master, addr);
5531 ++ if (err)
5532 ++ return err;
5533 ++ }
5534 +
5535 + return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
5536 + }
5537 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
5538 +index 5f70ffdae1b52..43dd5dd176c24 100644
5539 +--- a/net/ipv4/af_inet.c
5540 ++++ b/net/ipv4/af_inet.c
5541 +@@ -1376,8 +1376,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
5542 + }
5543 +
5544 + ops = rcu_dereference(inet_offloads[proto]);
5545 +- if (likely(ops && ops->callbacks.gso_segment))
5546 ++ if (likely(ops && ops->callbacks.gso_segment)) {
5547 + segs = ops->callbacks.gso_segment(skb, features);
5548 ++ if (!segs)
5549 ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
5550 ++ }
5551 +
5552 + if (IS_ERR_OR_NULL(segs))
5553 + goto out;
5554 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
5555 +index a4d2eb691cbc1..131066d0319a2 100644
5556 +--- a/net/ipv4/ip_output.c
5557 ++++ b/net/ipv4/ip_output.c
5558 +@@ -992,7 +992,7 @@ static int __ip_append_data(struct sock *sk,
5559 +
5560 + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
5561 + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
5562 +- tskey = sk->sk_tskey++;
5563 ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
5564 +
5565 + hh_len = LL_RESERVED_SPACE(rt->dst.dev);
5566 +
5567 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
5568 +index e3a159c8f231e..36e89b6873876 100644
5569 +--- a/net/ipv4/ping.c
5570 ++++ b/net/ipv4/ping.c
5571 +@@ -187,7 +187,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
5572 + (int)ident, &ipv6_hdr(skb)->daddr, dif);
5573 + #endif
5574 + } else {
5575 +- pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
5576 + return NULL;
5577 + }
5578 +
5579 +diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
5580 +index b91003538d87a..bc3a043a5d5c7 100644
5581 +--- a/net/ipv4/udp_tunnel_nic.c
5582 ++++ b/net/ipv4/udp_tunnel_nic.c
5583 +@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
5584 + list_for_each_entry(node, &info->shared->devices, list)
5585 + if (node->dev == dev)
5586 + break;
5587 +- if (node->dev != dev)
5588 ++ if (list_entry_is_head(node, &info->shared->devices, list))
5589 + return;
5590 +
5591 + list_del(&node->list);
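
The udp_tunnel_nic bug is the classic list-search pitfall: when a list_for_each_entry()-style loop finishes without a break, the cursor refers to the list head container rather than a real element, so testing node->dev reads garbage; the fix tests for the head itself (list_entry_is_head). The shape of the bug and its fix with a bare circular list:

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int dev; };

static struct node *find(struct node *head, int dev)
{
	struct node *n;

	for (n = head->next; n != head; n = n->next)
		if (n->dev == dev)
			return n;	/* found before reaching the head */
	/* Reached the head again: the safe "not found" test. Checking
	 * n->dev here instead would dereference the head sentinel. */
	return NULL;
}

int main(void)
{
	struct node head, a = { .dev = 1 }, b = { .dev = 2 };

	head.next = &a; a.next = &b; b.next = &head;	/* circular list */
	printf("find(2): %s\n", find(&head, 2) ? "hit" : "miss");
	printf("find(9): %s\n", find(&head, 9) ? "hit" : "miss");
	return 0;
}
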
5592 +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
5593 +index 1cbd49d5788dd..b2919a8e9c012 100644
5594 +--- a/net/ipv6/ip6_offload.c
5595 ++++ b/net/ipv6/ip6_offload.c
5596 +@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
5597 + if (likely(ops && ops->callbacks.gso_segment)) {
5598 + skb_reset_transport_header(skb);
5599 + segs = ops->callbacks.gso_segment(skb, features);
5600 ++ if (!segs)
5601 ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
5602 + }
5603 +
5604 + if (IS_ERR_OR_NULL(segs))
5605 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5606 +index ff4e83e2a5068..22bf8fb617165 100644
5607 +--- a/net/ipv6/ip6_output.c
5608 ++++ b/net/ipv6/ip6_output.c
5609 +@@ -1465,7 +1465,7 @@ static int __ip6_append_data(struct sock *sk,
5610 +
5611 + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
5612 + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
5613 +- tskey = sk->sk_tskey++;
5614 ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
5615 +
5616 + hh_len = LL_RESERVED_SPACE(rt->dst.dev);
5617 +
5618 +diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
5619 +index 3240b72271a7f..7558802a14350 100644
5620 +--- a/net/mptcp/mib.c
5621 ++++ b/net/mptcp/mib.c
5622 +@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = {
5623 + SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
5624 + SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
5625 + SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
5626 ++ SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
5627 + SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
5628 + SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
5629 + SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
5630 + SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
5631 + SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
5632 + SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
5633 ++ SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
5634 + SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
5635 + SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
5636 + SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
5637 +diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
5638 +index ecd3d8b117e0b..2966fcb6548ba 100644
5639 +--- a/net/mptcp/mib.h
5640 ++++ b/net/mptcp/mib.h
5641 +@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field {
5642 + MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
5643 + MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
5644 + MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */
5645 ++ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */
5646 + MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */
5647 + MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */
5648 + MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */
5649 + MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */
5650 + MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */
5651 + MPTCP_MIB_RMADDR, /* Received RM_ADDR */
5652 ++ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */
5653 + MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
5654 + MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
5655 + MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
5656 +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
5657 +index 6ab386ff32944..d9790d6fbce9c 100644
5658 +--- a/net/mptcp/pm.c
5659 ++++ b/net/mptcp/pm.c
5660 +@@ -194,6 +194,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
5661 + mptcp_pm_add_addr_send_ack(msk);
5662 + } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
5663 + pm->remote = *addr;
5664 ++ } else {
5665 ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
5666 + }
5667 +
5668 + spin_unlock_bh(&pm->lock);
5669 +@@ -234,8 +236,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
5670 + mptcp_event_addr_removed(msk, rm_list->ids[i]);
5671 +
5672 + spin_lock_bh(&pm->lock);
5673 +- mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
5674 +- pm->rm_list_rx = *rm_list;
5675 ++ if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
5676 ++ pm->rm_list_rx = *rm_list;
5677 ++ else
5678 ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
5679 + spin_unlock_bh(&pm->lock);
5680 + }
5681 +
5682 +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
5683 +index 5eada95dd76b3..d57d507ef83f1 100644
5684 +--- a/net/mptcp/pm_netlink.c
5685 ++++ b/net/mptcp/pm_netlink.c
5686 +@@ -606,6 +606,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
5687 + unsigned int add_addr_accept_max;
5688 + struct mptcp_addr_info remote;
5689 + unsigned int subflows_max;
5690 ++ bool reset_port = false;
5691 + int i, nr;
5692 +
5693 + add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
5694 +@@ -615,15 +616,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
5695 + msk->pm.add_addr_accepted, add_addr_accept_max,
5696 + msk->pm.remote.family);
5697 +
5698 +- if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote))
5699 ++ remote = msk->pm.remote;
5700 ++ if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
5701 + goto add_addr_echo;
5702 +
5703 ++	/* pick the id 0 port if none is provided with the remote address */
5704 ++ if (!remote.port) {
5705 ++ reset_port = true;
5706 ++ remote.port = sk->sk_dport;
5707 ++ }
5708 ++
5709 + /* connect to the specified remote address, using whatever
5710 + * local address the routing configuration will pick.
5711 + */
5712 +- remote = msk->pm.remote;
5713 +- if (!remote.port)
5714 +- remote.port = sk->sk_dport;
5715 + nr = fill_local_addresses_vec(msk, addrs);
5716 +
5717 + msk->pm.add_addr_accepted++;
5718 +@@ -636,8 +641,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
5719 + __mptcp_subflow_connect(sk, &addrs[i], &remote);
5720 + spin_lock_bh(&msk->pm.lock);
5721 +
5722 ++ /* be sure to echo exactly the received address */
5723 ++ if (reset_port)
5724 ++ remote.port = 0;
5725 ++
5726 + add_addr_echo:
5727 +- mptcp_pm_announce_addr(msk, &msk->pm.remote, true);
5728 ++ mptcp_pm_announce_addr(msk, &remote, true);
5729 + mptcp_pm_nl_addr_send_ack(msk);
5730 + }
5731 +
5732 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5733 +index c207728226372..a65b530975f54 100644
5734 +--- a/net/netfilter/nf_tables_api.c
5735 ++++ b/net/netfilter/nf_tables_api.c
5736 +@@ -6535,12 +6535,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
5737 + {
5738 + struct nft_object *newobj;
5739 + struct nft_trans *trans;
5740 +- int err;
5741 ++ int err = -ENOMEM;
5742 ++
5743 ++ if (!try_module_get(type->owner))
5744 ++ return -ENOENT;
5745 +
5746 + trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
5747 + sizeof(struct nft_trans_obj));
5748 + if (!trans)
5749 +- return -ENOMEM;
5750 ++ goto err_trans;
5751 +
5752 + newobj = nft_obj_init(ctx, type, attr);
5753 + if (IS_ERR(newobj)) {
5754 +@@ -6557,6 +6560,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
5755 +
5756 + err_free_trans:
5757 + kfree(trans);
5758 ++err_trans:
5759 ++ module_put(type->owner);
5760 + return err;
5761 + }
5762 +
5763 +@@ -8169,7 +8174,7 @@ static void nft_obj_commit_update(struct nft_trans *trans)
5764 + if (obj->ops->update)
5765 + obj->ops->update(obj, newobj);
5766 +
5767 +- kfree(newobj);
5768 ++ nft_obj_destroy(&trans->ctx, newobj);
5769 + }
5770 +
5771 + static void nft_commit_release(struct nft_trans *trans)
5772 +@@ -8914,7 +8919,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
5773 + break;
5774 + case NFT_MSG_NEWOBJ:
5775 + if (nft_trans_obj_update(trans)) {
5776 +- kfree(nft_trans_obj_newobj(trans));
5777 ++ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
5778 + nft_trans_destroy(trans);
5779 + } else {
5780 + trans->ctx.table->use--;
5781 +@@ -9574,10 +9579,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
5782 +
5783 + static void __nft_release_hook(struct net *net, struct nft_table *table)
5784 + {
5785 ++ struct nft_flowtable *flowtable;
5786 + struct nft_chain *chain;
5787 +
5788 + list_for_each_entry(chain, &table->chains, list)
5789 + nf_tables_unregister_hook(net, table, chain);
5790 ++ list_for_each_entry(flowtable, &table->flowtables, list)
5791 ++ nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
5792 + }
5793 +
5794 + static void __nft_release_hooks(struct net *net)
5795 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
5796 +index 9656c16462222..2d36952b13920 100644
5797 +--- a/net/netfilter/nf_tables_offload.c
5798 ++++ b/net/netfilter/nf_tables_offload.c
5799 +@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
5800 +
5801 + expr = nft_expr_first(rule);
5802 + while (nft_expr_more(rule, expr)) {
5803 +- if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
5804 ++ if (expr->ops->offload_action &&
5805 ++ expr->ops->offload_action(expr))
5806 + num_actions++;
5807 +
5808 + expr = nft_expr_next(expr);
5809 +diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
5810 +index bbf3fcba3df40..5b5c607fbf83f 100644
5811 +--- a/net/netfilter/nft_dup_netdev.c
5812 ++++ b/net/netfilter/nft_dup_netdev.c
5813 +@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
5814 + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
5815 + }
5816 +
5817 ++static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
5818 ++{
5819 ++ return true;
5820 ++}
5821 ++
5822 + static struct nft_expr_type nft_dup_netdev_type;
5823 + static const struct nft_expr_ops nft_dup_netdev_ops = {
5824 + .type = &nft_dup_netdev_type,
5825 +@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
5826 + .init = nft_dup_netdev_init,
5827 + .dump = nft_dup_netdev_dump,
5828 + .offload = nft_dup_netdev_offload,
5829 ++ .offload_action = nft_dup_netdev_offload_action,
5830 + };
5831 +
5832 + static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
5833 +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
5834 +index cd59afde5b2f8..7730409f6f091 100644
5835 +--- a/net/netfilter/nft_fwd_netdev.c
5836 ++++ b/net/netfilter/nft_fwd_netdev.c
5837 +@@ -77,6 +77,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
5838 + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
5839 + }
5840 +
5841 ++static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
5842 ++{
5843 ++ return true;
5844 ++}
5845 ++
5846 + struct nft_fwd_neigh {
5847 + u8 sreg_dev;
5848 + u8 sreg_addr;
5849 +@@ -219,6 +224,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
5850 + .dump = nft_fwd_netdev_dump,
5851 + .validate = nft_fwd_validate,
5852 + .offload = nft_fwd_netdev_offload,
5853 ++ .offload_action = nft_fwd_netdev_offload_action,
5854 + };
5855 +
5856 + static const struct nft_expr_ops *
5857 +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
5858 +index 90c64d27ae532..d0f67d325bdfd 100644
5859 +--- a/net/netfilter/nft_immediate.c
5860 ++++ b/net/netfilter/nft_immediate.c
5861 +@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx,
5862 + return 0;
5863 + }
5864 +
5865 ++static bool nft_immediate_offload_action(const struct nft_expr *expr)
5866 ++{
5867 ++ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
5868 ++
5869 ++ if (priv->dreg == NFT_REG_VERDICT)
5870 ++ return true;
5871 ++
5872 ++ return false;
5873 ++}
5874 ++
5875 + static const struct nft_expr_ops nft_imm_ops = {
5876 + .type = &nft_imm_type,
5877 + .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
5878 +@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = {
5879 + .dump = nft_immediate_dump,
5880 + .validate = nft_immediate_validate,
5881 + .offload = nft_immediate_offload,
5882 +- .offload_flags = NFT_OFFLOAD_F_ACTION,
5883 ++ .offload_action = nft_immediate_offload_action,
5884 + };
5885 +
5886 + struct nft_expr_type nft_imm_type __read_mostly = {
5887 +diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
5888 +index 5e6459e116055..7013f55f05d1e 100644
5889 +--- a/net/netfilter/xt_socket.c
5890 ++++ b/net/netfilter/xt_socket.c
5891 +@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par)
5892 + {
5893 + if (par->family == NFPROTO_IPV4)
5894 + nf_defrag_ipv4_disable(par->net);
5895 ++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
5896 + else if (par->family == NFPROTO_IPV6)
5897 +- nf_defrag_ipv4_disable(par->net);
5898 ++ nf_defrag_ipv6_disable(par->net);
5899 ++#endif
5900 + }
5901 +
5902 + static struct xt_match socket_mt_reg[] __read_mostly = {
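
The xt_socket hunk fixes two things at once: the IPv6 branch wrongly called the IPv4 defrag-disable helper, and the branch is now compiled out entirely when IP6 netfilter support is absent, since the IPv6 helper does not exist in that configuration. A toy sketch of the guard pattern (HAVE_V6 and the helper names are illustrative, not kernel symbols):

    #include <stdio.h>

    #define AF_INET   2
    #define AF_INET6 10
    /* #define HAVE_V6 1 */  /* build-time option, like CONFIG_IP6_NF_IPTABLES */

    static void v4_disable(void) { puts("v4 defrag off"); }
    #ifdef HAVE_V6
    static void v6_disable(void) { puts("v6 defrag off"); }
    #endif

    static void family_disable(int family)
    {
            if (family == AF_INET)
                    v4_disable();
    #ifdef HAVE_V6
            /* without the guard this would fail to build when the option
             * is off -- merely branching around the call is not enough */
            else if (family == AF_INET6)
                    v6_disable();
    #endif
    }

    int main(void)
    {
            family_disable(AF_INET);
            return 0;
    }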
5903 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
5904 +index 076774034bb96..780d9e2246f39 100644
5905 +--- a/net/openvswitch/actions.c
5906 ++++ b/net/openvswitch/actions.c
5907 +@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
5908 + memcpy(addr, new_addr, sizeof(__be32[4]));
5909 + }
5910 +
5911 +-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
5912 ++static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
5913 + {
5914 ++ u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
5915 ++
5916 ++ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
5917 ++
5918 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
5919 ++ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
5920 ++ (__force __wsum)(ipv6_tclass << 12));
5921 ++
5922 ++ ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
5923 ++}
5924 ++
5925 ++static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
5926 ++{
5927 ++ u32 ofl;
5928 ++
5929 ++ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
5930 ++ fl = OVS_MASKED(ofl, fl, mask);
5931 ++
5932 + /* Bits 21-24 are always unmasked, so this retains their values. */
5933 +- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
5934 +- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
5935 +- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
5936 ++ nh->flow_lbl[0] = (u8)(fl >> 16);
5937 ++ nh->flow_lbl[1] = (u8)(fl >> 8);
5938 ++ nh->flow_lbl[2] = (u8)fl;
5939 ++
5940 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
5941 ++ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
5942 ++}
5943 ++
5944 ++static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
5945 ++{
5946 ++ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
5947 ++
5948 ++ if (skb->ip_summed == CHECKSUM_COMPLETE)
5949 ++ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
5950 ++ (__force __wsum)(new_ttl << 8));
5951 ++ nh->hop_limit = new_ttl;
5952 + }
5953 +
5954 + static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
5955 +@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
5956 + }
5957 + }
5958 + if (mask->ipv6_tclass) {
5959 +- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
5960 ++ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
5961 + flow_key->ip.tos = ipv6_get_dsfield(nh);
5962 + }
5963 + if (mask->ipv6_label) {
5964 +- set_ipv6_fl(nh, ntohl(key->ipv6_label),
5965 ++ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
5966 + ntohl(mask->ipv6_label));
5967 + flow_key->ipv6.label =
5968 + *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
5969 + }
5970 + if (mask->ipv6_hlimit) {
5971 +- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
5972 +- mask->ipv6_hlimit);
5973 ++ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
5974 + flow_key->ip.ttl = nh->hop_limit;
5975 + }
5976 + return 0;
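
The three new openvswitch helpers share one idea: when skb->ip_summed is CHECKSUM_COMPLETE, skb->csum holds a one's-complement sum over the packet, so any header bits that are rewritten must be folded out of the sum and the new value folded in, which is what csum_replace() does; the shifts position the changed field as it sits within the words the sum is built from. A standalone sketch of that incremental update (toy code, not the kernel's csum implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator back into a 16-bit one's-complement sum. */
    static uint16_t csum_fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    /* Incremental update: remove 'old', add 'new'; no full re-sum needed. */
    static uint16_t csum_replace16(uint16_t sum, uint16_t old, uint16_t new)
    {
            uint32_t acc = sum;

            acc += (uint16_t)~old;  /* one's-complement subtraction */
            acc += new;
            return csum_fold(acc);
    }

    int main(void)
    {
            /* e.g. hop_limit occupies the high byte of its 16-bit word,
             * which is why set_ipv6_ttl() above shifts by 8 */
            uint16_t sum = 0xbeef;
            uint8_t old_ttl = 64, new_ttl = 63;

            sum = csum_replace16(sum, (uint16_t)(old_ttl << 8),
                                 (uint16_t)(new_ttl << 8));
            printf("updated sum: 0x%04x\n", sum);
            return 0;
    }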
5977 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5978 +index 2a17eb77c9049..4ffea1290ce1c 100644
5979 +--- a/net/sched/act_ct.c
5980 ++++ b/net/sched/act_ct.c
5981 +@@ -516,11 +516,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
5982 + struct nf_conn *ct;
5983 + u8 dir;
5984 +
5985 +- /* Previously seen or loopback */
5986 +- ct = nf_ct_get(skb, &ctinfo);
5987 +- if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
5988 +- return false;
5989 +-
5990 + switch (family) {
5991 + case NFPROTO_IPV4:
5992 + if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
5993 +diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
5994 +index 67e9d9fde0854..756b4dbadf36d 100644
5995 +--- a/net/smc/smc_pnet.c
5996 ++++ b/net/smc/smc_pnet.c
5997 +@@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
5998 + pnettable = &sn->pnettable;
5999 +
6000 + /* remove table entry */
6001 +- write_lock(&pnettable->lock);
6002 ++ mutex_lock(&pnettable->lock);
6003 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
6004 + list) {
6005 + if (!pnet_name ||
6006 +@@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
6007 + rc = 0;
6008 + }
6009 + }
6010 +- write_unlock(&pnettable->lock);
6011 ++ mutex_unlock(&pnettable->lock);
6012 +
6013 + /* if this is not the initial namespace, stop here */
6014 + if (net != &init_net)
6015 +@@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
6016 + sn = net_generic(net, smc_net_id);
6017 + pnettable = &sn->pnettable;
6018 +
6019 +- write_lock(&pnettable->lock);
6020 ++ mutex_lock(&pnettable->lock);
6021 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
6022 + if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
6023 + !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
6024 +@@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
6025 + break;
6026 + }
6027 + }
6028 +- write_unlock(&pnettable->lock);
6029 ++ mutex_unlock(&pnettable->lock);
6030 + return rc;
6031 + }
6032 +
6033 +@@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
6034 + sn = net_generic(net, smc_net_id);
6035 + pnettable = &sn->pnettable;
6036 +
6037 +- write_lock(&pnettable->lock);
6038 ++ mutex_lock(&pnettable->lock);
6039 + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
6040 + if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
6041 + dev_put(pnetelem->ndev);
6042 +@@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
6043 + break;
6044 + }
6045 + }
6046 +- write_unlock(&pnettable->lock);
6047 ++ mutex_unlock(&pnettable->lock);
6048 + return rc;
6049 + }
6050 +
6051 +@@ -371,7 +371,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
6052 +
6053 + rc = -EEXIST;
6054 + new_netdev = true;
6055 +- write_lock(&pnettable->lock);
6056 ++ mutex_lock(&pnettable->lock);
6057 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
6058 + if (tmp_pe->type == SMC_PNET_ETH &&
6059 + !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
6060 +@@ -381,9 +381,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
6061 + }
6062 + if (new_netdev) {
6063 + list_add_tail(&new_pe->list, &pnettable->pnetlist);
6064 +- write_unlock(&pnettable->lock);
6065 ++ mutex_unlock(&pnettable->lock);
6066 + } else {
6067 +- write_unlock(&pnettable->lock);
6068 ++ mutex_unlock(&pnettable->lock);
6069 + kfree(new_pe);
6070 + goto out_put;
6071 + }
6072 +@@ -444,7 +444,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
6073 + new_pe->ib_port = ib_port;
6074 +
6075 + new_ibdev = true;
6076 +- write_lock(&pnettable->lock);
6077 ++ mutex_lock(&pnettable->lock);
6078 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
6079 + if (tmp_pe->type == SMC_PNET_IB &&
6080 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
6081 +@@ -454,9 +454,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
6082 + }
6083 + if (new_ibdev) {
6084 + list_add_tail(&new_pe->list, &pnettable->pnetlist);
6085 +- write_unlock(&pnettable->lock);
6086 ++ mutex_unlock(&pnettable->lock);
6087 + } else {
6088 +- write_unlock(&pnettable->lock);
6089 ++ mutex_unlock(&pnettable->lock);
6090 + kfree(new_pe);
6091 + }
6092 + return (new_ibdev) ? 0 : -EEXIST;
6093 +@@ -601,7 +601,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
6094 + pnettable = &sn->pnettable;
6095 +
6096 + /* dump pnettable entries */
6097 +- read_lock(&pnettable->lock);
6098 ++ mutex_lock(&pnettable->lock);
6099 + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
6100 + if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
6101 + continue;
6102 +@@ -616,7 +616,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
6103 + break;
6104 + }
6105 + }
6106 +- read_unlock(&pnettable->lock);
6107 ++ mutex_unlock(&pnettable->lock);
6108 + return idx;
6109 + }
6110 +
6111 +@@ -860,7 +860,7 @@ int smc_pnet_net_init(struct net *net)
6112 + struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
6113 +
6114 + INIT_LIST_HEAD(&pnettable->pnetlist);
6115 +- rwlock_init(&pnettable->lock);
6116 ++ mutex_init(&pnettable->lock);
6117 + INIT_LIST_HEAD(&pnetids_ndev->list);
6118 + rwlock_init(&pnetids_ndev->lock);
6119 +
6120 +@@ -940,7 +940,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
6121 + sn = net_generic(net, smc_net_id);
6122 + pnettable = &sn->pnettable;
6123 +
6124 +- read_lock(&pnettable->lock);
6125 ++ mutex_lock(&pnettable->lock);
6126 + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
6127 + if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
6128 + /* get pnetid of netdev device */
6129 +@@ -949,7 +949,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
6130 + break;
6131 + }
6132 + }
6133 +- read_unlock(&pnettable->lock);
6134 ++ mutex_unlock(&pnettable->lock);
6135 + return rc;
6136 + }
6137 +
6138 +@@ -1141,7 +1141,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
6139 + sn = net_generic(&init_net, smc_net_id);
6140 + pnettable = &sn->pnettable;
6141 +
6142 +- read_lock(&pnettable->lock);
6143 ++ mutex_lock(&pnettable->lock);
6144 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
6145 + if (tmp_pe->type == SMC_PNET_IB &&
6146 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
6147 +@@ -1151,7 +1151,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
6148 + break;
6149 + }
6150 + }
6151 +- read_unlock(&pnettable->lock);
6152 ++ mutex_unlock(&pnettable->lock);
6153 +
6154 + return rc;
6155 + }
6156 +@@ -1170,7 +1170,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
6157 + sn = net_generic(&init_net, smc_net_id);
6158 + pnettable = &sn->pnettable;
6159 +
6160 +- read_lock(&pnettable->lock);
6161 ++ mutex_lock(&pnettable->lock);
6162 + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
6163 + if (tmp_pe->type == SMC_PNET_IB &&
6164 + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
6165 +@@ -1179,7 +1179,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
6166 + break;
6167 + }
6168 + }
6169 +- read_unlock(&pnettable->lock);
6170 ++ mutex_unlock(&pnettable->lock);
6171 +
6172 + return rc;
6173 + }
6174 +diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
6175 +index 14039272f7e42..80a88eea49491 100644
6176 +--- a/net/smc/smc_pnet.h
6177 ++++ b/net/smc/smc_pnet.h
6178 +@@ -29,7 +29,7 @@ struct smc_link_group;
6179 + * @pnetlist: List of PNETIDs
6180 + */
6181 + struct smc_pnettable {
6182 +- rwlock_t lock;
6183 ++ struct mutex lock;
6184 + struct list_head pnetlist;
6185 + };
6186 +
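
The smc_pnet conversion above replaces rwlock_t with a mutex throughout: a kernel rwlock_t is a spinning lock, so nothing that might sleep may run while it is held, whereas a mutex tolerates blocking inside the critical section at the cost of read-side concurrency. A rough pthread analogue of the resulting shape, with toy names standing in for the pnettable:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pnet_entry {
            char name[16];
            struct pnet_entry *next;
    };

    static struct {
            pthread_mutex_t lock;   /* was an rwlock in the kernel analogue */
            struct pnet_entry *head;
    } table = { PTHREAD_MUTEX_INITIALIZER, NULL };

    static int table_add(const char *name)
    {
            struct pnet_entry *e = calloc(1, sizeof(*e));

            if (!e)
                    return -1;
            snprintf(e->name, sizeof(e->name), "%s", name);

            /* unlike a spinning rwlock, a mutex would also tolerate
             * blocking work inside this critical section */
            pthread_mutex_lock(&table.lock);
            e->next = table.head;
            table.head = e;
            pthread_mutex_unlock(&table.lock);
            return 0;
    }

    static int table_find(const char *name)
    {
            struct pnet_entry *e;
            int found = 0;

            pthread_mutex_lock(&table.lock);  /* readers serialize too */
            for (e = table.head; e; e = e->next)
                    if (!strcmp(e->name, name)) {
                            found = 1;
                            break;
                    }
            pthread_mutex_unlock(&table.lock);
            return found;
    }

    int main(void)
    {
            table_add("pnet1");
            printf("pnet1 %s\n", table_find("pnet1") ? "found" : "missing");
            return 0;
    }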
6187 +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
6188 +index 01396dd1c899b..1d8ba233d0474 100644
6189 +--- a/net/tipc/name_table.c
6190 ++++ b/net/tipc/name_table.c
6191 +@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
6192 + list_for_each_entry(p, &sr->all_publ, all_publ)
6193 + if (p->key == *last_key)
6194 + break;
6195 +- if (p->key != *last_key)
6196 ++ if (list_entry_is_head(p, &sr->all_publ, all_publ))
6197 + return -EPIPE;
6198 + } else {
6199 + p = list_first_entry(&sr->all_publ,
6200 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
6201 +index 3e63c83e641c5..7545321c3440b 100644
6202 +--- a/net/tipc/socket.c
6203 ++++ b/net/tipc/socket.c
6204 +@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
6205 + if (p->key == *last_publ)
6206 + break;
6207 + }
6208 +- if (p->key != *last_publ) {
6209 ++ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
6210 + /* We never set seq or call nl_dump_check_consistent()
6211 + * this means that setting prev_seq here will cause the
6212 + * consistence check to fail in the netlink callback
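
Both TIPC hunks fix the same hazard: when a list_for_each_entry()-style walk ends without a match, the cursor no longer points at a real element but at a container computed from the list head, so re-testing a member such as p->key reads unrelated memory and can spuriously "match". list_entry_is_head() compares the cursor against the head instead. A compact userspace illustration (toy list code, not <linux/list.h>):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct publ {
            int key;
            struct list_head node;
    };

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct publ a = { .key = 1 };
            struct list_head *n;
            struct publ *p = NULL;

            /* insert a after head */
            a.node.next = head.next;  a.node.prev = &head;
            head.next->prev = &a.node;  head.next = &a.node;

            /* walk, looking for key 42 (absent) */
            for (n = head.next; n != &head; n = n->next) {
                    p = container_of(n, struct publ, node);
                    if (p->key == 42)
                            break;
            }
            p = container_of(n, struct publ, node);
            /* after a full walk p aliases the head: testing p->key here,
             * as the old code did, reads memory that is not a publ */
            (void)p;

            if (n == &head)  /* list_entry_is_head() analogue: safe */
                    puts("key 42 not found (detected safely)");
            return 0;
    }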
6213 +diff --git a/security/selinux/ima.c b/security/selinux/ima.c
6214 +index 727c4e43219d7..ff7aea6b3774a 100644
6215 +--- a/security/selinux/ima.c
6216 ++++ b/security/selinux/ima.c
6217 +@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
6218 + size_t policy_len;
6219 + int rc = 0;
6220 +
6221 +- WARN_ON(!mutex_is_locked(&state->policy_mutex));
6222 ++ lockdep_assert_held(&state->policy_mutex);
6223 +
6224 + state_str = selinux_ima_collect_state(state);
6225 + if (!state_str) {
6226 +@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
6227 + */
6228 + void selinux_ima_measure_state(struct selinux_state *state)
6229 + {
6230 +- WARN_ON(mutex_is_locked(&state->policy_mutex));
6231 ++ lockdep_assert_not_held(&state->policy_mutex);
6232 +
6233 + mutex_lock(&state->policy_mutex);
6234 + selinux_ima_measure_state_locked(state);
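
The selinux/ima.c change tightens both checks: mutex_is_locked() only reports that *some* task holds the mutex, while lockdep_assert_held() asserts that the *current* task does (and compiles away without CONFIG_LOCKDEP); the unlocked entry point gains the inverse assertion. A toy pthread sketch of the distinction, with illustrative names:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct owned_mutex {
            pthread_mutex_t m;
            pthread_t owner;
            int locked;
    };

    static void om_lock(struct owned_mutex *l)
    {
            pthread_mutex_lock(&l->m);
            l->owner = pthread_self();
            l->locked = 1;
    }

    static void om_unlock(struct owned_mutex *l)
    {
            l->locked = 0;
            pthread_mutex_unlock(&l->m);
    }

    /* Weak: passes if *anyone* holds it -- mutex_is_locked() semantics. */
    #define assert_locked(l)        assert((l)->locked)

    /* Strong: held by *me* -- what lockdep_assert_held() expresses. */
    #define assert_held_by_me(l) \
            assert((l)->locked && pthread_equal((l)->owner, pthread_self()))

    int main(void)
    {
            struct owned_mutex l = { .m = PTHREAD_MUTEX_INITIALIZER };

            om_lock(&l);
            assert_locked(&l);
            assert_held_by_me(&l);
            om_unlock(&l);
            puts("ok");
            return 0;
    }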
6235 +diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
6236 +index f5d260b1df4d1..15a4547d608ec 100644
6237 +--- a/tools/perf/util/data.c
6238 ++++ b/tools/perf/util/data.c
6239 +@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr)
6240 + if (!files)
6241 + return -ENOMEM;
6242 +
6243 +- data->dir.version = PERF_DIR_VERSION;
6244 +- data->dir.files = files;
6245 +- data->dir.nr = nr;
6246 +-
6247 + for (i = 0; i < nr; i++) {
6248 + struct perf_data_file *file = &files[i];
6249 +
6250 +@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr)
6251 + file->fd = ret;
6252 + }
6253 +
6254 ++ data->dir.version = PERF_DIR_VERSION;
6255 ++ data->dir.files = files;
6256 ++ data->dir.nr = nr;
6257 + return 0;
6258 +
6259 + out_err:
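
Moving the three assignments below the loop means perf_data__create_dir() publishes dir.files and dir.nr only once every file has been created, so an error partway through the loop cannot leave the caller holding pointers into storage the error path releases. The general "build privately, publish on success" shape, as a standalone sketch with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    struct dir_data {
            FILE **files;
            int nr;
    };

    /* Build everything in locals; copy into *out only when complete. */
    static int create_dir(struct dir_data *out, int nr)
    {
            FILE **files = calloc(nr, sizeof(*files));
            int i;

            if (!files)
                    return -1;

            for (i = 0; i < nr; i++) {
                    files[i] = tmpfile();
                    if (!files[i])
                            goto out_err;
            }

            /* publish only on full success */
            out->files = files;
            out->nr = nr;
            return 0;

    out_err:
            while (i--)
                    fclose(files[i]);
            free(files);    /* *out never saw these pointers */
            return -1;
    }

    int main(void)
    {
            struct dir_data d = { 0 };

            if (!create_dir(&d, 3))
                    printf("created %d files\n", d.nr);
            return 0;
    }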
6260 +diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
6261 +index 7c554234b43d4..f39c8ffc5a111 100644
6262 +--- a/tools/perf/util/evlist-hybrid.c
6263 ++++ b/tools/perf/util/evlist-hybrid.c
6264 +@@ -153,8 +153,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
6265 + perf_cpu_map__put(matched_cpus);
6266 + perf_cpu_map__put(unmatched_cpus);
6267 + }
6268 +-
6269 +- ret = (unmatched_count == events_nr) ? -1 : 0;
6270 ++ if (events_nr)
6271 ++ ret = (unmatched_count == events_nr) ? -1 : 0;
6272 + out:
6273 + perf_cpu_map__put(cpus);
6274 + return ret;
6275 +diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
6276 +index 2966564b8497a..6c85b00f27b2e 100644
6277 +--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
6278 ++++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
6279 +@@ -235,7 +235,7 @@ SEC("sk_msg1")
6280 + int bpf_prog4(struct sk_msg_md *msg)
6281 + {
6282 + int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
6283 +- int *start, *end, *start_push, *end_push, *start_pop, *pop;
6284 ++ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
6285 +
6286 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
6287 + if (bytes)
6288 +@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg)
6289 + bpf_msg_pull_data(msg, *start, *end, 0);
6290 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
6291 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
6292 +- if (start_push && end_push)
6293 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
6294 ++ if (start_push && end_push) {
6295 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
6296 ++ if (err)
6297 ++ return SK_DROP;
6298 ++ }
6299 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
6300 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
6301 + if (start_pop && pop)
6302 +@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg)
6303 + {
6304 + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
6305 + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
6306 ++ int err = 0;
6307 + __u64 flags = 0;
6308 +
6309 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
6310 +@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg)
6311 +
6312 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
6313 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
6314 +- if (start_push && end_push)
6315 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
6316 ++ if (start_push && end_push) {
6317 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
6318 ++ if (err)
6319 ++ return SK_DROP;
6320 ++ }
6321 +
6322 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
6323 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
6324 +@@ -338,7 +345,7 @@ SEC("sk_msg5")
6325 + int bpf_prog10(struct sk_msg_md *msg)
6326 + {
6327 + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
6328 +- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
6329 ++ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
6330 +
6331 + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
6332 + if (bytes)
6333 +@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg)
6334 + bpf_msg_pull_data(msg, *start, *end, 0);
6335 + start_push = bpf_map_lookup_elem(&sock_bytes, &two);
6336 + end_push = bpf_map_lookup_elem(&sock_bytes, &three);
6337 +- if (start_push && end_push)
6338 +- bpf_msg_push_data(msg, *start_push, *end_push, 0);
6339 ++ if (start_push && end_push) {
6340 ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
6341 ++ if (err)
6342 ++ return SK_PASS;
6343 ++ }
6344 + start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
6345 + pop = bpf_map_lookup_elem(&sock_bytes, &five);
6346 + if (start_pop && pop)
6347 +diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
6348 +index 2674ba20d5249..ff821025d3096 100755
6349 +--- a/tools/testing/selftests/net/mptcp/diag.sh
6350 ++++ b/tools/testing/selftests/net/mptcp/diag.sh
6351 +@@ -71,6 +71,36 @@ chk_msk_remote_key_nr()
6352 + __chk_nr "grep -c remote_key" $*
6353 + }
6354 +
6355 ++# $1: ns, $2: port
6356 ++wait_local_port_listen()
6357 ++{
6358 ++ local listener_ns="${1}"
6359 ++ local port="${2}"
6360 ++
6361 ++ local port_hex i
6362 ++
6363 ++ port_hex="$(printf "%04X" "${port}")"
6364 ++ for i in $(seq 10); do
6365 ++ ip netns exec "${listener_ns}" cat /proc/net/tcp | \
6366 ++ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
6367 ++ break
6368 ++ sleep 0.1
6369 ++ done
6370 ++}
6371 ++
6372 ++wait_connected()
6373 ++{
6374 ++ local listener_ns="${1}"
6375 ++ local port="${2}"
6376 ++
6377 ++ local port_hex i
6378 ++
6379 ++ port_hex="$(printf "%04X" "${port}")"
6380 ++ for i in $(seq 10); do
6381 ++ ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break
6382 ++ sleep 0.1
6383 ++ done
6384 ++}
6385 +
6386 + trap cleanup EXIT
6387 + ip netns add $ns
6388 +@@ -81,15 +111,15 @@ echo "a" | \
6389 + ip netns exec $ns \
6390 + ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
6391 + 0.0.0.0 >/dev/null &
6392 +-sleep 0.1
6393 ++wait_local_port_listen $ns 10000
6394 + chk_msk_nr 0 "no msk on netns creation"
6395 +
6396 + echo "b" | \
6397 + timeout ${timeout_test} \
6398 + ip netns exec $ns \
6399 +- ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
6400 ++ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
6401 + 127.0.0.1 >/dev/null &
6402 +-sleep 0.1
6403 ++wait_connected $ns 10000
6404 + chk_msk_nr 2 "after MPC handshake "
6405 + chk_msk_remote_key_nr 2 "....chk remote_key"
6406 + chk_msk_fallback_nr 0 "....chk no fallback"
6407 +@@ -101,13 +131,13 @@ echo "a" | \
6408 + ip netns exec $ns \
6409 + ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
6410 + 0.0.0.0 >/dev/null &
6411 +-sleep 0.1
6412 ++wait_local_port_listen $ns 10001
6413 + echo "b" | \
6414 + timeout ${timeout_test} \
6415 + ip netns exec $ns \
6416 +- ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
6417 ++ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
6418 + 127.0.0.1 >/dev/null &
6419 +-sleep 0.1
6420 ++wait_connected $ns 10001
6421 + chk_msk_fallback_nr 1 "check fallback"
6422 + flush_pids
6423 +
6424 +@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do
6425 + ./mptcp_connect -p $((I+10001)) -l -w 10 \
6426 + -t ${timeout_poll} 0.0.0.0 >/dev/null &
6427 + done
6428 +-sleep 0.1
6429 ++wait_local_port_listen $ns $((NR_CLIENTS + 10001))
6430 +
6431 + for I in `seq 1 $NR_CLIENTS`; do
6432 + echo "b" | \
6433 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
6434 +index f06dc9dfe15eb..f4f0e3eb3b921 100755
6435 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
6436 ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
6437 +@@ -624,6 +624,7 @@ chk_join_nr()
6438 + local ack_nr=$4
6439 + local count
6440 + local dump_stats
6441 ++ local with_cookie
6442 +
6443 + printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn"
6444 + count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
6445 +@@ -637,12 +638,20 @@ chk_join_nr()
6446 + fi
6447 +
6448 + echo -n " - synack"
6449 ++ with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
6450 + count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
6451 + [ -z "$count" ] && count=0
6452 + if [ "$count" != "$syn_ack_nr" ]; then
6453 +- echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
6454 +- ret=1
6455 +- dump_stats=1
6456 ++ # simult connections exceeding the limit with cookie enabled could go up to
6457 ++ # synack validation as the conn limit can be enforced reliably only after
6458 ++ # the subflow creation
6459 ++ if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then
6460 ++ echo -n "[ ok ]"
6461 ++ else
6462 ++ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
6463 ++ ret=1
6464 ++ dump_stats=1
6465 ++ fi
6466 + else
6467 + echo -n "[ ok ]"
6468 + fi