From: Alice Ferrazzi <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Tue, 30 Mar 2021 12:57:47
Message-Id: 1617109034.df9eae456fca608e8af053e24c165a57bc410ce0.alicef@gentoo
commit: df9eae456fca608e8af053e24c165a57bc410ce0
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Tue Mar 30 12:57:03 2021 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Tue Mar 30 12:57:14 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=df9eae45

Linux patch 5.10.27

Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>

0000_README | 4 +
1026_linux-5.10.27.patch | 9018 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 9022 insertions(+)

diff --git a/0000_README b/0000_README
index da6b8c2..936c976 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch: 1025_linux-5.10.26.patch
From: http://www.kernel.org
Desc: Linux 5.10.26

+Patch: 1026_linux-5.10.27.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.27
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1026_linux-5.10.27.patch b/1026_linux-5.10.27.patch
new file mode 100644
index 0000000..9a18c2c
--- /dev/null
+++ b/1026_linux-5.10.27.patch
@@ -0,0 +1,9018 @@
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index a5d27553d59c9..cd8a585568045 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -4810,8 +4810,10 @@ If an MSR access is not permitted through the filtering, it generates a
+ allows user space to deflect and potentially handle various MSR accesses
+ into user space.
+
+-If a vCPU is in running state while this ioctl is invoked, the vCPU may
+-experience inconsistent filtering behavior on MSR accesses.
++Note, invoking this ioctl with a vCPU is running is inherently racy. However,
++KVM does guarantee that vCPUs will see either the previous filter or the new
++filter, e.g. MSRs with identical settings in both the old and new filter will
++have deterministic behavior.
+
+
+ 5. The kvm_run structure
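The old-filter-or-new-filter guarantee documented above comes from publishing the whole filter through a single pointer: a reader loads the pointer once, so it sees either the previous filter or the new one in its entirety, never a half-updated mix. A minimal userspace C sketch of that publish-once pattern (illustrative names only; the kernel code later in this patch uses SRCU rather than plain atomics):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct msr_filter {
        bool default_allow;
        /* range bitmaps elided */
};

/* One atomic pointer holds the currently active filter. */
static _Atomic(struct msr_filter *) active_filter;

/* Reader: a single load, so the result is wholly old or wholly new. */
static bool filter_allows(uint32_t msr)
{
        struct msr_filter *f = atomic_load(&active_filter);

        return f ? f->default_allow : true; /* per-MSR range check elided */
}

/* Writer: build the new filter completely, then publish it in one store.
 * Knowing when the old filter may be freed is the part KVM solves with
 * synchronize_srcu(); that is elided here. */
static void install_filter(struct msr_filter *newf)
{
        atomic_store(&active_filter, newf);
}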
+diff --git a/Makefile b/Makefile
+index d4b87e604762a..4801cc25e3472 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+@@ -265,7 +265,8 @@ no-dot-config-targets := $(clean-targets) \
+ $(version_h) headers headers_% archheaders archscripts \
+ %asm-generic kernelversion %src-pkg dt_binding_check \
+ outputmakefile
+-no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
++no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
++ image_name
+ single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+
+ config-build :=
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index 73b6b1f89de99..775ceb3acb6c0 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -334,14 +334,6 @@
+ };
+
+ &pinctrl {
+- atmel,mux-mask = <
+- /* A B C */
+- 0xFFFFFE7F 0xC0E0397F 0xEF00019D /* pioA */
+- 0x03FFFFFF 0x02FC7E68 0x00780000 /* pioB */
+- 0xffffffff 0xF83FFFFF 0xB800F3FC /* pioC */
+- 0x003FFFFF 0x003F8000 0x00000000 /* pioD */
+- >;
+-
+ adc {
+ pinctrl_adc_default: adc_default {
+ atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+index b1f994c0ae793..e4824abb385ac 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+@@ -84,8 +84,8 @@
+ pinctrl-0 = <&pinctrl_macb0_default>;
+ phy-mode = "rmii";
+
+- ethernet-phy@0 {
+- reg = <0x0>;
++ ethernet-phy@7 {
++ reg = <0x7>;
+ interrupt-parent = <&pioA>;
+ interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+index ecbb2cc5b9ab4..79cc45728cd2d 100644
+--- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
++++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+@@ -14,5 +14,6 @@
+ };
+
+ &gpmi {
++ fsl,use-minimum-ecc;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
+index 84066c1298df9..ec45ced3cde68 100644
+--- a/arch/arm/boot/dts/sam9x60.dtsi
++++ b/arch/arm/boot/dts/sam9x60.dtsi
+@@ -606,6 +606,15 @@
+ compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ ranges = <0xfffff400 0xfffff400 0x800>;
+
++ /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
++ atmel,mux-mask = <
++ /* A B C */
++ 0xffffffff 0xffe03fff 0xef00019d /* pioA */
++ 0x03ffffff 0x02fc7e7f 0x00780000 /* pioB */
++ 0xffffffff 0xffffffff 0xf83fffff /* pioC */
++ 0x003fffff 0x003f8000 0x00000000 /* pioD */
++ >;
++
+ pioA: gpio@fffff400 {
+ compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ reg = <0xfffff400 0x200>;
+diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
+index 62df666c2bd0b..17b66f0d0deef 100644
+--- a/arch/arm/mach-omap2/sr_device.c
++++ b/arch/arm/mach-omap2/sr_device.c
+@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
+
+ extern struct omap_sr_data omap_sr_pdata[];
+
+-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
++static int __init sr_init_by_name(const char *name, const char *voltdm)
+ {
+ struct omap_sr_data *sr_data = NULL;
+ struct omap_volt_data *volt_data;
+- struct omap_smartreflex_dev_attr *sr_dev_attr;
+ static int i;
+
+- if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
+- !strncmp(oh->name, "smartreflex_mpu", 16))
++ if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
++ !strncmp(name, "smartreflex_mpu", 16))
+ sr_data = &omap_sr_pdata[OMAP_SR_MPU];
+- else if (!strncmp(oh->name, "smartreflex_core", 17))
++ else if (!strncmp(name, "smartreflex_core", 17))
+ sr_data = &omap_sr_pdata[OMAP_SR_CORE];
+- else if (!strncmp(oh->name, "smartreflex_iva", 16))
++ else if (!strncmp(name, "smartreflex_iva", 16))
+ sr_data = &omap_sr_pdata[OMAP_SR_IVA];
+
+ if (!sr_data) {
+- pr_err("%s: Unknown instance %s\n", __func__, oh->name);
++ pr_err("%s: Unknown instance %s\n", __func__, name);
+ return -EINVAL;
+ }
+
+- sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+- if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+- pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+- __func__, oh->name);
+- goto exit;
+- }
+-
+- sr_data->name = oh->name;
++ sr_data->name = name;
+ if (cpu_is_omap343x())
+ sr_data->ip_type = 1;
+ else
+@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+ }
+ }
+
+- sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
++ sr_data->voltdm = voltdm_lookup(voltdm);
+ if (!sr_data->voltdm) {
+ pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
+- __func__, sr_dev_attr->sensor_voltdm_name);
++ __func__, voltdm);
+ goto exit;
+ }
+
+@@ -160,6 +152,20 @@ exit:
+ return 0;
+ }
+
++static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
++{
++ struct omap_smartreflex_dev_attr *sr_dev_attr;
++
++ sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
++ if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
++ pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
++ __func__, oh->name);
++ return 0;
++ }
++
++ return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
++}
++
+ /*
+ * API to be called from board files to enable smartreflex
+ * autocompensation at init.
+@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
+ sr_enable_on_init = true;
+ }
+
++static const char * const omap4_sr_instances[] = {
++ "mpu",
++ "iva",
++ "core",
++};
++
++static const char * const dra7_sr_instances[] = {
++ "mpu",
++ "core",
++};
++
+ int __init omap_devinit_smartreflex(void)
+ {
++ const char * const *sr_inst;
++ int i, nr_sr = 0;
++
++ if (soc_is_omap44xx()) {
++ sr_inst = omap4_sr_instances;
++ nr_sr = ARRAY_SIZE(omap4_sr_instances);
++
++ } else if (soc_is_dra7xx()) {
++ sr_inst = dra7_sr_instances;
++ nr_sr = ARRAY_SIZE(dra7_sr_instances);
++ }
++
++ if (nr_sr) {
++ const char *name, *voltdm;
++
++ for (i = 0; i < nr_sr; i++) {
++ name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
++ voltdm = sr_inst[i];
++ sr_init_by_name(name, voltdm);
++ }
++
++ return 0;
++ }
++
+ return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
+ }
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+index 6a2c091990479..5fd3ea2028c64 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+@@ -192,6 +192,7 @@
+ ranges = <0x0 0x00 0x1700000 0x100000>;
+ reg = <0x00 0x1700000 0x0 0x100000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+index 0464b8aa4bc4d..b1b9544d8e7fd 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+@@ -322,6 +322,7 @@
+ ranges = <0x0 0x00 0x1700000 0x100000>;
+ reg = <0x00 0x1700000 0x0 0x100000>;
+ interrupts = <0 75 0x4>;
++ dma-coherent;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index 0b4545012d43e..acf2ae2e151a0 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -325,6 +325,7 @@
+ ranges = <0x0 0x00 0x1700000 0x100000>;
+ reg = <0x00 0x1700000 0x0 0x100000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
+index e6e284265f19d..58303a9ec32c4 100644
+--- a/arch/arm64/kernel/crash_dump.c
++++ b/arch/arm64/kernel/crash_dump.c
+@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+ {
+ memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
++ *ppos += count;
++
+ return count;
+ }
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index fa56af1a59c39..dbce0dcf4cc06 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -199,8 +199,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+
+ #ifdef CONFIG_STACKTRACE
+
+-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+- struct task_struct *task, struct pt_regs *regs)
++noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
++ void *cookie, struct task_struct *task,
++ struct pt_regs *regs)
+ {
+ struct stackframe frame;
+
+@@ -208,8 +209,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+ else if (task == current)
+ start_backtrace(&frame,
+- (unsigned long)__builtin_frame_address(0),
+- (unsigned long)arch_stack_walk);
++ (unsigned long)__builtin_frame_address(1),
++ (unsigned long)__builtin_return_address(0));
+ else
+ start_backtrace(&frame, thread_saved_fp(task),
+ thread_saved_pc(task));
+diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
+index 6c6f16e409a87..0d23c00493018 100644
+--- a/arch/ia64/include/asm/syscall.h
++++ b/arch/ia64/include/asm/syscall.h
+@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
+ static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+ {
+- return regs->r10 == -1 ? regs->r8:0;
++ return regs->r10 == -1 ? -regs->r8:0;
+ }
+
+ static inline long syscall_get_return_value(struct task_struct *task,
+diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
+index 75c070aed81e0..dad3a605cb7e5 100644
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -2010,27 +2010,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
+ {
+ struct syscall_get_set_args *args = data;
+ struct pt_regs *pt = args->regs;
+- unsigned long *krbs, cfm, ndirty;
++ unsigned long *krbs, cfm, ndirty, nlocals, nouts;
+ int i, count;
+
+ if (unw_unwind_to_user(info) < 0)
+ return;
+
++ /*
++ * We get here via a few paths:
++ * - break instruction: cfm is shared with caller.
++ * syscall args are in out= regs, locals are non-empty.
++ * - epsinstruction: cfm is set by br.call
++ * locals don't exist.
++ *
++ * For both cases argguments are reachable in cfm.sof - cfm.sol.
++ * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
++ */
+ cfm = pt->cr_ifs;
++ nlocals = (cfm >> 7) & 0x7f; /* aka sol */
++ nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
+ krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
+ ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+
+ count = 0;
+ if (in_syscall(pt))
+- count = min_t(int, args->n, cfm & 0x7f);
++ count = min_t(int, args->n, nouts);
+
++ /* Iterate over outs. */
+ for (i = 0; i < count; i++) {
++ int j = ndirty + nlocals + i + args->i;
+ if (args->rw)
+- *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
+- args->args[i];
++ *ia64_rse_skip_regs(krbs, j) = args->args[i];
+ else
+- args->args[i] = *ia64_rse_skip_regs(krbs,
+- ndirty + i + args->i);
++ args->args[i] = *ia64_rse_skip_regs(krbs, j);
+ }
+
+ if (!args->rw) {
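The CFM layout quoted in the comment above (sof in bits 6..0, sol in bits 13..7) is exactly what the new nlocals/nouts computation decodes. A small standalone C example of the same bit arithmetic, with a made-up cfm value:

#include <stdio.h>

int main(void)
{
        unsigned long cfm = 0x28a;             /* hypothetical CFM value */
        unsigned long sof = cfm & 0x7f;        /* total frame size, bits 6..0 */
        unsigned long sol = (cfm >> 7) & 0x7f; /* size of locals, bits 13..7 */

        /* The outgoing syscall args are the top sof - sol registers. */
        printf("sof=%lu sol=%lu nouts=%lu\n", sof, sol, sof - sol);
        return 0;
}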
+diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
+index 7141ccea8c94e..a92059964579b 100644
+--- a/arch/powerpc/include/asm/dcr-native.h
++++ b/arch/powerpc/include/asm/dcr-native.h
+@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
+ #define mfdcr(rn) \
+ ({unsigned int rval; \
+ if (__builtin_constant_p(rn) && rn < 1024) \
+- asm volatile("mfdcr %0," __stringify(rn) \
+- : "=r" (rval)); \
++ asm volatile("mfdcr %0, %1" : "=r" (rval) \
++ : "n" (rn)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ rval = mfdcrx(rn); \
+ else \
+@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
+ #define mtdcr(rn, v) \
+ do { \
+ if (__builtin_constant_p(rn) && rn < 1024) \
+- asm volatile("mtdcr " __stringify(rn) ",%0" \
+- : : "r" (v)); \
++ asm volatile("mtdcr %0, %1" \
++ : : "n" (rn), "r" (v)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ mtdcrx(rn, v); \
+ else \
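The change above stops pasting the register number into the mnemonic as text (via __stringify) and instead passes it as an "n" (immediate) operand, so the compiler verifies it really is a compile-time constant. A reduced side-by-side sketch of the two styles (GCC inline asm; mfdcr is PowerPC-only, shown for shape rather than portability):

/* Old style: the constant is glued into the instruction string. */
#define read_dcr_old(rn) \
({ unsigned int rval; \
   asm volatile("mfdcr %0," __stringify(rn) : "=r" (rval)); \
   rval; })

/* New style: the "n" constraint substitutes rn as an immediate operand. */
#define read_dcr_new(rn) \
({ unsigned int rval; \
   asm volatile("mfdcr %0, %1" : "=r" (rval) : "n" (rn)); \
   rval; })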
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index d92e5eaa4c1d7..a850dccd78ea1 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
+ asi = (regs->tstate >> 24); /* saved %asi */
+ else
+ asi = (insn >> 5); /* immediate asi */
+- if ((asi & 0xf2) == ASI_PNF) {
+- if (insn & 0x1000000) { /* op3[5:4]=3 */
+- handle_ldf_stq(insn, regs);
+- return true;
+- } else if (insn & 0x200000) { /* op3[2], stores */
++ if ((asi & 0xf6) == ASI_PNF) {
++ if (insn & 0x200000) /* op3[2], stores */
+ return false;
+- }
+- handle_ld_nf(insn, regs);
++ if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
++ handle_ldf_stq(insn, regs);
++ else
++ handle_ld_nf(insn, regs);
+ return true;
+ }
+ }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7e5f33a0d0e2f..02d4c74d30e2b 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -890,6 +890,12 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
++struct kvm_x86_msr_filter {
++ u8 count;
++ bool default_allow:1;
++ struct msr_bitmap_range ranges[16];
++};
++
+ #define APICV_INHIBIT_REASON_DISABLE 0
+ #define APICV_INHIBIT_REASON_HYPERV 1
+ #define APICV_INHIBIT_REASON_NESTED 2
+@@ -985,14 +991,12 @@ struct kvm_arch {
+ bool guest_can_read_msr_platform_info;
+ bool exception_payload_enabled;
+
++ bool bus_lock_detection_enabled;
++
+ /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
+ u32 user_space_msr_mask;
+
+- struct {
+- u8 count;
+- bool default_allow:1;
+- struct msr_bitmap_range ranges[16];
+- } msr_filter;
++ struct kvm_x86_msr_filter __rcu *msr_filter;
+
+ struct kvm_pmu_event_filter *pmu_event_filter;
+ struct task_struct *nx_lpage_recovery_thread;
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index c37f11999d0c0..cbb67b6030f97 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -37,4 +37,11 @@
+ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+
++
++#define ARCH_ADD_TRAMP_KEY(name) \
++ asm(".pushsection .static_call_tramp_key, \"a\" \n" \
++ ".long " STATIC_CALL_TRAMP_STR(name) " - . \n" \
++ ".long " STATIC_CALL_KEY_STR(name) " - . \n" \
++ ".popsection \n")
++
+ #endif /* _ASM_STATIC_CALL_H */
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index ab09af613788b..5941e18edd5a9 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ }
+ #endif
+
+-/*
+- * The maximum amount of extra memory compared to the base size. The
+- * main scaling factor is the size of struct page. At extreme ratios
+- * of base:extra, all the base memory can be filled with page
+- * structures for the extra memory, leaving no space for anything
+- * else.
+- *
+- * 10x seems like a reasonable balance between scaling flexibility and
+- * leaving a practically usable system.
+- */
+-#define XEN_EXTRA_MEM_RATIO (10)
+-
+ /*
+ * Helper functions to write or read unsigned long values to/from
+ * memory, when the access may fault.
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fa5f059c2b940..0d8383b82bca4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1505,35 +1505,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
+ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
+ {
++ struct kvm_x86_msr_filter *msr_filter;
++ struct msr_bitmap_range *ranges;
+ struct kvm *kvm = vcpu->kvm;
+- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+- u32 count = kvm->arch.msr_filter.count;
+- u32 i;
+- bool r = kvm->arch.msr_filter.default_allow;
++ bool allowed;
+ int idx;
++ u32 i;
+
+- /* MSR filtering not set up or x2APIC enabled, allow everything */
+- if (!count || (index >= 0x800 && index <= 0x8ff))
++ /* x2APIC MSRs do not support filtering. */
++ if (index >= 0x800 && index <= 0x8ff)
+ return true;
+
+- /* Prevent collision with set_msr_filter */
+ idx = srcu_read_lock(&kvm->srcu);
+
+- for (i = 0; i < count; i++) {
++ msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
++ if (!msr_filter) {
++ allowed = true;
++ goto out;
++ }
++
++ allowed = msr_filter->default_allow;
++ ranges = msr_filter->ranges;
++
++ for (i = 0; i < msr_filter->count; i++) {
+ u32 start = ranges[i].base;
+ u32 end = start + ranges[i].nmsrs;
+ u32 flags = ranges[i].flags;
+ unsigned long *bitmap = ranges[i].bitmap;
+
+ if ((index >= start) && (index < end) && (flags & type)) {
+- r = !!test_bit(index - start, bitmap);
++ allowed = !!test_bit(index - start, bitmap);
+ break;
+ }
+ }
+
++out:
+ srcu_read_unlock(&kvm->srcu, idx);
+
+- return r;
++ return allowed;
+ }
+ EXPORT_SYMBOL_GPL(kvm_msr_allowed);
+
+@@ -5291,25 +5300,34 @@ split_irqchip_unlock:
+ return r;
+ }
+
+-static void kvm_clear_msr_filter(struct kvm *kvm)
++static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
++{
++ struct kvm_x86_msr_filter *msr_filter;
++
++ msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
++ if (!msr_filter)
++ return NULL;
++
++ msr_filter->default_allow = default_allow;
++ return msr_filter;
++}
++
++static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
+ {
+ u32 i;
+- u32 count = kvm->arch.msr_filter.count;
+- struct msr_bitmap_range ranges[16];
+
+- mutex_lock(&kvm->lock);
+- kvm->arch.msr_filter.count = 0;
+- memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
+- mutex_unlock(&kvm->lock);
+- synchronize_srcu(&kvm->srcu);
++ if (!msr_filter)
++ return;
++
++ for (i = 0; i < msr_filter->count; i++)
++ kfree(msr_filter->ranges[i].bitmap);
+
+- for (i = 0; i < count; i++)
+- kfree(ranges[i].bitmap);
++ kfree(msr_filter);
+ }
+
+-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
++static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
++ struct kvm_msr_filter_range *user_range)
+ {
+- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+ struct msr_bitmap_range range;
+ unsigned long *bitmap = NULL;
+ size_t bitmap_size;
+@@ -5343,11 +5361,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
+ goto err;
+ }
+
+- /* Everything ok, add this range identifier to our global pool */
+- ranges[kvm->arch.msr_filter.count] = range;
+- /* Make sure we filled the array before we tell anyone to walk it */
+- smp_wmb();
+- kvm->arch.msr_filter.count++;
++ /* Everything ok, add this range identifier. */
++ msr_filter->ranges[msr_filter->count] = range;
++ msr_filter->count++;
+
+ return 0;
+ err:
+@@ -5358,10 +5374,11 @@ err:
+ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_msr_filter __user *user_msr_filter = argp;
++ struct kvm_x86_msr_filter *new_filter, *old_filter;
+ struct kvm_msr_filter filter;
+ bool default_allow;
+- int r = 0;
+ bool empty = true;
++ int r = 0;
+ u32 i;
+
+ if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+@@ -5374,25 +5391,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ if (empty && !default_allow)
+ return -EINVAL;
+
+- kvm_clear_msr_filter(kvm);
+-
+- kvm->arch.msr_filter.default_allow = default_allow;
++ new_filter = kvm_alloc_msr_filter(default_allow);
++ if (!new_filter)
++ return -ENOMEM;
+
+- /*
+- * Protect from concurrent calls to this function that could trigger
+- * a TOCTOU violation on kvm->arch.msr_filter.count.
+- */
+- mutex_lock(&kvm->lock);
+ for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+- r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
+- if (r)
+- break;
++ r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
++ if (r) {
++ kvm_free_msr_filter(new_filter);
++ return r;
++ }
+ }
+
++ mutex_lock(&kvm->lock);
++
++ /* The per-VM filter is protected by kvm->lock... */
++ old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
++
++ rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
++ synchronize_srcu(&kvm->srcu);
++
++ kvm_free_msr_filter(old_filter);
++
+ kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
+ mutex_unlock(&kvm->lock);
+
+- return r;
++ return 0;
+ }
+
+ long kvm_arch_vm_ioctl(struct file *filp,
+@@ -10423,8 +10447,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+
+ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+- u32 i;
+-
+ if (current->mm == kvm->mm) {
+ /*
+ * Free memory regions allocated on behalf of userspace,
+@@ -10441,8 +10463,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ }
+ if (kvm_x86_ops.vm_destroy)
+ kvm_x86_ops.vm_destroy(kvm);
+- for (i = 0; i < kvm->arch.msr_filter.count; i++)
+- kfree(kvm->arch.msr_filter.ranges[i].bitmap);
++ kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+ kvm_pic_destroy(kvm);
+ kvm_ioapic_destroy(kvm);
+ kvm_free_vcpus(kvm);
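The update side above reduces to three ordered steps, which is where the documentation's old-or-new promise comes from; condensed from the hunk (error handling trimmed):

mutex_lock(&kvm->lock);
old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
rcu_assign_pointer(kvm->arch.msr_filter, new_filter); /* publish new */
synchronize_srcu(&kvm->srcu);                         /* wait out readers */
kvm_free_msr_filter(old_filter);                      /* now safe to free */
mutex_unlock(&kvm->lock);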
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index f80d10d39cf6d..cc85e199108eb 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -231,7 +231,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+ if (pgprot_val(old_prot) == pgprot_val(new_prot))
+ return;
+
+- pa = pfn << page_level_shift(level);
++ pa = pfn << PAGE_SHIFT;
+ size = page_level_size(level);
+
+ /*
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 60da7e793385e..56e0f290fef65 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
+ unsigned long xen_max_p2m_pfn __read_mostly;
+ EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+
+-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
++#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+ #else
+ #define P2M_LIMIT 0
+ #endif
+@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
+ xen_p2m_last_pfn = xen_max_p2m_pfn;
+
+ p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
+- if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
+- p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
+-
+ vm.flags = VM_ALLOC;
+ vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
+ PMD_SIZE * PMDS_PER_MID_PAGE);
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 1a3b75652fa4f..8bfc103301077 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -59,6 +59,18 @@ static struct {
+ } xen_remap_buf __initdata __aligned(PAGE_SIZE);
+ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+
++/*
++ * The maximum amount of extra memory compared to the base size. The
++ * main scaling factor is the size of struct page. At extreme ratios
++ * of base:extra, all the base memory can be filled with page
++ * structures for the extra memory, leaving no space for anything
++ * else.
++ *
++ * 10x seems like a reasonable balance between scaling flexibility and
++ * leaving a practically usable system.
++ */
++#define EXTRA_MEM_RATIO (10)
++
+ static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
+
+ static void __init xen_parse_512gb(void)
+@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
+ extra_pages += max_pages - max_pfn;
+
+ /*
+- * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
++ * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+ * factor the base size.
+ *
+ * Make sure we have no memory above max_pages, as this area
+ * isn't handled by the p2m management.
+ */
+- extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
++ extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages, max_pages - max_pfn);
+ i = 0;
+ addr = xen_e820_table.entries[0].addr;
+diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
+index 85d5790ac49b0..3304e841df7ce 100644
+--- a/block/blk-cgroup-rwstat.c
++++ b/block/blk-cgroup-rwstat.c
+@@ -109,6 +109,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+
+ lockdep_assert_held(&blkg->q->queue_lock);
+
++ memset(sum, 0, sizeof(*sum));
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct blkg_rwstat *rwstat;
+@@ -122,7 +123,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ rwstat = (void *)pos_blkg + off;
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+- sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
++ sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
+ }
+ rcu_read_unlock();
+ }
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 97b7c28215652..7cdd566966473 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -375,6 +375,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
+ switch (bio_op(rq->bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
++ if (queue_max_discard_segments(rq->q) > 1) {
++ struct bio *bio = rq->bio;
++
++ for_each_bio(bio)
++ nr_phys_segs++;
++ return nr_phys_segs;
++ }
++ return 1;
+ case REQ_OP_WRITE_ZEROES:
+ return 0;
+ case REQ_OP_WRITE_SAME:
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 4676c6f00489c..ab7d7ebcf6ddc 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -240,7 +240,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ */
+ if (op == REQ_OP_ZONE_RESET &&
+ blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+- bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
++ bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
+ break;
+ }
+
+diff --git a/block/genhd.c b/block/genhd.c
+index ec6264e2ed671..796baf7612024 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -732,10 +732,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
+ disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+ disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+
+- if (disk->flags & GENHD_FL_HIDDEN) {
+- dev_set_uevent_suppress(ddev, 0);
++ if (disk->flags & GENHD_FL_HIDDEN)
+ return;
+- }
+
+ disk_scan_partitions(disk);
+
+diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
+index 3f045b5953b2e..a0c1a665dfc12 100644
+--- a/drivers/acpi/acpica/nsaccess.c
++++ b/drivers/acpi/acpica/nsaccess.c
+@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
+ * just create and link the new node(s) here.
+ */
+ new_node =
+- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
++ acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
+ if (!new_node) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+- ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
+ new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
+ new_node->type = init_val->type;
+
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index aee023ad02375..a958ad60a3394 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -9,6 +9,8 @@
+ #ifndef _ACPI_INTERNAL_H_
+ #define _ACPI_INTERNAL_H_
+
++#include <linux/idr.h>
++
+ #define PREFIX "ACPI: "
+
+ int early_acpi_osi_init(void);
+@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
+
+ extern struct list_head acpi_bus_id_list;
+
++#define ACPI_MAX_DEVICE_INSTANCES 4096
++
+ struct acpi_device_bus_id {
+ const char *bus_id;
+- unsigned int instance_no;
++ struct ida instance_ida;
+ struct list_head node;
+ };
+
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index dca5cc423cd41..b47f14ac75ae0 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -482,9 +482,8 @@ static void acpi_device_del(struct acpi_device *device)
+ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
+ if (!strcmp(acpi_device_bus_id->bus_id,
+ acpi_device_hid(device))) {
+- if (acpi_device_bus_id->instance_no > 0)
+- acpi_device_bus_id->instance_no--;
+- else {
++ ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
++ if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
+ list_del(&acpi_device_bus_id->node);
+ kfree_const(acpi_device_bus_id->bus_id);
+ kfree(acpi_device_bus_id);
+@@ -623,12 +622,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
+ put_device(&adev->dev);
+ }
+
++static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
++{
++ struct acpi_device_bus_id *acpi_device_bus_id;
++
++ /* Find suitable bus_id and instance number in acpi_bus_id_list. */
++ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
++ if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
++ return acpi_device_bus_id;
++ }
++ return NULL;
++}
++
++static int acpi_device_set_name(struct acpi_device *device,
++ struct acpi_device_bus_id *acpi_device_bus_id)
++{
++ struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
++ int result;
++
++ result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
++ if (result < 0)
++ return result;
++
++ device->pnp.instance_no = result;
++ dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
++ return 0;
++}
++
+ int acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *))
+ {
++ struct acpi_device_bus_id *acpi_device_bus_id;
+ int result;
+- struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
+- int found = 0;
+
+ if (device->handle) {
+ acpi_status status;
+@@ -654,41 +679,38 @@ int acpi_device_add(struct acpi_device *device,
+ INIT_LIST_HEAD(&device->del_list);
+ mutex_init(&device->physical_node_lock);
+
+- new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
+- if (!new_bus_id) {
+- pr_err(PREFIX "Memory allocation error\n");
+- result = -ENOMEM;
+- goto err_detach;
+- }
+-
+ mutex_lock(&acpi_device_lock);
+- /*
+- * Find suitable bus_id and instance number in acpi_bus_id_list
+- * If failed, create one and link it into acpi_bus_id_list
+- */
+- list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+- if (!strcmp(acpi_device_bus_id->bus_id,
+- acpi_device_hid(device))) {
+- acpi_device_bus_id->instance_no++;
+- found = 1;
+- kfree(new_bus_id);
+- break;
++
++ acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
++ if (acpi_device_bus_id) {
++ result = acpi_device_set_name(device, acpi_device_bus_id);
++ if (result)
++ goto err_unlock;
++ } else {
++ acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
++ GFP_KERNEL);
++ if (!acpi_device_bus_id) {
++ result = -ENOMEM;
++ goto err_unlock;
+ }
+- }
+- if (!found) {
+- acpi_device_bus_id = new_bus_id;
+ acpi_device_bus_id->bus_id =
+ kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ if (!acpi_device_bus_id->bus_id) {
+- pr_err(PREFIX "Memory allocation error for bus id\n");
++ kfree(acpi_device_bus_id);
+ result = -ENOMEM;
+- goto err_free_new_bus_id;
++ goto err_unlock;
++ }
++
++ ida_init(&acpi_device_bus_id->instance_ida);
++
++ result = acpi_device_set_name(device, acpi_device_bus_id);
++ if (result) {
++ kfree(acpi_device_bus_id);
++ goto err_unlock;
+ }
+
+- acpi_device_bus_id->instance_no = 0;
+ list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
+ }
+- dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
+
+ if (device->parent)
+ list_add_tail(&device->node, &device->parent->children);
+@@ -720,13 +742,9 @@ int acpi_device_add(struct acpi_device *device,
+ list_del(&device->node);
+ list_del(&device->wakeup_list);
+
+- err_free_new_bus_id:
+- if (!found)
+- kfree(new_bus_id);
+-
++ err_unlock:
+ mutex_unlock(&acpi_device_lock);
+
+- err_detach:
+ acpi_detach_data(device->handle, acpi_scan_drop_device);
+ return result;
+ }
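The scan.c rework above swaps a bare per-HID counter for an IDA, so instance numbers released in acpi_device_del() can be handed out again instead of only ever growing. The essential allocate/release pairing, reduced from the hunks (kernel API; helper names are illustrative and error paths are trimmed):

static int acpi_alloc_instance_no(struct ida *ida)
{
        /* Smallest free number in [0, ACPI_MAX_DEVICE_INSTANCES). */
        return ida_simple_get(ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
}

static void acpi_release_instance_no(struct ida *ida, int no)
{
        /* Freed numbers become reusable at once, unlike the old counter,
         * which could only be decremented for the most recent device. */
        ida_simple_remove(ida, no);
}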
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 811d298637cb2..83cd4c95faf0d 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ },
+ },
+ {
++ .callback = video_detect_force_vendor,
+ .ident = "Sony VPCEH3U1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 316a9947541fe..b574cce98dc36 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2260,7 +2260,8 @@ out:
+ return rc;
+
+ err_eni_release:
+- eni_do_release(dev);
++ dev->phy = NULL;
++ iounmap(ENI_DEV(dev)->ioaddr);
+ err_unregister:
+ atm_dev_deregister(dev);
+ err_free_consistent:
+diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
+index 3c081b6171a8f..bfca7b8a6f31e 100644
+--- a/drivers/atm/idt77105.c
++++ b/drivers/atm/idt77105.c
+@@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev)
+ {
+ unsigned long flags;
+
+- if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
++ if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
+ return -ENOMEM;
+ PRIV(dev)->dev = dev;
+ spin_lock_irqsave(&idt77105_priv_lock, flags);
+@@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev)
+ else
+ idt77105_all = walk->next;
+ dev->phy = NULL;
+- dev->dev_data = NULL;
++ dev->phy_data = NULL;
+ kfree(walk);
+ break;
+ }
+diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
+index ac811cfa68431..92edd100a394f 100644
+--- a/drivers/atm/lanai.c
++++ b/drivers/atm/lanai.c
+@@ -2234,6 +2234,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
+ conf1_write(lanai);
+ #endif
+ iounmap(lanai->base);
++ lanai->base = NULL;
+ error_pci:
+ pci_disable_device(lanai->pci);
+ error:
+@@ -2246,6 +2247,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
+ static void lanai_dev_close(struct atm_dev *atmdev)
+ {
+ struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
++ if (lanai->base==NULL)
++ return;
+ printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
+ lanai->number);
+ lanai_timed_poll_stop(lanai);
+@@ -2553,7 +2556,7 @@ static int lanai_init_one(struct pci_dev *pci,
+ struct atm_dev *atmdev;
+ int result;
+
+- lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
++ lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
+ if (lanai == NULL) {
+ printk(KERN_ERR DEV_LABEL
+ ": couldn't allocate dev_data structure!\n");
+diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
+index 7850758b5bb82..239852d855589 100644
+--- a/drivers/atm/uPD98402.c
++++ b/drivers/atm/uPD98402.c
+@@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev)
+ static int uPD98402_start(struct atm_dev *dev)
+ {
+ DPRINTK("phy_start\n");
+- if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
++ if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
+ return -ENOMEM;
+ spin_lock_init(&PRIV(dev)->lock);
+ memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index bfda153b1a41d..5ef67bacb585e 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
+ return 0;
+ }
+
+-static void rpm_put_suppliers(struct device *dev)
++static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
+ {
+ struct device_link *link;
+
+@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
+ device_links_read_lock_held()) {
+
+ while (refcount_dec_not_one(&link->rpm_active))
+- pm_runtime_put(link->supplier);
++ pm_runtime_put_noidle(link->supplier);
++
++ if (try_to_suspend)
++ pm_request_idle(link->supplier);
+ }
+ }
+
++static void rpm_put_suppliers(struct device *dev)
++{
++ __rpm_put_suppliers(dev, true);
++}
++
++static void rpm_suspend_suppliers(struct device *dev)
++{
++ struct device_link *link;
++ int idx = device_links_read_lock();
++
++ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
++ device_links_read_lock_held())
++ pm_request_idle(link->supplier);
++
++ device_links_read_unlock(idx);
++}
++
+ /**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+- if (retval)
++ if (retval) {
++ rpm_put_suppliers(dev);
+ goto fail;
++ }
+
+ device_links_read_unlock(idx);
+ }
+@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+- fail:
+- rpm_put_suppliers(dev);
++ __rpm_put_suppliers(dev, false);
+
++fail:
+ device_links_read_unlock(idx);
+ }
+
+@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ goto out;
+ }
+
++ if (dev->power.irq_safe)
++ goto out;
++
+ /* Maybe the parent is now able to suspend. */
+- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
++ if (parent && !parent->power.ignore_children) {
+ spin_unlock(&dev->power.lock);
+
+ spin_lock(&parent->power.lock);
+@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+
+ spin_lock(&dev->power.lock);
+ }
++ /* Maybe the suppliers are now able to suspend. */
++ if (dev->power.links_count > 0) {
++ spin_unlock_irq(&dev->power.lock);
++
++ rpm_suspend_suppliers(dev);
++
++ spin_lock_irq(&dev->power.lock);
++ }
+
+ out:
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+diff --git a/drivers/block/umem.c b/drivers/block/umem.c
+index 2b95d7b33b918..5eb44e4a91eeb 100644
+--- a/drivers/block/umem.c
++++ b/drivers/block/umem.c
+@@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ if (card->mm_pages[0].desc == NULL ||
+ card->mm_pages[1].desc == NULL) {
+ dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
++ ret = -ENOMEM;
+ goto failed_alloc;
+ }
+ reset_page(&card->mm_pages[0]);
+@@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ spin_lock_init(&card->lock);
+
+ card->queue = blk_alloc_queue(NUMA_NO_NODE);
+- if (!card->queue)
++ if (!card->queue) {
++ ret = -ENOMEM;
+ goto failed_alloc;
++ }
+
+ tasklet_init(&card->tasklet, process_page, (unsigned long)card);
+
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index da16121140cab..3874233f7194d 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -891,7 +891,7 @@ next:
+ out:
+ for (i = last_map; i < num; i++) {
+ /* Don't zap current batch's valid persistent grants. */
+- if(i >= last_map + segs_to_map)
++ if(i >= map_until)
+ pages[i]->persistent_gnt = NULL;
+ pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ }
+diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
+index b040447575adc..dcfb32ee5cb60 100644
+--- a/drivers/bus/omap_l3_noc.c
++++ b/drivers/bus/omap_l3_noc.c
+@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
+ */
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+- 0x0, "l3-dbg-irq", l3);
++ IRQF_NO_THREAD, "l3-dbg-irq", l3);
+ if (ret) {
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+- 0x0, "l3-app-irq", l3);
++ IRQF_NO_THREAD, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index b080739ab0c33..7e80dbd4a3f9f 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+
+@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+- .ops = &clk_rcg2_floor_ops,
++ .ops = &clk_rcg2_ops,
+ },
+ };
+
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 3776d960f405e..1c192a42f11e0 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
+ static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "allwinner,sun50i-h6", },
+
++ { .compatible = "arm,vexpress", },
++
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 49a1f8ce4baa6..863f059bc498a 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+ int ret, value;
+
+ ret = request_threaded_irq(event->irq, NULL, event->handler,
+- event->irqflags, "ACPI:Event", event);
++ event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
+ if (ret) {
+ dev_err(acpi_gpio->chip->parent,
+ "Failed to setup interrupt handler for %d\n",
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 16f73c1023943..ca868271f4c43 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -239,6 +239,7 @@ source "drivers/gpu/drm/arm/Kconfig"
+ config DRM_RADEON
+ tristate "ATI Radeon"
+ depends on DRM && PCI && MMU
++ depends on AGP || !AGP
+ select FW_LOADER
+ select DRM_KMS_HELPER
+ select DRM_TTM
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1a880cb48d19e..a2425f7ca7597 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1093,6 +1093,7 @@ static const struct pci_device_id pciidlist[] = {
+ {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
++ {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+
+ {0, 0, 0}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index e2c2eb45a7934..1ea8af48ae2f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ size = mode_cmd->pitches[0] * height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+ ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
+- ttm_bo_type_kernel, NULL, &gobj);
++ ttm_bo_type_device, NULL, &gobj);
+ if (ret) {
+ pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+index 15c2ff264ff60..1a347484cf2a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
+ } else {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
+-
++ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ }
+
+ //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index b5fe2a008bd47..7ed4d7c8734f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -295,7 +295,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ .num_banks = 8,
+ .num_chans = 4,
+ .vmm_page_size_bytes = 4096,
+- .dram_clock_change_latency_us = 11.72,
++ .dram_clock_change_latency_us = 23.84,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3600,
+ .xfc_bus_transport_time_us = 4,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index c5223a9e0d891..b76425164e297 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -524,6 +524,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+ tmp, MC_CG_ARB_FREQ_F0);
+ }
+
++static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++ uint16_t pcie_gen = 0;
++
++ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
++ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
++ pcie_gen = 3;
++ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
++ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
++ pcie_gen = 2;
++ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
++ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
++ pcie_gen = 1;
++ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
++ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
++ pcie_gen = 0;
++
++ return pcie_gen;
++}
++
++static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++ uint16_t pcie_width = 0;
++
++ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
++ pcie_width = 16;
++ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
++ pcie_width = 12;
++ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
++ pcie_width = 8;
++ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
++ pcie_width = 4;
++ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
++ pcie_width = 2;
++ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
++ pcie_width = 1;
++
++ return pcie_width;
++}
++
+ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+ {
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -620,6 +662,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
++
++ if (data->pcie_dpm_key_disabled)
++ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
++ data->dpm_table.pcie_speed_table.count,
++ smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
+ }
+ return 0;
+ }
+@@ -1180,6 +1227,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+ NULL)),
+ "Failed to enable pcie DPM during DPM Start Function!",
+ return -EINVAL);
++ } else {
++ PP_ASSERT_WITH_CODE(
++ (0 == smum_send_msg_to_smc(hwmgr,
++ PPSMC_MSG_PCIeDPM_Disable,
++ NULL)),
++ "Failed to disble pcie DPM during DPM Start Function!",
++ return -EINVAL);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 18e4eb8884c26..ed4eafc744d3d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -54,6 +54,9 @@
+ #include "smuio/smuio_9_0_offset.h"
+ #include "smuio/smuio_9_0_sh_mask.h"
+
++#define smnPCIE_LC_SPEED_CNTL 0x11140290
++#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
++
+ #define HBM_MEMORY_CHANNEL_WIDTH 128
+
+ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ if (PP_CAP(PHM_PlatformCaps_VCEDPM))
+ data->smu_features[GNLD_DPM_VCE].supported = true;
+
+- if (!data->registry_data.pcie_dpm_key_disabled)
+- data->smu_features[GNLD_DPM_LINK].supported = true;
++ data->smu_features[GNLD_DPM_LINK].supported = true;
+
+ if (!data->registry_data.dcefclk_dpm_key_disabled)
+ data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
+@@ -1545,6 +1547,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ pp_table->PcieLaneCount[i] = pcie_width;
+ }
+
++ if (data->registry_data.pcie_dpm_key_disabled) {
++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
++ pp_table->PcieGenSpeed[i] = pcie_gen;
++ pp_table->PcieLaneCount[i] = pcie_width;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -2967,6 +2976,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
+ }
+ }
+
++ if (data->registry_data.pcie_dpm_key_disabled) {
++ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
++ false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
++ "Attempt to Disable Link DPM feature Failed!", return -EINVAL);
++ data->smu_features[GNLD_DPM_LINK].enabled = false;
++ data->smu_features[GNLD_DPM_LINK].supported = false;
++ }
++
+ return 0;
+ }
+
+@@ -4583,6 +4600,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
+ return 0;
+ }
+
++static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
++ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
++ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
++}
++
++static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
++{
++ struct amdgpu_device *adev = hwmgr->adev;
++
++ return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
++ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
++ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
++}
++
+ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+ {
+@@ -4591,8 +4626,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+ struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
+ struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
+- struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
++ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
++ PPTable_t *pptable = &(data->smc_state_table.pp_table);
+
+ int i, now, size = 0, count = 0;
+
+@@ -4649,15 +4685,31 @@
1582 + "*" : "");
1583 + break;
1584 + case PP_PCIE:
1585 +- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
1586 +-
1587 +- for (i = 0; i < pcie_table->count; i++)
1588 +- size += sprintf(buf + size, "%d: %s %s\n", i,
1589 +- (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
1590 +- (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
1591 +- (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
1592 +- (i == now) ? "*" : "");
1593 ++ current_gen_speed =
1594 ++ vega10_get_current_pcie_link_speed_level(hwmgr);
1595 ++ current_lane_width =
1596 ++ vega10_get_current_pcie_link_width_level(hwmgr);
1597 ++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
1598 ++ gen_speed = pptable->PcieGenSpeed[i];
1599 ++ lane_width = pptable->PcieLaneCount[i];
1600 ++
1601 ++ size += sprintf(buf + size, "%d: %s %s %s\n", i,
1602 ++ (gen_speed == 0) ? "2.5GT/s," :
1603 ++ (gen_speed == 1) ? "5.0GT/s," :
1604 ++ (gen_speed == 2) ? "8.0GT/s," :
1605 ++ (gen_speed == 3) ? "16.0GT/s," : "",
1606 ++ (lane_width == 1) ? "x1" :
1607 ++ (lane_width == 2) ? "x2" :
1608 ++ (lane_width == 3) ? "x4" :
1609 ++ (lane_width == 4) ? "x8" :
1610 ++ (lane_width == 5) ? "x12" :
1611 ++ (lane_width == 6) ? "x16" : "",
1612 ++ (current_gen_speed == gen_speed) &&
1613 ++ (current_lane_width == lane_width) ?
1614 ++ "*" : "");
1615 ++ }
1616 + break;
1617 ++
1618 + case OD_SCLK:
1619 + if (hwmgr->od_enabled) {
1620 + size = sprintf(buf, "%s:\n", "OD_SCLK");
1621 +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1622 +index 62076035029ac..e68651fb7ca4c 100644
1623 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1624 ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
1625 +@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
1626 + data->registry_data.auto_wattman_debug = 0;
1627 + data->registry_data.auto_wattman_sample_period = 100;
1628 + data->registry_data.auto_wattman_threshold = 50;
1629 ++ data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
1630 + }
1631 +
1632 + static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
1633 +@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
1634 + pp_table->PcieLaneCount[i] = pcie_width_arg;
1635 + }
1636 +
1637 ++ /* override to the highest if it's disabled from ppfeaturemask */
1638 ++ if (data->registry_data.pcie_dpm_key_disabled) {
1639 ++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
1640 ++ smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
1641 ++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1642 ++ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
1643 ++ NULL);
1644 ++ PP_ASSERT_WITH_CODE(!ret,
1645 ++ "[OverridePcieParameters] Attempt to override pcie params failed!",
1646 ++ return ret);
1647 ++
1648 ++ pp_table->PcieGenSpeed[i] = pcie_gen;
1649 ++ pp_table->PcieLaneCount[i] = pcie_width;
1650 ++ }
1651 ++ ret = vega12_enable_smc_features(hwmgr,
1652 ++ false,
1653 ++ data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
1654 ++ PP_ASSERT_WITH_CODE(!ret,
1655 ++ "Attempt to Disable DPM LINK Failed!",
1656 ++ return ret);
1657 ++ data->smu_features[GNLD_DPM_LINK].enabled = false;
1658 ++ data->smu_features[GNLD_DPM_LINK].supported = false;
1659 ++ }
1660 + return 0;
1661 + }
1662 +
1663 +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1664 +index 251979c059c8b..60cde0c528257 100644
1665 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1666 ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
1667 +@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
1668 + data->registry_data.gfxoff_controlled_by_driver = 1;
1669 + data->gfxoff_allowed = false;
1670 + data->counter_gfxoff = 0;
1671 ++ data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
1672 + }
1673 +
1674 + static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
1675 +@@ -885,6 +886,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
1676 + pp_table->PcieLaneCount[i] = pcie_width_arg;
1677 + }
1678 +
1679 ++ /* override to the highest if it's disabled from ppfeaturemask */
1680 ++ if (data->registry_data.pcie_dpm_key_disabled) {
1681 ++ for (i = 0; i < NUM_LINK_LEVELS; i++) {
1682 ++ smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
1683 ++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1684 ++ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
1685 ++ NULL);
1686 ++ PP_ASSERT_WITH_CODE(!ret,
1687 ++ "[OverridePcieParameters] Attempt to override pcie params failed!",
1688 ++ return ret);
1689 ++
1690 ++ pp_table->PcieGenSpeed[i] = pcie_gen;
1691 ++ pp_table->PcieLaneCount[i] = pcie_width;
1692 ++ }
1693 ++ ret = vega20_enable_smc_features(hwmgr,
1694 ++ false,
1695 ++ data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
1696 ++ PP_ASSERT_WITH_CODE(!ret,
1697 ++ "Attempt to Disable DPM LINK Failed!",
1698 ++ return ret);
1699 ++ data->smu_features[GNLD_DPM_LINK].enabled = false;
1700 ++ data->smu_features[GNLD_DPM_LINK].supported = false;
1701 ++ }
1702 ++
1703 + return 0;
1704 + }
1705 +
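
Both the vega12 and vega20 hunks above pack the link level, gen speed, and lane width into a single SMC message argument as (i << 16) | (pcie_gen << 8) | pcie_width. A small sketch of that encoding, with the field layout read off the expression itself (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* level in bits 23:16, gen speed in bits 15:8, lane width in bits 7:0 */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width)
{
        return (level << 16) | (gen << 8) | width;
}

int main(void)
{
        /* link level 1, Gen3 (encoded as 2), x16 */
        uint32_t arg = pack_pcie_arg(1, 2, 16);

        printf("smu_pcie_arg = 0x%06x\n", (unsigned)arg);  /* 0x010210 */
        return 0;
}
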
1706 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1707 +index d1533bdc1335e..2b7e85318a76a 100644
1708 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1709 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1710 +@@ -675,7 +675,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
1711 + struct page **pages = pvec + pinned;
1712 +
1713 + ret = pin_user_pages_fast(ptr, num_pages,
1714 +- !userptr->ro ? FOLL_WRITE : 0, pages);
1715 ++ FOLL_WRITE | FOLL_FORCE, pages);
1716 + if (ret < 0) {
1717 + unpin_user_pages(pvec, pinned);
1718 + kvfree(pvec);
1719 +diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
1720 +index 7fb36b12fe7a2..6614f67364862 100644
1721 +--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
1722 ++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
1723 +@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
1724 + WRITE_ONCE(fence->vma, NULL);
1725 + vma->fence = NULL;
1726 +
1727 +- with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
1728 ++ /*
1729 ++ * Skip the write to HW if and only if the device is currently
1730 ++ * suspended.
1731 ++ *
1732 ++ * If the driver does not currently hold a wakeref (if_in_use == 0),
1733 ++ * the device may currently be runtime suspended, or it may be woken
1734 ++ * up before the suspend takes place. If the device is not suspended
1735 ++ * (powered down) and we skip clearing the fence register, the HW is
1736 ++ * left in an undefined state where we may end up with multiple
1737 ++ * registers overlapping.
1738 ++ */
1739 ++ with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
1740 + fence_write(fence);
1741 + }
1742 +
1743 +diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
1744 +index 153ca9e65382e..8b725efb2254c 100644
1745 +--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
1746 ++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
1747 +@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
1748 + }
1749 +
1750 + /**
1751 +- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
1752 ++ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
1753 + * @rpm: the intel_runtime_pm structure
1754 ++ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
1755 + *
1756 + * This function grabs a device-level runtime pm reference if the device is
1757 +- * already in use and ensures that it is powered up. It is illegal to try
1758 +- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
1759 ++ * already active and ensures that it is powered up. It is illegal to try
1760 ++ * and access the HW should intel_runtime_pm_get_if_active() report failure.
1761 ++ *
1762 ++ * If @ignore_usecount=true, a reference will be acquired even if there is no
1763 ++ * user requiring the device to be powered up (dev->power.usage_count == 0).
1764 ++ * If the function returns false in this case then it's guaranteed that the
1765 ++ * device's runtime suspend hook has been called already or that it will be
1766 ++ * called (and hence it's also guaranteed that the device's runtime resume
1767 ++ * hook will be called eventually).
1768 + *
1769 + * Any runtime pm reference obtained by this function must have a symmetric
1770 + * call to intel_runtime_pm_put() to release the reference again.
1771 +@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
1772 + * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
1773 + * as True if the wakeref was acquired, or False otherwise.
1774 + */
1775 +-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
1776 ++static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
1777 ++ bool ignore_usecount)
1778 + {
1779 + if (IS_ENABLED(CONFIG_PM)) {
1780 + /*
1781 +@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
1782 + * function, since the power state is undefined. This applies
1783 + * atm to the late/early system suspend/resume handlers.
1784 + */
1785 +- if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
1786 ++ if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
1787 + return 0;
1788 + }
1789 +
1790 +@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
1791 + return track_intel_runtime_pm_wakeref(rpm);
1792 + }
1793 +
1794 ++intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
1795 ++{
1796 ++ return __intel_runtime_pm_get_if_active(rpm, false);
1797 ++}
1798 ++
1799 ++intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
1800 ++{
1801 ++ return __intel_runtime_pm_get_if_active(rpm, true);
1802 ++}
1803 ++
1804 + /**
1805 + * intel_runtime_pm_get_noresume - grab a runtime pm reference
1806 + * @rpm: the intel_runtime_pm structure
1807 +diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
1808 +index ae64ff14c6425..1e4ddd11c12bb 100644
1809 +--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
1810 ++++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
1811 +@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
1812 +
1813 + intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
1814 + intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
1815 ++intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
1816 + intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
1817 + intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
1818 +
1819 +@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
1820 + for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
1821 + intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
1822 +
1823 ++#define with_intel_runtime_pm_if_active(rpm, wf) \
1824 ++ for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
1825 ++ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
1826 ++
1827 + void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
1828 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1829 + void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
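
The with_intel_runtime_pm_if_active() macro added above reuses the for-loop idiom of its _if_in_use sibling: the body executes exactly once when a wakeref was obtained, and the reference is dropped when the body is left. A simplified self-contained illustration of that guard shape, using stand-in types rather than the real i915 API:

#include <stdio.h>

typedef int wakeref_t;

static wakeref_t pm_get_if_active(void) { return 1; }  /* pretend: active */
static void pm_put(wakeref_t wf) { printf("released wakeref %d\n", wf); }

/* Run the body once iff a wakeref was obtained, then release it. */
#define with_pm_if_active(wf) \
        for ((wf) = pm_get_if_active(); (wf); pm_put(wf), (wf) = 0)

int main(void)
{
        wakeref_t wf;

        with_pm_if_active(wf)
                printf("touching HW with wakeref %d\n", wf);
        return 0;
}
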
1830 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
1831 +index a45fe95aff494..3dc65877fa10d 100644
1832 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
1833 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
1834 +@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
1835 + break;
1836 + case MSM_DSI_PHY_7NM:
1837 + case MSM_DSI_PHY_7NM_V4_1:
1838 +- pll = msm_dsi_pll_7nm_init(pdev, id);
1839 ++ pll = msm_dsi_pll_7nm_init(pdev, type, id);
1840 + break;
1841 + default:
1842 + pll = ERR_PTR(-ENXIO);
1843 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
1844 +index 3405982a092c4..bbecb1de5678e 100644
1845 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
1846 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
1847 +@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
1848 + }
1849 + #endif
1850 + #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
1851 +-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
1852 ++struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
1853 ++ enum msm_dsi_phy_type type, int id);
1854 + #else
1855 + static inline struct msm_dsi_pll *
1856 +-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
1857 ++msm_dsi_pll_7nm_init(struct platform_device *pdev,
1858 ++ enum msm_dsi_phy_type type, int id)
1859 + {
1860 + return ERR_PTR(-ENODEV);
1861 + }
1862 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
1863 +index 93bf142e4a4e6..c1f6708367ae9 100644
1864 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
1865 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
1866 +@@ -852,7 +852,8 @@ err_base_clk_hw:
1867 + return ret;
1868 + }
1869 +
1870 +-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
1871 ++struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
1872 ++ enum msm_dsi_phy_type type, int id)
1873 + {
1874 + struct dsi_pll_7nm *pll_7nm;
1875 + struct msm_dsi_pll *pll;
1876 +@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
1877 + pll = &pll_7nm->base;
1878 + pll->min_rate = 1000000000UL;
1879 + pll->max_rate = 3500000000UL;
1880 +- if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
1881 ++ if (type == MSM_DSI_PHY_7NM_V4_1) {
1882 + pll->min_rate = 600000000UL;
1883 + pll->max_rate = (unsigned long)5000000000ULL;
1884 + /* workaround for max rate overflowing on 32-bit builds: */
1885 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
1886 +index 3d0adfa6736a5..b38ebccad42ff 100644
1887 +--- a/drivers/gpu/drm/msm/msm_drv.c
1888 ++++ b/drivers/gpu/drm/msm/msm_drv.c
1889 +@@ -1079,6 +1079,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
1890 + static int __maybe_unused msm_pm_prepare(struct device *dev)
1891 + {
1892 + struct drm_device *ddev = dev_get_drvdata(dev);
1893 ++ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
1894 ++
1895 ++ if (!priv || !priv->kms)
1896 ++ return 0;
1897 +
1898 + return drm_mode_config_helper_suspend(ddev);
1899 + }
1900 +@@ -1086,6 +1090,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
1901 + static void __maybe_unused msm_pm_complete(struct device *dev)
1902 + {
1903 + struct drm_device *ddev = dev_get_drvdata(dev);
1904 ++ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
1905 ++
1906 ++ if (!priv || !priv->kms)
1907 ++ return;
1908 +
1909 + drm_mode_config_helper_resume(ddev);
1910 + }
1911 +@@ -1318,6 +1326,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
1912 + static void msm_pdev_shutdown(struct platform_device *pdev)
1913 + {
1914 + struct drm_device *drm = platform_get_drvdata(pdev);
1915 ++ struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
1916 ++
1917 ++ if (!priv || !priv->kms)
1918 ++ return;
1919 +
1920 + drm_atomic_helper_shutdown(drm);
1921 + }
1922 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1923 +index 8769e7aa097f4..81903749d2415 100644
1924 +--- a/drivers/infiniband/hw/cxgb4/cm.c
1925 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
1926 +@@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
1927 + ep->com.local_addr.ss_family == AF_INET) {
1928 + err = cxgb4_remove_server_filter(
1929 + ep->com.dev->rdev.lldi.ports[0], ep->stid,
1930 +- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
1931 ++ ep->com.dev->rdev.lldi.rxq_ids[0], false);
1932 + } else {
1933 + struct sockaddr_in6 *sin6;
1934 + c4iw_init_wr_wait(ep->com.wr_waitp);
1935 + err = cxgb4_remove_server(
1936 + ep->com.dev->rdev.lldi.ports[0], ep->stid,
1937 +- ep->com.dev->rdev.lldi.rxq_ids[0], 0);
1938 ++ ep->com.dev->rdev.lldi.rxq_ids[0], true);
1939 + if (err)
1940 + goto done;
1941 + err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
1942 +diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
1943 +index 7a7222d4c19c0..b938d1d04d96e 100644
1944 +--- a/drivers/irqchip/irq-ingenic-tcu.c
1945 ++++ b/drivers/irqchip/irq-ingenic-tcu.c
1946 +@@ -179,5 +179,6 @@ err_free_tcu:
1947 + }
1948 + IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
1949 + IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
1950 ++IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init);
1951 + IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
1952 + IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
1953 +diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
1954 +index b61a8901ef722..ea36bb00be80b 100644
1955 +--- a/drivers/irqchip/irq-ingenic.c
1956 ++++ b/drivers/irqchip/irq-ingenic.c
1957 +@@ -155,6 +155,7 @@ static int __init intc_2chip_of_init(struct device_node *node,
1958 + {
1959 + return ingenic_intc_of_init(node, 2);
1960 + }
1961 ++IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init);
1962 + IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
1963 + IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
1964 + IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);
1965 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
1966 +index 5e306bba43751..1ca65b434f1fa 100644
1967 +--- a/drivers/md/dm-ioctl.c
1968 ++++ b/drivers/md/dm-ioctl.c
1969 +@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
1970 + * Grab our output buffer.
1971 + */
1972 + nl = orig_nl = get_result_buffer(param, param_size, &len);
1973 +- if (len < needed) {
1974 ++ if (len < needed || len < sizeof(nl->dev)) {
1975 + param->flags |= DM_BUFFER_FULL_FLAG;
1976 + goto out;
1977 + }
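
The list_devices() fix above tightens the output-buffer check: besides the computed payload size, the buffer must at least cover the fixed-size leading field, otherwise DM_BUFFER_FULL_FLAG is set and the caller retries with a larger buffer. A hedged sketch of that bounds check, with an illustrative struct layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct name_list {
        uint64_t dev;
        uint32_t next;
        char name[];
};

static int fill_list(void *buf, size_t len, size_t needed)
{
        (void)buf;
        /* reject buffers too small for even the fixed header field */
        if (len < needed || len < sizeof(((struct name_list *)0)->dev))
                return -1;      /* caller sets DM_BUFFER_FULL_FLAG */
        /* ... emit entries into buf ... */
        return 0;
}

int main(void)
{
        char buf[4];

        printf("tiny buffer -> %d\n", fill_list(buf, sizeof(buf), 2));
        return 0;
}
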
1978 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1979 +index 9b824c21580a4..5c590895c14c3 100644
1980 +--- a/drivers/md/dm-table.c
1981 ++++ b/drivers/md/dm-table.c
1982 +@@ -1387,6 +1387,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
1983 + return !q || blk_queue_zoned_model(q) != *zoned_model;
1984 + }
1985 +
1986 ++/*
1987 ++ * Check the device zoned model based on the target feature flag. If the target
1988 ++ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
1989 ++ * also accepted but all devices must have the same zoned model. If the target
1990 ++ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
1991 ++ * zoned model with all zoned devices having the same zone size.
1992 ++ */
1993 + static bool dm_table_supports_zoned_model(struct dm_table *t,
1994 + enum blk_zoned_model zoned_model)
1995 + {
1996 +@@ -1396,13 +1403,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
1997 + for (i = 0; i < dm_table_get_num_targets(t); i++) {
1998 + ti = dm_table_get_target(t, i);
1999 +
2000 +- if (zoned_model == BLK_ZONED_HM &&
2001 +- !dm_target_supports_zoned_hm(ti->type))
2002 +- return false;
2003 +-
2004 +- if (!ti->type->iterate_devices ||
2005 +- ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
2006 +- return false;
2007 ++ if (dm_target_supports_zoned_hm(ti->type)) {
2008 ++ if (!ti->type->iterate_devices ||
2009 ++ ti->type->iterate_devices(ti, device_not_zoned_model,
2010 ++ &zoned_model))
2011 ++ return false;
2012 ++ } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
2013 ++ if (zoned_model == BLK_ZONED_HM)
2014 ++ return false;
2015 ++ }
2016 + }
2017 +
2018 + return true;
2019 +@@ -1414,9 +1423,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
2020 + struct request_queue *q = bdev_get_queue(dev->bdev);
2021 + unsigned int *zone_sectors = data;
2022 +
2023 ++ if (!blk_queue_is_zoned(q))
2024 ++ return 0;
2025 ++
2026 + return !q || blk_queue_zone_sectors(q) != *zone_sectors;
2027 + }
2028 +
2029 ++/*
2030 ++ * Check consistency of zoned model and zone sectors across all targets. For
2031 ++ * zone sectors, if the destination device is a zoned block device, it shall
2032 ++ * have the specified zone_sectors.
2033 ++ */
2034 + static int validate_hardware_zoned_model(struct dm_table *table,
2035 + enum blk_zoned_model zoned_model,
2036 + unsigned int zone_sectors)
2037 +@@ -1435,7 +1452,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
2038 + return -EINVAL;
2039 +
2040 + if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
2041 +- DMERR("%s: zone sectors is not consistent across all devices",
2042 ++ DMERR("%s: zone sectors is not consistent across all zoned devices",
2043 + dm_device_name(table->md));
2044 + return -EINVAL;
2045 + }
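
The rewritten dm_table_supports_zoned_model() above leans on device-mapper's iterate_devices convention: the per-device callback returns nonzero when a device does not satisfy the property, so any nonzero result fails the table as a whole. A minimal sketch of that convention with stand-in types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum zoned_model { NONE, HOST_AWARE, HOST_MANAGED };

/* Mirrors device_not_zoned_model(): nonzero means mismatch. */
static int device_not_zoned_model(enum zoned_model dev, enum zoned_model want)
{
        return dev != want;
}

static bool table_supports_zoned_model(const enum zoned_model *devs,
                                       size_t n, enum zoned_model want)
{
        for (size_t i = 0; i < n; i++)
                if (device_not_zoned_model(devs[i], want))
                        return false;
        return true;
}

int main(void)
{
        enum zoned_model devs[] = { HOST_MANAGED, HOST_MANAGED, NONE };

        printf("all host-managed? %d\n",
               table_supports_zoned_model(devs, 3, HOST_MANAGED));
        return 0;
}
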
2046 +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
2047 +index 6b8e5bdd8526d..808a98ef624c3 100644
2048 +--- a/drivers/md/dm-verity-target.c
2049 ++++ b/drivers/md/dm-verity-target.c
2050 +@@ -34,7 +34,7 @@
2051 + #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
2052 + #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
2053 +
2054 +-#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \
2055 ++#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
2056 + DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
2057 +
2058 + static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
2059 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
2060 +index 697f9de37355e..7e88df64d197b 100644
2061 +--- a/drivers/md/dm-zoned-target.c
2062 ++++ b/drivers/md/dm-zoned-target.c
2063 +@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
2064 + static struct target_type dmz_type = {
2065 + .name = "zoned",
2066 + .version = {2, 0, 0},
2067 +- .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
2068 ++ .features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
2069 + .module = THIS_MODULE,
2070 + .ctr = dmz_ctr,
2071 + .dtr = dmz_dtr,
2072 +diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
2073 +index 71b3a4d5adc65..7b0bf470795c4 100644
2074 +--- a/drivers/misc/habanalabs/common/device.c
2075 ++++ b/drivers/misc/habanalabs/common/device.c
2076 +@@ -106,6 +106,8 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
2077 + list_del(&hpriv->dev_node);
2078 + mutex_unlock(&hdev->fpriv_list_lock);
2079 +
2080 ++ put_pid(hpriv->taskpid);
2081 ++
2082 + kfree(hpriv);
2083 +
2084 + return 0;
2085 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2086 +index 6d5a39af10978..47afc5938c26b 100644
2087 +--- a/drivers/net/bonding/bond_main.c
2088 ++++ b/drivers/net/bonding/bond_main.c
2089 +@@ -3918,15 +3918,11 @@ static int bond_neigh_init(struct neighbour *n)
2090 +
2091 + rcu_read_lock();
2092 + slave = bond_first_slave_rcu(bond);
2093 +- if (!slave) {
2094 +- ret = -EINVAL;
2095 ++ if (!slave)
2096 + goto out;
2097 +- }
2098 + slave_ops = slave->dev->netdev_ops;
2099 +- if (!slave_ops->ndo_neigh_setup) {
2100 +- ret = -EINVAL;
2101 ++ if (!slave_ops->ndo_neigh_setup)
2102 + goto out;
2103 +- }
2104 +
2105 + /* TODO: find another way [1] to implement this.
2106 + * Passing a zeroed structure is fragile,
2107 +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
2108 +index 1a9e9b9a4bf6c..6c75e5897620d 100644
2109 +--- a/drivers/net/can/c_can/c_can.c
2110 ++++ b/drivers/net/can/c_can/c_can.c
2111 +@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
2112 + .brp_inc = 1,
2113 + };
2114 +
2115 +-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
2116 +-{
2117 +- if (priv->device)
2118 +- pm_runtime_enable(priv->device);
2119 +-}
2120 +-
2121 +-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
2122 +-{
2123 +- if (priv->device)
2124 +- pm_runtime_disable(priv->device);
2125 +-}
2126 +-
2127 + static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
2128 + {
2129 + if (priv->device)
2130 +@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
2131 +
2132 + int register_c_can_dev(struct net_device *dev)
2133 + {
2134 +- struct c_can_priv *priv = netdev_priv(dev);
2135 + int err;
2136 +
2137 + /* Deactivate pins to prevent DRA7 DCAN IP from being
2138 +@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
2139 + */
2140 + pinctrl_pm_select_sleep_state(dev->dev.parent);
2141 +
2142 +- c_can_pm_runtime_enable(priv);
2143 +-
2144 + dev->flags |= IFF_ECHO; /* we support local echo */
2145 + dev->netdev_ops = &c_can_netdev_ops;
2146 +
2147 + err = register_candev(dev);
2148 +- if (err)
2149 +- c_can_pm_runtime_disable(priv);
2150 +- else
2151 ++ if (!err)
2152 + devm_can_led_init(dev);
2153 +-
2154 + return err;
2155 + }
2156 + EXPORT_SYMBOL_GPL(register_c_can_dev);
2157 +
2158 + void unregister_c_can_dev(struct net_device *dev)
2159 + {
2160 +- struct c_can_priv *priv = netdev_priv(dev);
2161 +-
2162 + unregister_candev(dev);
2163 +-
2164 +- c_can_pm_runtime_disable(priv);
2165 + }
2166 + EXPORT_SYMBOL_GPL(unregister_c_can_dev);
2167 +
2168 +diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
2169 +index 406b4847e5dc3..7efb60b508762 100644
2170 +--- a/drivers/net/can/c_can/c_can_pci.c
2171 ++++ b/drivers/net/can/c_can/c_can_pci.c
2172 +@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
2173 + {
2174 + struct net_device *dev = pci_get_drvdata(pdev);
2175 + struct c_can_priv *priv = netdev_priv(dev);
2176 ++ void __iomem *addr = priv->base;
2177 +
2178 + unregister_c_can_dev(dev);
2179 +
2180 + free_c_can_dev(dev);
2181 +
2182 +- pci_iounmap(pdev, priv->base);
2183 ++ pci_iounmap(pdev, addr);
2184 + pci_disable_msi(pdev);
2185 + pci_clear_master(pdev);
2186 + pci_release_regions(pdev);
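
The c_can_pci_remove() change above is a classic use-after-free fix: priv is embedded in the net_device allocation, so the mapped BAR address must be copied out before free_c_can_dev() releases it. A compact illustration of the pattern, with free() and printf() standing in for the PCI helpers:

#include <stdio.h>
#include <stdlib.h>

struct priv {
        void *base;
};

static void remove_device(struct priv *priv)
{
        void *addr = priv->base; /* save before the containing object dies */

        free(priv);                            /* free_c_can_dev() equivalent */
        printf("unmap %p after free\n", addr); /* pci_iounmap() equivalent */
}

int main(void)
{
        struct priv *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->base = &p;   /* any illustrative address */
        remove_device(p);
        return 0;
}
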
2187 +diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
2188 +index 05f425ceb53a2..47b251b1607ce 100644
2189 +--- a/drivers/net/can/c_can/c_can_platform.c
2190 ++++ b/drivers/net/can/c_can/c_can_platform.c
2191 +@@ -29,6 +29,7 @@
2192 + #include <linux/list.h>
2193 + #include <linux/io.h>
2194 + #include <linux/platform_device.h>
2195 ++#include <linux/pm_runtime.h>
2196 + #include <linux/clk.h>
2197 + #include <linux/of.h>
2198 + #include <linux/of_device.h>
2199 +@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
2200 + platform_set_drvdata(pdev, dev);
2201 + SET_NETDEV_DEV(dev, &pdev->dev);
2202 +
2203 ++ pm_runtime_enable(priv->device);
2204 + ret = register_c_can_dev(dev);
2205 + if (ret) {
2206 + dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
2207 +@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
2208 + return 0;
2209 +
2210 + exit_free_device:
2211 ++ pm_runtime_disable(priv->device);
2212 + free_c_can_dev(dev);
2213 + exit:
2214 + dev_err(&pdev->dev, "probe failed\n");
2215 +@@ -408,9 +411,10 @@ exit:
2216 + static int c_can_plat_remove(struct platform_device *pdev)
2217 + {
2218 + struct net_device *dev = platform_get_drvdata(pdev);
2219 ++ struct c_can_priv *priv = netdev_priv(dev);
2220 +
2221 + unregister_c_can_dev(dev);
2222 +-
2223 ++ pm_runtime_disable(priv->device);
2224 + free_c_can_dev(dev);
2225 +
2226 + return 0;
2227 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2228 +index 24cd3c1027ecc..dc9b4aae3abb6 100644
2229 +--- a/drivers/net/can/dev.c
2230 ++++ b/drivers/net/can/dev.c
2231 +@@ -1255,6 +1255,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
2232 +
2233 + static struct rtnl_link_ops can_link_ops __read_mostly = {
2234 + .kind = "can",
2235 ++ .netns_refund = true,
2236 + .maxtype = IFLA_CAN_MAX,
2237 + .policy = can_policy,
2238 + .setup = can_setup,
2239 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2240 +index d712c6fdbc87d..7cbaac238ff62 100644
2241 +--- a/drivers/net/can/flexcan.c
2242 ++++ b/drivers/net/can/flexcan.c
2243 +@@ -658,9 +658,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
2244 + static int flexcan_chip_freeze(struct flexcan_priv *priv)
2245 + {
2246 + struct flexcan_regs __iomem *regs = priv->regs;
2247 +- unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
2248 ++ unsigned int timeout;
2249 ++ u32 bitrate = priv->can.bittiming.bitrate;
2250 + u32 reg;
2251 +
2252 ++ if (bitrate)
2253 ++ timeout = 1000 * 1000 * 10 / bitrate;
2254 ++ else
2255 ++ timeout = FLEXCAN_TIMEOUT_US / 10;
2256 ++
2257 + reg = priv->read(&regs->mcr);
2258 + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
2259 + priv->write(reg, &regs->mcr);
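
flexcan_chip_freeze() above used to divide by priv->can.bittiming.bitrate unconditionally, and the bitrate is zero until the interface has been configured. The fix falls back to a fixed timeout in that case. A runnable sketch of the guarded computation (the FLEXCAN_TIMEOUT_US value here is illustrative):

#include <stdio.h>

#define FLEXCAN_TIMEOUT_US 250

static unsigned int freeze_timeout(unsigned int bitrate)
{
        if (bitrate)
                return 1000 * 1000 * 10 / bitrate;
        return FLEXCAN_TIMEOUT_US / 10; /* no bitrate yet: fixed fallback */
}

int main(void)
{
        printf("500 kbit/s -> %u us\n", freeze_timeout(500000));
        printf("unconfigured -> %u us\n", freeze_timeout(0));
        return 0;
}
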
2260 +diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
2261 +index 43151dd6cb1c3..99323c273aa56 100644
2262 +--- a/drivers/net/can/kvaser_pciefd.c
2263 ++++ b/drivers/net/can/kvaser_pciefd.c
2264 +@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
2265 + #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
2266 + #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
2267 + #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
2268 ++#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
2269 + #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
2270 + #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
2271 + /* Loopback control register */
2272 +@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
2273 + timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
2274 + 0);
2275 +
2276 ++ /* Disable Bus load reporting */
2277 ++ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
2278 ++
2279 + tx_npackets = ioread32(can->reg_base +
2280 + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
2281 + if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
2282 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
2283 +index 3c1e379751683..6f0bf5db885cd 100644
2284 +--- a/drivers/net/can/m_can/m_can.c
2285 ++++ b/drivers/net/can/m_can/m_can.c
2286 +@@ -502,9 +502,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
2287 + }
2288 +
2289 + while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
2290 +- if (rxfs & RXFS_RFL)
2291 +- netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
2292 +-
2293 + m_can_read_fifo(dev, rxfs);
2294 +
2295 + quota--;
2296 +@@ -885,7 +882,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
2297 + {
2298 + struct m_can_classdev *cdev = netdev_priv(dev);
2299 +
2300 +- m_can_rx_handler(dev, 1);
2301 ++ m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
2302 +
2303 + m_can_enable_all_interrupts(cdev);
2304 +
2305 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
2306 +index f504b6858ed29..52100d4fe5a25 100644
2307 +--- a/drivers/net/dsa/b53/b53_common.c
2308 ++++ b/drivers/net/dsa/b53/b53_common.c
2309 +@@ -1070,13 +1070,6 @@ static int b53_setup(struct dsa_switch *ds)
2310 + b53_disable_port(ds, port);
2311 + }
2312 +
2313 +- /* Let DSA handle the case were multiple bridges span the same switch
2314 +- * device and different VLAN awareness settings are requested, which
2315 +- * would be breaking filtering semantics for any of the other bridge
2316 +- * devices. (not hardware supported)
2317 +- */
2318 +- ds->vlan_filtering_is_global = true;
2319 +-
2320 + return b53_setup_devlink_resources(ds);
2321 + }
2322 +
2323 +@@ -2627,6 +2620,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
2324 + ds->configure_vlan_while_not_filtering = true;
2325 + ds->untag_bridge_pvid = true;
2326 + dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
2327 ++ /* Let DSA handle the case where multiple bridges span the same switch
2328 ++ * device and different VLAN awareness settings are requested, which
2329 ++ * would be breaking filtering semantics for any of the other bridge
2330 ++ * devices. (not hardware supported)
2331 ++ */
2332 ++ ds->vlan_filtering_is_global = true;
2333 ++
2334 + mutex_init(&dev->reg_mutex);
2335 + mutex_init(&dev->stats_mutex);
2336 +
2337 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
2338 +index edb0a1027b38f..510324916e916 100644
2339 +--- a/drivers/net/dsa/bcm_sf2.c
2340 ++++ b/drivers/net/dsa/bcm_sf2.c
2341 +@@ -584,8 +584,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
2342 + * in bits 15:8 and the patch level in bits 7:0 which is exactly what
2343 + * the REG_PHY_REVISION register layout is.
2344 + */
2345 +-
2346 +- return priv->hw_params.gphy_rev;
2347 ++ if (priv->int_phy_mask & BIT(port))
2348 ++ return priv->hw_params.gphy_rev;
2349 ++ else
2350 ++ return 0;
2351 + }
2352 +
2353 + static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
2354 +diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2355 +index 1b7e8c91b5417..423d6d78d15c7 100644
2356 +--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2357 ++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2358 +@@ -727,7 +727,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
2359 + kvfree(tx_info);
2360 + return 0;
2361 + }
2362 +- tx_info->open_state = false;
2363 ++ tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
2364 + spin_unlock(&tx_info->lock);
2365 +
2366 + complete(&tx_info->completion);
2367 +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
2368 +index ba7f857d1710d..ae09cac876028 100644
2369 +--- a/drivers/net/ethernet/davicom/dm9000.c
2370 ++++ b/drivers/net/ethernet/davicom/dm9000.c
2371 +@@ -1510,7 +1510,7 @@ dm9000_probe(struct platform_device *pdev)
2372 + goto out;
2373 + }
2374 +
2375 +- db->irq_wake = platform_get_irq(pdev, 1);
2376 ++ db->irq_wake = platform_get_irq_optional(pdev, 1);
2377 + if (db->irq_wake >= 0) {
2378 + dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
2379 +
2380 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
2381 +index 80fb1f537bb33..c9c380c508791 100644
2382 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
2383 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
2384 +@@ -1308,6 +1308,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
2385 + */
2386 + if (unlikely(priv->need_mac_restart)) {
2387 + ftgmac100_start_hw(priv);
2388 ++ priv->need_mac_restart = false;
2389 +
2390 + /* Re-enable "bad" interrupts */
2391 + iowrite32(FTGMAC100_INT_BAD,
2392 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
2393 +index 21a6ce415cb22..2b90a345507b8 100644
2394 +--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
2395 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
2396 +@@ -234,6 +234,8 @@ enum enetc_bdr_type {TX, RX};
2397 + #define ENETC_PM0_MAXFRM 0x8014
2398 + #define ENETC_SET_TX_MTU(val) ((val) << 16)
2399 + #define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
2400 ++#define ENETC_PM0_RX_FIFO 0x801c
2401 ++#define ENETC_PM0_RX_FIFO_VAL 1
2402 +
2403 + #define ENETC_PM_IMDIO_BASE 0x8030
2404 +
2405 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2406 +index 83187cd59fddd..68133563a40c1 100644
2407 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2408 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2409 +@@ -490,6 +490,12 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
2410 +
2411 + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
2412 + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
2413 ++
2414 ++ /* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
2415 ++ * and may lead to RX lock-up under traffic. Set it to 1 instead,
2416 ++ * as recommended by the hardware team.
2417 ++ */
2418 ++ enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
2419 + }
2420 +
2421 + static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
2422 +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
2423 +index 2e344aada4c60..1753807cbf97e 100644
2424 +--- a/drivers/net/ethernet/freescale/fec_ptp.c
2425 ++++ b/drivers/net/ethernet/freescale/fec_ptp.c
2426 +@@ -377,9 +377,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
2427 + u64 ns;
2428 + unsigned long flags;
2429 +
2430 ++ mutex_lock(&adapter->ptp_clk_mutex);
2431 ++ /* Check the ptp clock */
2432 ++ if (!adapter->ptp_clk_on) {
2433 ++ mutex_unlock(&adapter->ptp_clk_mutex);
2434 ++ return -EINVAL;
2435 ++ }
2436 + spin_lock_irqsave(&adapter->tmreg_lock, flags);
2437 + ns = timecounter_read(&adapter->tc);
2438 + spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
2439 ++ mutex_unlock(&adapter->ptp_clk_mutex);
2440 +
2441 + *ts = ns_to_timespec64(ns);
2442 +
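
The fec_ptp_gettime() fix above makes the timecounter read conditional on the PTP clock actually being on, checked under ptp_clk_mutex so the state cannot change mid-read. A simplified userspace sketch of that guard using pthreads (state and values are stand-ins):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t clk_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool ptp_clk_on = true;

static int ptp_gettime(uint64_t *ns)
{
        pthread_mutex_lock(&clk_mutex);
        if (!ptp_clk_on) {
                pthread_mutex_unlock(&clk_mutex);
                return -1;      /* -EINVAL in the driver */
        }
        *ns = 123456789;        /* stand-in for timecounter_read() */
        pthread_mutex_unlock(&clk_mutex);
        return 0;
}

int main(void)
{
        uint64_t ns;

        if (!ptp_gettime(&ns))
                printf("now = %llu ns\n", (unsigned long long)ns);
        return 0;
}
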
2443 +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
2444 +index d391a45cebb66..4fab2ee5bbf58 100644
2445 +--- a/drivers/net/ethernet/freescale/gianfar.c
2446 ++++ b/drivers/net/ethernet/freescale/gianfar.c
2447 +@@ -2391,6 +2391,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2448 + if (lstatus & BD_LFLAG(RXBD_LAST))
2449 + size -= skb->len;
2450 +
2451 ++ WARN(size < 0, "gianfar: rx fragment size underflow");
2452 ++ if (size < 0)
2453 ++ return false;
2454 ++
2455 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2456 + rxb->page_offset + RXBUF_ALIGNMENT,
2457 + size, GFAR_RXB_TRUESIZE);
2458 +@@ -2553,6 +2557,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2459 + if (lstatus & BD_LFLAG(RXBD_EMPTY))
2460 + break;
2461 +
2462 ++ /* lost RXBD_LAST descriptor due to overrun */
2463 ++ if (skb &&
2464 ++ (lstatus & BD_LFLAG(RXBD_FIRST))) {
2465 ++ /* discard faulty buffer */
2466 ++ dev_kfree_skb(skb);
2467 ++ skb = NULL;
2468 ++ rx_queue->stats.rx_dropped++;
2469 ++
2470 ++ /* can continue normally */
2471 ++ }
2472 ++
2473 + /* order rx buffer descriptor reads */
2474 + rmb();
2475 +
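
The gfar_clean_rx_ring() hunk above recovers from an Rx overrun that swallowed a frame's last descriptor: if a first-fragment descriptor arrives while a frame is still being assembled, the half-built skb is dropped and assembly restarts cleanly. A toy model of that descriptor state machine (flags are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BD_FIRST (1u << 0)
#define BD_LAST  (1u << 1)

int main(void)
{
        /* FIRST, FIRST (previous frame lost its LAST), LAST */
        unsigned int ring[] = { BD_FIRST, BD_FIRST, BD_LAST };
        bool in_frame = false;
        int dropped = 0, completed = 0;

        for (size_t i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
                if (in_frame && (ring[i] & BD_FIRST)) {
                        dropped++;      /* discard the faulty half frame */
                        in_frame = false;
                }
                if (ring[i] & BD_FIRST)
                        in_frame = true;
                if (in_frame && (ring[i] & BD_LAST)) {
                        completed++;
                        in_frame = false;
                }
        }
        printf("completed=%d dropped=%d\n", completed, dropped);
        return 0;
}
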
2476 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2477 +index 858cb293152a9..8bce5f1510bec 100644
2478 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2479 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2480 +@@ -1663,8 +1663,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
2481 + for (j = 0; j < fetch_num; j++) {
2482 + /* alloc one skb and init */
2483 + skb = hns_assemble_skb(ndev);
2484 +- if (!skb)
2485 ++ if (!skb) {
2486 ++ ret = -ENOMEM;
2487 + goto out;
2488 ++ }
2489 + rd = &tx_ring_data(priv, skb->queue_mapping);
2490 + hns_nic_net_xmit_hw(ndev, skb, rd);
2491 +
2492 +diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
2493 +index 88faf05e23baf..0b1e890dd583b 100644
2494 +--- a/drivers/net/ethernet/intel/e1000e/82571.c
2495 ++++ b/drivers/net/ethernet/intel/e1000e/82571.c
2496 +@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
2497 + } else {
2498 + data &= ~IGP02E1000_PM_D0_LPLU;
2499 + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
2500 ++ if (ret_val)
2501 ++ return ret_val;
2502 + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2503 + * during Dx states where the power conservation is most
2504 + * important. During driver activity we should enable
2505 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2506 +index e9b82c209c2df..a0948002ddf85 100644
2507 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
2508 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2509 +@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
2510 + struct e1000_adapter *adapter;
2511 + adapter = container_of(work, struct e1000_adapter, reset_task);
2512 +
2513 ++ rtnl_lock();
2514 + /* don't run the task if already down */
2515 +- if (test_bit(__E1000_DOWN, &adapter->state))
2516 ++ if (test_bit(__E1000_DOWN, &adapter->state)) {
2517 ++ rtnl_unlock();
2518 + return;
2519 ++ }
2520 +
2521 + if (!(adapter->flags & FLAG_RESTART_NOW)) {
2522 + e1000e_dump(adapter);
2523 + e_err("Reset adapter unexpectedly\n");
2524 + }
2525 + e1000e_reinit_locked(adapter);
2526 ++ rtnl_unlock();
2527 + }
2528 +
2529 + /**
2530 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
2531 +index 0a867d64d4675..dc5b3c06d1e01 100644
2532 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
2533 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
2534 +@@ -1776,7 +1776,8 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
2535 + goto err_alloc;
2536 + }
2537 +
2538 +- if (iavf_process_config(adapter))
2539 ++ err = iavf_process_config(adapter);
2540 ++ if (err)
2541 + goto err_alloc;
2542 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2543 +
2544 +diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
2545 +index aaa954aae5744..7bda8c5edea5d 100644
2546 +--- a/drivers/net/ethernet/intel/igb/igb.h
2547 ++++ b/drivers/net/ethernet/intel/igb/igb.h
2548 +@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
2549 + void igb_ptp_rx_hang(struct igb_adapter *adapter);
2550 + void igb_ptp_tx_hang(struct igb_adapter *adapter);
2551 + void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
2552 +-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2553 +- struct sk_buff *skb);
2554 ++int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2555 ++ struct sk_buff *skb);
2556 + int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
2557 + int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
2558 + void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
2559 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2560 +index 0d343d0509739..fecfcfcf161ca 100644
2561 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
2562 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
2563 +@@ -8232,7 +8232,8 @@ static inline bool igb_page_is_reserved(struct page *page)
2564 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2565 + }
2566 +
2567 +-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
2568 ++static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
2569 ++ int rx_buf_pgcnt)
2570 + {
2571 + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
2572 + struct page *page = rx_buffer->page;
2573 +@@ -8243,7 +8244,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
2574 +
2575 + #if (PAGE_SIZE < 8192)
2576 + /* if we are only owner of page we can reuse it */
2577 +- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
2578 ++ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
2579 + return false;
2580 + #else
2581 + #define IGB_LAST_OFFSET \
2582 +@@ -8319,9 +8320,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
2583 + return NULL;
2584 +
2585 + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
2586 +- igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
2587 +- xdp->data += IGB_TS_HDR_LEN;
2588 +- size -= IGB_TS_HDR_LEN;
2589 ++ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
2590 ++ xdp->data += IGB_TS_HDR_LEN;
2591 ++ size -= IGB_TS_HDR_LEN;
2592 ++ }
2593 + }
2594 +
2595 + /* Determine available headroom for copy */
2596 +@@ -8382,8 +8384,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
2597 +
2598 + /* pull timestamp out of packet data */
2599 + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
2600 +- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
2601 +- __skb_pull(skb, IGB_TS_HDR_LEN);
2602 ++ if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
2603 ++ __skb_pull(skb, IGB_TS_HDR_LEN);
2604 + }
2605 +
2606 + /* update buffer offset */
2607 +@@ -8632,11 +8634,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
2608 + }
2609 +
2610 + static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
2611 +- const unsigned int size)
2612 ++ const unsigned int size, int *rx_buf_pgcnt)
2613 + {
2614 + struct igb_rx_buffer *rx_buffer;
2615 +
2616 + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2617 ++ *rx_buf_pgcnt =
2618 ++#if (PAGE_SIZE < 8192)
2619 ++ page_count(rx_buffer->page);
2620 ++#else
2621 ++ 0;
2622 ++#endif
2623 + prefetchw(rx_buffer->page);
2624 +
2625 + /* we are reusing so sync this buffer for CPU use */
2626 +@@ -8652,9 +8660,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
2627 + }
2628 +
2629 + static void igb_put_rx_buffer(struct igb_ring *rx_ring,
2630 +- struct igb_rx_buffer *rx_buffer)
2631 ++ struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
2632 + {
2633 +- if (igb_can_reuse_rx_page(rx_buffer)) {
2634 ++ if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
2635 + /* hand second half of page back to the ring */
2636 + igb_reuse_rx_page(rx_ring, rx_buffer);
2637 + } else {
2638 +@@ -8681,6 +8689,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
2639 + u16 cleaned_count = igb_desc_unused(rx_ring);
2640 + unsigned int xdp_xmit = 0;
2641 + struct xdp_buff xdp;
2642 ++ int rx_buf_pgcnt;
2643 +
2644 + xdp.rxq = &rx_ring->xdp_rxq;
2645 +
2646 +@@ -8711,7 +8720,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
2647 + */
2648 + dma_rmb();
2649 +
2650 +- rx_buffer = igb_get_rx_buffer(rx_ring, size);
2651 ++ rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
2652 +
2653 + /* retrieve a buffer from the ring */
2654 + if (!skb) {
2655 +@@ -8754,7 +8763,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
2656 + break;
2657 + }
2658 +
2659 +- igb_put_rx_buffer(rx_ring, rx_buffer);
2660 ++ igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
2661 + cleaned_count++;
2662 +
2663 + /* fetch next buffer in frame if non-eop */
2664 +diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
2665 +index 7cc5428c3b3d2..86a576201f5ff 100644
2666 +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
2667 ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
2668 +@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
2669 + dev_kfree_skb_any(skb);
2670 + }
2671 +
2672 ++#define IGB_RET_PTP_DISABLED 1
2673 ++#define IGB_RET_PTP_INVALID 2
2674 ++
2675 + /**
2676 + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
2677 + * @q_vector: Pointer to interrupt specific structure
2678 +@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
2679 + *
2680 + * This function is meant to retrieve a timestamp from the first buffer of an
2681 + * incoming frame. The value is stored in little endian format starting on
2682 +- * byte 8.
2683 ++ * byte 8
2684 ++ *
2685 ++ * Returns: 0 if success, nonzero if failure
2686 + **/
2687 +-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2688 +- struct sk_buff *skb)
2689 ++int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2690 ++ struct sk_buff *skb)
2691 + {
2692 +- __le64 *regval = (__le64 *)va;
2693 + struct igb_adapter *adapter = q_vector->adapter;
2694 ++ __le64 *regval = (__le64 *)va;
2695 + int adjust = 0;
2696 +
2697 ++ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
2698 ++ return IGB_RET_PTP_DISABLED;
2699 ++
2700 + /* The timestamp is recorded in little endian format.
2701 + * DWORD: 0 1 2 3
2702 + * Field: Reserved Reserved SYSTIML SYSTIMH
2703 + */
2704 ++
2705 ++ /* check reserved dwords are zero, be/le doesn't matter for zero */
2706 ++ if (regval[0])
2707 ++ return IGB_RET_PTP_INVALID;
2708 ++
2709 + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2710 + le64_to_cpu(regval[1]));
2711 +
2712 +@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2713 + }
2714 + skb_hwtstamps(skb)->hwtstamp =
2715 + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
2716 ++
2717 ++ return 0;
2718 + }
2719 +
2720 + /**
2721 +@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
2722 + * This function is meant to retrieve a timestamp from the internal registers
2723 + * of the adapter and store it in the skb.
2724 + **/
2725 +-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
2726 +- struct sk_buff *skb)
2727 ++void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
2728 + {
2729 + struct igb_adapter *adapter = q_vector->adapter;
2730 + struct e1000_hw *hw = &adapter->hw;
2731 +- u64 regval;
2732 + int adjust = 0;
2733 ++ u64 regval;
2734 ++
2735 ++ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
2736 ++ return;
2737 +
2738 + /* If this bit is set, then the RX registers contain the time stamp. No
2739 + * other packet will be time stamped until we read these registers, so
2740 +diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
2741 +index 35baae900c1fd..6dca67d9c25d8 100644
2742 +--- a/drivers/net/ethernet/intel/igc/igc.h
2743 ++++ b/drivers/net/ethernet/intel/igc/igc.h
2744 +@@ -545,7 +545,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
2745 + void igc_ptp_reset(struct igc_adapter *adapter);
2746 + void igc_ptp_suspend(struct igc_adapter *adapter);
2747 + void igc_ptp_stop(struct igc_adapter *adapter);
2748 +-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
2749 ++void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
2750 + struct sk_buff *skb);
2751 + int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
2752 + int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
2753 +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
2754 +index ec8cd69d49928..da259cd59adda 100644
2755 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
2756 ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
2757 +@@ -1695,6 +1695,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
2758 + Autoneg);
2759 + }
2760 +
2761 ++ /* Set pause flow control settings */
2762 ++ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2763 ++
2764 + switch (hw->fc.requested_mode) {
2765 + case igc_fc_full:
2766 + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
2767 +@@ -1709,9 +1712,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
2768 + Asym_Pause);
2769 + break;
2770 + default:
2771 +- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
2772 +- ethtool_link_ksettings_add_link_mode(cmd, advertising,
2773 +- Asym_Pause);
2774 ++ break;
2775 + }
2776 +
2777 + status = pm_runtime_suspended(&adapter->pdev->dev) ?
2778 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
2779 +index b673ac1199bbc..7b822cdcc6c58 100644
2780 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
2781 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
2782 +@@ -3846,10 +3846,19 @@ static void igc_reset_task(struct work_struct *work)
2783 +
2784 + adapter = container_of(work, struct igc_adapter, reset_task);
2785 +
2786 ++ rtnl_lock();
2787 ++ /* If we're already down or resetting, just bail */
2788 ++ if (test_bit(__IGC_DOWN, &adapter->state) ||
2789 ++ test_bit(__IGC_RESETTING, &adapter->state)) {
2790 ++ rtnl_unlock();
2791 ++ return;
2792 ++ }
2793 ++
2794 + igc_rings_dump(adapter);
2795 + igc_regs_dump(adapter);
2796 + netdev_err(adapter->netdev, "Reset adapter\n");
2797 + igc_reinit_locked(adapter);
2798 ++ rtnl_unlock();
2799 + }
2800 +
2801 + /**
2802 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
2803 +index ac0b9c85da7ca..545f4d0e67cf4 100644
2804 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
2805 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
2806 +@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
2807 + }
2808 +
2809 + /**
2810 +- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
2811 ++ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
2812 + * @q_vector: Pointer to interrupt specific structure
2813 + * @va: Pointer to address containing Rx buffer
2814 + * @skb: Buffer containing timestamp and packet
2815 + *
2816 +- * This function is meant to retrieve the first timestamp from the
2817 +- * first buffer of an incoming frame. The value is stored in little
2818 +- * endian format starting on byte 0. There's a second timestamp
2819 +- * starting on byte 8.
2820 +- **/
2821 +-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
2822 ++ * This function retrieves the timestamp saved at the beginning of the packet
2823 ++ * buffer. While two timestamps are available, one in timer0 reference and the
2824 ++ * other in timer1 reference, this function considers only the timestamp in
2825 ++ * timer0 reference.
2826 ++ */
2827 ++void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
2828 + struct sk_buff *skb)
2829 + {
2830 + struct igc_adapter *adapter = q_vector->adapter;
2831 +- __le64 *regval = (__le64 *)va;
2832 +- int adjust = 0;
2833 +-
2834 +- /* The timestamp is recorded in little endian format.
2835 +- * DWORD: | 0 | 1 | 2 | 3
2836 +- * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
2837 ++ u64 regval;
2838 ++ int adjust;
2839 ++
2840 ++ /* Timestamps are saved in little endian at the beginning of the packet
2841 ++ * buffer following the layout:
2842 ++ *
2843 ++ * DWORD: | 0 | 1 | 2 | 3 |
2844 ++ * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
2845 ++ *
2846 ++ * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
2847 ++ * part of the timestamp.
2848 + */
2849 +- igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2850 +- le64_to_cpu(regval[0]));
2851 +-
2852 +- /* adjust timestamp for the RX latency based on link speed */
2853 +- if (adapter->hw.mac.type == igc_i225) {
2854 +- switch (adapter->link_speed) {
2855 +- case SPEED_10:
2856 +- adjust = IGC_I225_RX_LATENCY_10;
2857 +- break;
2858 +- case SPEED_100:
2859 +- adjust = IGC_I225_RX_LATENCY_100;
2860 +- break;
2861 +- case SPEED_1000:
2862 +- adjust = IGC_I225_RX_LATENCY_1000;
2863 +- break;
2864 +- case SPEED_2500:
2865 +- adjust = IGC_I225_RX_LATENCY_2500;
2866 +- break;
2867 +- }
2868 ++ regval = le32_to_cpu(va[2]);
2869 ++ regval |= (u64)le32_to_cpu(va[3]) << 32;
2870 ++ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
2871 ++
2872 ++ /* Adjust timestamp for the RX latency based on link speed */
2873 ++ switch (adapter->link_speed) {
2874 ++ case SPEED_10:
2875 ++ adjust = IGC_I225_RX_LATENCY_10;
2876 ++ break;
2877 ++ case SPEED_100:
2878 ++ adjust = IGC_I225_RX_LATENCY_100;
2879 ++ break;
2880 ++ case SPEED_1000:
2881 ++ adjust = IGC_I225_RX_LATENCY_1000;
2882 ++ break;
2883 ++ case SPEED_2500:
2884 ++ adjust = IGC_I225_RX_LATENCY_2500;
2885 ++ break;
2886 ++ default:
2887 ++ adjust = 0;
2888 ++ netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
2889 ++ break;
2890 + }
2891 + skb_hwtstamps(skb)->hwtstamp =
2892 + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
2893 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2894 +index f3f449f53920f..278fc866fad49 100644
2895 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2896 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2897 +@@ -9582,8 +9582,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
2898 + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
2899 + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
2900 + input->sw_idx, queue);
2901 +- if (!err)
2902 +- ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2903 ++ if (err)
2904 ++ goto err_out_w_lock;
2905 ++
2906 ++ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2907 + spin_unlock(&adapter->fdir_perfect_lock);
2908 +
2909 + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
2910 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
2911 +index 91a9d00e4fb51..407b9477da248 100644
2912 +--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
2913 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
2914 +@@ -140,6 +140,15 @@ enum npc_kpu_lh_ltype {
2915 + NPC_LT_LH_CUSTOM1 = 0xF,
2916 + };
2917 +
2918 ++/* NPC port kind defines how the incoming or outgoing packets
2919 ++ * are processed. NPC accepts packets from up to 64 pkinds.
2920 ++ * Software assigns a pkind for each incoming port such as CGX
2921 ++ * Ethernet interfaces, LBK interfaces, etc.
2922 ++ */
2923 ++enum npc_pkind_type {
2924 ++ NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
2925 ++};
2926 ++
2927 + struct npc_kpu_profile_cam {
2928 + u8 state;
2929 + u8 state_mask;
2930 +@@ -300,6 +309,28 @@ struct nix_rx_action {
2931 + /* NPC_AF_INTFX_KEX_CFG field masks */
2932 + #define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
2933 +
2934 ++/* NPC_PARSE_KEX_S nibble definitions for each field */
2935 ++#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
2936 ++#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
2937 ++#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
2938 ++#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
2939 ++#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
2940 ++#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
2941 ++#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
2942 ++#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
2943 ++#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
2944 ++#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
2945 ++#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
2946 ++#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
2947 ++#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
2948 ++#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
2949 ++#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
2950 ++#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
2951 ++#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
2952 ++#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
2953 ++#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
2954 ++#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
2955 ++
2956 + /* NIX Receive Vtag Action Structure */
2957 + #define VTAG0_VALID_BIT BIT_ULL(15)
2958 + #define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
2959 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
2960 +index 77bb4ed326005..0e4af93be0fb4 100644
2961 +--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
2962 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
2963 +@@ -148,6 +148,20 @@
2964 + (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
2965 + ((flags_ena) << 6) | ((key_ofs) & 0x3F))
2966 +
2967 ++/* Rx parse key extract nibble enable */
2968 ++#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
2969 ++ NPC_PARSE_NIBBLE_LA_LTYPE | \
2970 ++ NPC_PARSE_NIBBLE_LB_LTYPE | \
2971 ++ NPC_PARSE_NIBBLE_LC_LTYPE | \
2972 ++ NPC_PARSE_NIBBLE_LD_LTYPE | \
2973 ++ NPC_PARSE_NIBBLE_LE_LTYPE)
2974 ++/* Tx parse key extract nibble enable */
2975 ++#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
2976 ++ NPC_PARSE_NIBBLE_LB_LTYPE | \
2977 ++ NPC_PARSE_NIBBLE_LC_LTYPE | \
2978 ++ NPC_PARSE_NIBBLE_LD_LTYPE | \
2979 ++ NPC_PARSE_NIBBLE_LE_LTYPE)
2980 ++
2981 + enum npc_kpu_parser_state {
2982 + NPC_S_NA = 0,
2983 + NPC_S_KPU1_ETHER,
2984 +@@ -13385,9 +13399,10 @@ static const struct npc_mcam_kex npc_mkex_default = {
2985 + .name = "default",
2986 + .kpu_version = NPC_KPU_PROFILE_VER,
2987 + .keyx_cfg = {
2988 +- /* nibble: LA..LE (ltype only) + Channel */
2989 +- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
2990 +- [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
2991 ++ /* nibble: LA..LE (ltype only) + channel */
2992 ++ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
2993 ++ /* nibble: LA..LE (ltype only) */
2994 ++ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
2995 + },
2996 + .intf_lid_lt_ld = {
2997 + /* Default RX MCAM KEX profile */
2998 +@@ -13405,12 +13420,14 @@ static const struct npc_mcam_kex npc_mkex_default = {
2999 + /* Layer B: Single VLAN (CTAG) */
3000 + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
3001 + [NPC_LT_LB_CTAG] = {
3002 +- KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
3003 ++ KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
3004 + },
3005 + /* Layer B: Stacked VLAN (STAG|QinQ) */
3006 + [NPC_LT_LB_STAG_QINQ] = {
3007 +- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
3008 +- KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
3009 ++ /* Outer VLAN: 2 bytes, KW0[63:48] */
3010 ++ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
3011 ++ /* Ethertype: 2 bytes, KW0[47:32] */
3012 ++ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
3013 + },
3014 + [NPC_LT_LB_FDSA] = {
3015 + /* SWITCH PORT: 1 byte, KW0[63:48] */
3016 +@@ -13436,17 +13453,69 @@ static const struct npc_mcam_kex npc_mkex_default = {
3017 + [NPC_LID_LD] = {
3018 + /* Layer D:UDP */
3019 + [NPC_LT_LD_UDP] = {
3020 +- /* SPORT: 2 bytes, KW3[15:0] */
3021 +- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
3022 +- /* DPORT: 2 bytes, KW3[31:16] */
3023 +- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
3024 ++ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
3025 ++ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
3026 ++ },
3027 ++ /* Layer D:TCP */
3028 ++ [NPC_LT_LD_TCP] = {
3029 ++ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
3030 ++ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
3031 ++ },
3032 ++ },
3033 ++ },
3034 ++
3035 ++ /* Default TX MCAM KEX profile */
3036 ++ [NIX_INTF_TX] = {
3037 ++ [NPC_LID_LA] = {
3038 ++ /* Layer A: NIX_INST_HDR_S + Ethernet */
3039 ++ /* NIX appends 8 bytes of NIX_INST_HDR_S at the
3040 ++ * start of each TX packet supplied to NPC.
3041 ++ */
3042 ++ [NPC_LT_LA_IH_NIX_ETHER] = {
3043 ++ /* PF_FUNC: 2B , KW0 [47:32] */
3044 ++ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
3045 ++ /* DMAC: 6 bytes, KW1[63:16] */
3046 ++ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
3047 ++ },
3048 ++ },
3049 ++ [NPC_LID_LB] = {
3050 ++ /* Layer B: Single VLAN (CTAG) */
3051 ++ [NPC_LT_LB_CTAG] = {
3052 ++ /* CTAG VLAN[2..3] KW0[63:48] */
3053 ++ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
3054 ++ /* CTAG VLAN[2..3] KW1[15:0] */
3055 ++ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
3056 ++ },
3057 ++ /* Layer B: Stacked VLAN (STAG|QinQ) */
3058 ++ [NPC_LT_LB_STAG_QINQ] = {
3059 ++ /* Outer VLAN: 2 bytes, KW0[63:48] */
3060 ++ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
3061 ++ /* Outer VLAN: 2 Bytes, KW1[15:0] */
3062 ++ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
3063 ++ },
3064 ++ },
3065 ++ [NPC_LID_LC] = {
3066 ++ /* Layer C: IPv4 */
3067 ++ [NPC_LT_LC_IP] = {
3068 ++ /* SIP+DIP: 8 bytes, KW2[63:0] */
3069 ++ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
3070 ++ },
3071 ++ /* Layer C: IPv6 */
3072 ++ [NPC_LT_LC_IP6] = {
3073 ++ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
3074 ++ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
3075 ++ },
3076 ++ },
3077 ++ [NPC_LID_LD] = {
3078 ++ /* Layer D:UDP */
3079 ++ [NPC_LT_LD_UDP] = {
3080 ++ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
3081 ++ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
3082 + },
3083 + /* Layer D:TCP */
3084 + [NPC_LT_LD_TCP] = {
3085 +- /* SPORT: 2 bytes, KW3[15:0] */
3086 +- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
3087 +- /* DPORT: 2 bytes, KW3[31:16] */
3088 +- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
3089 ++ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
3090 ++ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
3091 + },
3092 + },
3093 + },
3094 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3095 +index e1f9189607303..644d28b0692b3 100644
3096 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3097 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3098 +@@ -2151,8 +2151,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
3099 + INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
3100 +
3101 + for (irq = 0; irq < rvu->num_vec; irq++) {
3102 +- if (rvu->irq_allocated[irq])
3103 ++ if (rvu->irq_allocated[irq]) {
3104 + free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
3105 ++ rvu->irq_allocated[irq] = false;
3106 ++ }
3107 + }
3108 +
3109 + pci_free_irq_vectors(rvu->pdev);
3110 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3111 +index 809f50ab0432e..bc870bff14df7 100644
3112 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3113 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3114 +@@ -144,12 +144,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
3115 + char __user *buffer,
3116 + size_t count, loff_t *ppos)
3117 + {
3118 +- int index, off = 0, flag = 0, go_back = 0, off_prev;
3119 ++ int index, off = 0, flag = 0, go_back = 0, len = 0;
3120 + struct rvu *rvu = filp->private_data;
3121 + int lf, pf, vf, pcifunc;
3122 + struct rvu_block block;
3123 + int bytes_not_copied;
3124 ++ int lf_str_size = 12;
3125 + int buf_size = 2048;
3126 ++ char *lfs;
3127 + char *buf;
3128 +
3129 + /* don't allow partial reads */
3130 +@@ -159,12 +161,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
3131 + buf = kzalloc(buf_size, GFP_KERNEL);
3132 + if (!buf)
3133 + return -ENOSPC;
3134 +- off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
3135 ++
3136 ++ lfs = kzalloc(lf_str_size, GFP_KERNEL);
3137 ++ if (!lfs) {
3138 ++ kfree(buf);
3139 ++ return -ENOMEM;
3140 ++ }
3141 ++ off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
3142 ++ "pcifunc");
3143 + for (index = 0; index < BLK_COUNT; index++)
3144 +- if (strlen(rvu->hw->block[index].name))
3145 +- off += scnprintf(&buf[off], buf_size - 1 - off,
3146 +- "%*s\t", (index - 1) * 2,
3147 +- rvu->hw->block[index].name);
3148 ++ if (strlen(rvu->hw->block[index].name)) {
3149 ++ off += scnprintf(&buf[off], buf_size - 1 - off,
3150 ++ "%-*s", lf_str_size,
3151 ++ rvu->hw->block[index].name);
3152 ++ }
3153 + off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
3154 + for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3155 + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
3156 +@@ -173,14 +183,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
3157 + continue;
3158 +
3159 + if (vf) {
3160 ++ sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
3161 + go_back = scnprintf(&buf[off],
3162 + buf_size - 1 - off,
3163 +- "PF%d:VF%d\t\t", pf,
3164 +- vf - 1);
3165 ++ "%-*s", lf_str_size, lfs);
3166 + } else {
3167 ++ sprintf(lfs, "PF%d", pf);
3168 + go_back = scnprintf(&buf[off],
3169 + buf_size - 1 - off,
3170 +- "PF%d\t\t", pf);
3171 ++ "%-*s", lf_str_size, lfs);
3172 + }
3173 +
3174 + off += go_back;
3175 +@@ -188,20 +199,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
3176 + block = rvu->hw->block[index];
3177 + if (!strlen(block.name))
3178 + continue;
3179 +- off_prev = off;
3180 ++ len = 0;
3181 ++ lfs[len] = '\0';
3182 + for (lf = 0; lf < block.lf.max; lf++) {
3183 + if (block.fn_map[lf] != pcifunc)
3184 + continue;
3185 + flag = 1;
3186 +- off += scnprintf(&buf[off], buf_size - 1
3187 +- - off, "%3d,", lf);
3188 ++ len += sprintf(&lfs[len], "%d,", lf);
3189 + }
3190 +- if (flag && off_prev != off)
3191 +- off--;
3192 +- else
3193 +- go_back++;
3194 ++
3195 ++ if (flag)
3196 ++ len--;
3197 ++ lfs[len] = '\0';
3198 + off += scnprintf(&buf[off], buf_size - 1 - off,
3199 +- "\t");
3200 ++ "%-*s", lf_str_size, lfs);
3201 ++ if (!strlen(lfs))
3202 ++ go_back += lf_str_size;
3203 + }
3204 + if (!flag)
3205 + off -= go_back;
3206 +@@ -213,6 +226,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
3207 + }
3208 +
3209 + bytes_not_copied = copy_to_user(buffer, buf, off);
3210 ++ kfree(lfs);
3211 + kfree(buf);
3212 +
3213 + if (bytes_not_copied)
3214 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3215 +index 21a89dd76d3c1..f6a3cf3e6f236 100644
3216 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3217 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3218 +@@ -1129,6 +1129,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
3219 + /* Config Rx pkt length, csum checks and apad enable / disable */
3220 + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
3221 +
3222 ++ /* Configure pkind for TX parse config */
3223 ++ cfg = NPC_TX_DEF_PKIND;
3224 ++ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
3225 ++
3226 + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
3227 + err = nix_interface_init(rvu, pcifunc, intf, nixlf);
3228 + if (err)
3229 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3230 +index 511b01dd03edc..169ae491f9786 100644
3231 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3232 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3233 +@@ -2035,10 +2035,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
3234 + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
3235 + if (index >= mcam->bmap_entries)
3236 + break;
3237 ++ entry = index + 1;
3238 + if (mcam->entry2cntr_map[index] != req->cntr)
3239 + continue;
3240 +
3241 +- entry = index + 1;
3242 + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
3243 + index, req->cntr);
3244 + }
3245 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3246 +index 66f1a212f1f4e..9fef9be015e5e 100644
3247 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3248 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3249 +@@ -1616,6 +1616,7 @@ int otx2_stop(struct net_device *netdev)
3250 + struct otx2_nic *pf = netdev_priv(netdev);
3251 + struct otx2_cq_poll *cq_poll = NULL;
3252 + struct otx2_qset *qset = &pf->qset;
3253 ++ struct otx2_rss_info *rss;
3254 + int qidx, vec, wrk;
3255 +
3256 + netif_carrier_off(netdev);
3257 +@@ -1628,6 +1629,10 @@ int otx2_stop(struct net_device *netdev)
3258 + /* First stop packet Rx/Tx */
3259 + otx2_rxtx_enable(pf, false);
3260 +
3261 ++ /* Clear RSS enable flag */
3262 ++ rss = &pf->hw.rss_info;
3263 ++ rss->enable = false;
3264 ++
3265 + /* Cleanup Queue IRQ */
3266 + vec = pci_irq_vector(pf->pdev,
3267 + pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
3268 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3269 +index 2f05b0f9de019..9da34f82d4668 100644
3270 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
3271 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3272 +@@ -90,14 +90,15 @@ struct page_pool;
3273 + MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
3274 + #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
3275 +
3276 +-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
3277 ++#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
3278 ++#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
3279 ++#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
3280 + /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
3281 + * WQEs, This page will absorb write overflow by the hardware, when
3282 + * receiving packets larger than MTU. These oversize packets are
3283 + * dropped by the driver at a later stage.
3284 + */
3285 +-#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
3286 +-#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
3287 ++#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
3288 + #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
3289 + #define MLX5E_MAX_RQ_NUM_MTTS \
3290 + ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
3291 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
3292 +index 24e2c0d955b99..b42396df3111d 100644
3293 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
3294 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
3295 +@@ -1182,7 +1182,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
3296 +
3297 + mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
3298 + &ctstate, &ctstate_mask);
3299 +- if (ctstate_mask)
3300 ++
3301 ++ if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
3302 + return -EOPNOTSUPP;
3303 +
3304 + ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
3305 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
3306 +index e472ed0eacfbc..7ed3f9f79f11a 100644
3307 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
3308 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
3309 +@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
3310 + option_key = (struct geneve_opt *)&enc_opts.key->data[0];
3311 + option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
3312 +
3313 ++ if (option_mask->opt_class == 0 && option_mask->type == 0 &&
3314 ++ !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
3315 ++ return 0;
3316 ++
3317 + if (option_key->length > max_tlv_option_data_len) {
3318 + NL_SET_ERR_MSG_MOD(extack,
3319 + "Matching on GENEVE options: unsupported option len");
3320 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3321 +index b8622440243b4..bcd05457647e2 100644
3322 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3323 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3324 +@@ -1873,6 +1873,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
3325 + {
3326 + struct mlx5e_priv *priv = netdev_priv(netdev);
3327 + struct mlx5_core_dev *mdev = priv->mdev;
3328 ++ int err;
3329 +
3330 + if (!MLX5_CAP_GEN(mdev, cqe_compression))
3331 + return -EOPNOTSUPP;
3332 +@@ -1882,7 +1883,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
3333 + return -EINVAL;
3334 + }
3335 +
3336 +- mlx5e_modify_rx_cqe_compression_locked(priv, enable);
3337 ++ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
3338 ++ if (err)
3339 ++ return err;
3340 ++
3341 + priv->channels.params.rx_cqe_compress_def = enable;
3342 +
3343 + return 0;
3344 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3345 +index 6394f9d8c6851..e2006c6053c9c 100644
3346 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3347 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3348 +@@ -303,9 +303,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
3349 + rq->wqe_overflow.addr);
3350 + }
3351 +
3352 +-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
3353 ++static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
3354 + {
3355 +- return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
3356 ++ return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
3357 + }
3358 +
3359 + static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
3360 +@@ -544,7 +544,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
3361 + mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
3362 + u32 byte_count =
3363 + rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
3364 +- u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
3365 ++ u64 dma_offset = mlx5e_get_mpwqe_offset(i);
3366 +
3367 + wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
3368 + wqe->data[0].byte_count = cpu_to_be32(byte_count);
3369 +@@ -3666,10 +3666,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3370 + }
3371 +
3372 + if (mlx5e_is_uplink_rep(priv)) {
3373 ++ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3374 ++
3375 + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3376 + stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3377 + stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3378 + stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3379 ++
3380 ++ /* vport multicast also counts packets that are dropped due to steering
3381 ++ * or rx out of buffer
3382 ++ */
3383 ++ stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3384 + } else {
3385 + mlx5e_fold_sw_stats64(priv, stats);
3386 + }
3387 +@@ -4494,8 +4501,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3388 + struct mlx5e_channel *c = priv->channels.c[i];
3389 +
3390 + mlx5e_rq_replace_xdp_prog(&c->rq, prog);
3391 +- if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
3392 ++ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
3393 ++ bpf_prog_inc(prog);
3394 + mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
3395 ++ }
3396 + }
3397 +
3398 + unlock:
3399 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
3400 +index 6d2ba8b84187c..7e1f8660dfec0 100644
3401 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
3402 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
3403 +@@ -506,7 +506,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
3404 + struct mlx5e_icosq *sq = &rq->channel->icosq;
3405 + struct mlx5_wq_cyc *wq = &sq->wq;
3406 + struct mlx5e_umr_wqe *umr_wqe;
3407 +- u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
3408 + u16 pi;
3409 + int err;
3410 + int i;
3411 +@@ -537,7 +536,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
3412 + umr_wqe->ctrl.opmod_idx_opcode =
3413 + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
3414 + MLX5_OPCODE_UMR);
3415 +- umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
3416 ++ umr_wqe->uctrl.xlt_offset =
3417 ++ cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
3418 +
3419 + sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
3420 + .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
3421 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3422 +index 4b8a442f09cd6..930f19c598bb6 100644
3423 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3424 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3425 +@@ -2597,6 +2597,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
3426 + *match_level = MLX5_MATCH_L4;
3427 + }
3428 +
2429 ++ /* Currently supported only for MPLS over UDP */
3430 ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
3431 ++ !netif_is_bareudp(filter_dev)) {
3432 ++ NL_SET_ERR_MSG_MOD(extack,
3433 ++ "Matching on MPLS is supported only for MPLS over UDP");
3434 ++ netdev_err(priv->netdev,
3435 ++ "Matching on MPLS is supported only for MPLS over UDP\n");
3436 ++ return -EOPNOTSUPP;
3437 ++ }
3438 ++
3439 + return 0;
3440 + }
3441 +
3442 +@@ -3200,6 +3210,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
3443 + return 0;
3444 + }
3445 +
3446 ++static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3447 ++ bool ct_flow, struct netlink_ext_ack *extack,
3448 ++ struct mlx5e_priv *priv,
3449 ++ struct mlx5_flow_spec *spec)
3450 ++{
3451 ++ if (!modify_tuple || ct_clear)
3452 ++ return true;
3453 ++
3454 ++ if (ct_flow) {
3455 ++ NL_SET_ERR_MSG_MOD(extack,
3456 ++ "can't offload tuple modification with non-clear ct()");
3457 ++ netdev_info(priv->netdev,
3458 ++ "can't offload tuple modification with non-clear ct()");
3459 ++ return false;
3460 ++ }
3461 ++
3462 ++ /* Add ct_state=-trk match so it will be offloaded for non ct flows
3463 ++ * (or after clear action), as otherwise, since the tuple is changed,
3464 ++ * we can't restore ct state
3465 ++ */
3466 ++ if (mlx5_tc_ct_add_no_trk_match(spec)) {
3467 ++ NL_SET_ERR_MSG_MOD(extack,
3468 ++ "can't offload tuple modification with ct matches and no ct(clear) action");
3469 ++ netdev_info(priv->netdev,
3470 ++ "can't offload tuple modification with ct matches and no ct(clear) action");
3471 ++ return false;
3472 ++ }
3473 ++
3474 ++ return true;
3475 ++}
3476 ++
3477 + static bool modify_header_match_supported(struct mlx5e_priv *priv,
3478 + struct mlx5_flow_spec *spec,
3479 + struct flow_action *flow_action,
3480 +@@ -3238,18 +3279,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
3481 + return err;
3482 + }
3483 +
3484 +- /* Add ct_state=-trk match so it will be offloaded for non ct flows
3485 +- * (or after clear action), as otherwise, since the tuple is changed,
3486 +- * we can't restore ct state
3487 +- */
3488 +- if (!ct_clear && modify_tuple &&
3489 +- mlx5_tc_ct_add_no_trk_match(spec)) {
3490 +- NL_SET_ERR_MSG_MOD(extack,
3491 +- "can't offload tuple modify header with ct matches");
3492 +- netdev_info(priv->netdev,
3493 +- "can't offload tuple modify header with ct matches");
3494 ++ if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3495 ++ priv, spec))
3496 + return false;
3497 +- }
3498 +
3499 + ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3500 + if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3501 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
3502 +index 5defd31d481c2..aa06fcb38f8b9 100644
3503 +--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
3504 ++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
3505 +@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
3506 + goto err_free_ctx_entry;
3507 + }
3508 +
3509 ++ /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
3510 ++ * configure the pre_tun table and are never actually sent to the
3511 ++ * firmware as an add-flow message. This causes the mask-id allocation
3512 ++ * on the firmware to get out of sync if allocated here.
3513 ++ */
3514 + new_mask_id = 0;
3515 +- if (!nfp_check_mask_add(app, nfp_flow->mask_data,
3516 ++ if (!nfp_flow->pre_tun_rule.dev &&
3517 ++ !nfp_check_mask_add(app, nfp_flow->mask_data,
3518 + nfp_flow->meta.mask_len,
3519 + &nfp_flow->meta.flags, &new_mask_id)) {
3520 + NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
3521 +@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
3522 + goto err_remove_mask;
3523 + }
3524 +
3525 +- if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
3526 ++ if (!nfp_flow->pre_tun_rule.dev &&
3527 ++ !nfp_check_mask_remove(app, nfp_flow->mask_data,
3528 + nfp_flow->meta.mask_len,
3529 + NULL, &new_mask_id)) {
3530 + NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
3531 +@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
3532 + return 0;
3533 +
3534 + err_remove_mask:
3535 +- nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
3536 +- NULL, &new_mask_id);
3537 ++ if (!nfp_flow->pre_tun_rule.dev)
3538 ++ nfp_check_mask_remove(app, nfp_flow->mask_data,
3539 ++ nfp_flow->meta.mask_len,
3540 ++ NULL, &new_mask_id);
3541 + err_remove_rhash:
3542 + WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
3543 + &ctx_entry->ht_node,
3544 +@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
3545 +
3546 + __nfp_modify_flow_metadata(priv, nfp_flow);
3547 +
3548 +- nfp_check_mask_remove(app, nfp_flow->mask_data,
3549 +- nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
3550 +- &new_mask_id);
3551 ++ if (!nfp_flow->pre_tun_rule.dev)
3552 ++ nfp_check_mask_remove(app, nfp_flow->mask_data,
3553 ++ nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
3554 ++ &new_mask_id);
3555 +
3556 + /* Update flow payload with mask ids. */
3557 + nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
3558 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
3559 +index 1c59aff2163c7..d72225d64a75d 100644
3560 +--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
3561 ++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
3562 +@@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
3563 + return -EOPNOTSUPP;
3564 + }
3565 +
3566 ++ if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
3567 ++ !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
3568 ++ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
3569 ++ return -EOPNOTSUPP;
3570 ++ }
3571 ++
3572 + /* Skip fields known to exist. */
3573 + mask += sizeof(struct nfp_flower_meta_tci);
3574 + ext += sizeof(struct nfp_flower_meta_tci);
3575 +@@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
3576 + mask += sizeof(struct nfp_flower_in_port);
3577 + ext += sizeof(struct nfp_flower_in_port);
3578 +
3579 ++ /* Ensure destination MAC address matches pre_tun_dev. */
3580 ++ mac = (struct nfp_flower_mac_mpls *)ext;
3581 ++ if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
3582 ++ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
3583 ++ return -EOPNOTSUPP;
3584 ++ }
3585 ++
3586 + /* Ensure destination MAC address is fully matched. */
3587 + mac = (struct nfp_flower_mac_mpls *)mask;
3588 + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
3589 +@@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
3590 + return -EOPNOTSUPP;
3591 + }
3592 +
3593 ++ if (mac->mpls_lse) {
3594 ++ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
3595 ++ return -EOPNOTSUPP;
3596 ++ }
3597 ++
3598 + mask += sizeof(struct nfp_flower_mac_mpls);
3599 + ext += sizeof(struct nfp_flower_mac_mpls);
3600 + if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
3601 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3602 +index 7248d248f6041..d19c02e991145 100644
3603 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3604 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
3605 +@@ -16,8 +16,9 @@
3606 + #define NFP_FL_MAX_ROUTES 32
3607 +
3608 + #define NFP_TUN_PRE_TUN_RULE_LIMIT 32
3609 +-#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
3610 +-#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
3611 ++#define NFP_TUN_PRE_TUN_RULE_DEL BIT(0)
3612 ++#define NFP_TUN_PRE_TUN_IDX_BIT BIT(3)
3613 ++#define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7)
3614 +
3615 + /**
3616 + * struct nfp_tun_pre_run_rule - rule matched before decap
3617 +@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
3618 + {
3619 + struct nfp_flower_priv *app_priv = app->priv;
3620 + struct nfp_tun_offloaded_mac *mac_entry;
3621 ++ struct nfp_flower_meta_tci *key_meta;
3622 + struct nfp_tun_pre_tun_rule payload;
3623 + struct net_device *internal_dev;
3624 + int err;
3625 +@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
3626 + if (!mac_entry)
3627 + return -ENOENT;
3628 +
3629 ++ /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
3630 ++ * set/clear for port_idx.
3631 ++ */
3632 ++ key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
3633 ++ if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
3634 ++ mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
3635 ++ else
3636 ++ mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
3637 ++
3638 + payload.port_idx = cpu_to_be16(mac_entry->index);
3639 +
3640 + /* Copy mac id and vlan to flow - dev may not exist at delete time. */
3641 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
3642 +index a81feffb09b8b..909eca14f647f 100644
3643 +--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
3644 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
3645 +@@ -1077,15 +1077,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
3646 + {
3647 + int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
3648 + struct ionic_tx_stats *stats = q_to_tx_stats(q);
3649 ++ int ndescs;
3650 + int err;
3651 +
3652 +- /* If TSO, need roundup(skb->len/mss) descs */
3653 ++ /* Each desc is mss long max, so a descriptor for each gso_seg */
3654 + if (skb_is_gso(skb))
3655 +- return (skb->len / skb_shinfo(skb)->gso_size) + 1;
3656 ++ ndescs = skb_shinfo(skb)->gso_segs;
3657 ++ else
3658 ++ ndescs = 1;
3659 +
3660 +- /* If non-TSO, just need 1 desc and nr_frags sg elems */
3661 + if (skb_shinfo(skb)->nr_frags <= sg_elems)
3662 +- return 1;
3663 ++ return ndescs;
3664 +
3665 + /* Too many frags, so linearize */
3666 + err = skb_linearize(skb);
3667 +@@ -1094,8 +1096,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
3668 +
3669 + stats->linearize++;
3670 +
3671 +- /* Need 1 desc and zero sg elems */
3672 +- return 1;
3673 ++ return ndescs;
3674 + }
3675 +
3676 + static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
3677 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
3678 +index 7760a3394e93c..7ecb3dfe30bd2 100644
3679 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
3680 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
3681 +@@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
3682 +
3683 + if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
3684 + vfree(fw_dump->tmpl_hdr);
3685 ++ fw_dump->tmpl_hdr = NULL;
3686 +
3687 + if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
3688 + extended = !qlcnic_83xx_extend_md_capab(adapter);
3689 +@@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
3690 + struct qlcnic_83xx_dump_template_hdr *hdr;
3691 +
3692 + hdr = fw_dump->tmpl_hdr;
3693 ++ if (!hdr)
3694 ++ return;
3695 + hdr->drv_cap_mask = 0x1f;
3696 + fw_dump->cap_mask = 0x1f;
3697 + dev_info(&pdev->dev,
3698 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
3699 +index 1591715c97177..d634da20b4f94 100644
3700 +--- a/drivers/net/ethernet/realtek/r8169_main.c
3701 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
3702 +@@ -4726,6 +4726,9 @@ static void rtl8169_down(struct rtl8169_private *tp)
3703 +
3704 + rtl8169_update_counters(tp);
3705 +
3706 ++ pci_clear_master(tp->pci_dev);
3707 ++ rtl_pci_commit(tp);
3708 ++
3709 + rtl8169_cleanup(tp, true);
3710 +
3711 + rtl_pll_power_down(tp);
3712 +@@ -4733,6 +4736,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
3713 +
3714 + static void rtl8169_up(struct rtl8169_private *tp)
3715 + {
3716 ++ pci_set_master(tp->pci_dev);
3717 + rtl_pll_power_up(tp);
3718 + rtl8169_init_phy(tp);
3719 + napi_enable(&tp->napi);
3720 +@@ -5394,8 +5398,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3721 +
3722 + rtl_hw_reset(tp);
3723 +
3724 +- pci_set_master(pdev);
3725 +-
3726 + rc = rtl_alloc_irq(tp);
3727 + if (rc < 0) {
3728 + dev_err(&pdev->dev, "Can't allocate interrupt\n");
3729 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
3730 +index 1503cc9ec6e2d..ef3634d1b9f7f 100644
3731 +--- a/drivers/net/ethernet/socionext/netsec.c
3732 ++++ b/drivers/net/ethernet/socionext/netsec.c
3733 +@@ -1708,14 +1708,17 @@ static int netsec_netdev_init(struct net_device *ndev)
3734 + goto err1;
3735 +
3736 + /* set phy power down */
3737 +- data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
3738 +- BMCR_PDOWN;
3739 +- netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
3740 ++ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
3741 ++ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
3742 ++ data | BMCR_PDOWN);
3743 +
3744 + ret = netsec_reset_hardware(priv, true);
3745 + if (ret)
3746 + goto err2;
3747 +
3748 ++ /* Restore phy power state */
3749 ++ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
3750 ++
3751 + spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
3752 + spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
3753 +
3754 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
3755 +index a5e0eff4a3874..9f5ccf1a0a540 100644
3756 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
3757 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
3758 +@@ -1217,6 +1217,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
3759 + plat_dat->init = sun8i_dwmac_init;
3760 + plat_dat->exit = sun8i_dwmac_exit;
3761 + plat_dat->setup = sun8i_dwmac_setup;
3762 ++ plat_dat->tx_fifo_size = 4096;
3763 ++ plat_dat->rx_fifo_size = 16384;
3764 +
3765 + ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
3766 + if (ret)
3767 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
3768 +index 2ecd3a8a690c2..cbf4429fb1d23 100644
3769 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
3770 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
3771 +@@ -402,19 +402,53 @@ static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
3772 + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
3773 + }
3774 +
3775 +-static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
3776 ++static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
3777 ++ dma_addr_t dma_rx_phy, unsigned int desc_size)
3778 + {
3779 +- struct dma_desc *p = (struct dma_desc *)head;
3780 ++ dma_addr_t dma_addr;
3781 + int i;
3782 +
3783 + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
3784 +
3785 +- for (i = 0; i < size; i++) {
3786 +- pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3787 +- i, (unsigned int)virt_to_phys(p),
3788 +- le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3789 +- le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3790 +- p++;
3791 ++ if (desc_size == sizeof(struct dma_desc)) {
3792 ++ struct dma_desc *p = (struct dma_desc *)head;
3793 ++
3794 ++ for (i = 0; i < size; i++) {
3795 ++ dma_addr = dma_rx_phy + i * sizeof(*p);
3796 ++ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
3797 ++ i, &dma_addr,
3798 ++ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3799 ++ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3800 ++ p++;
3801 ++ }
3802 ++ } else if (desc_size == sizeof(struct dma_extended_desc)) {
3803 ++ struct dma_extended_desc *extp = (struct dma_extended_desc *)head;
3804 ++
3805 ++ for (i = 0; i < size; i++) {
3806 ++ dma_addr = dma_rx_phy + i * sizeof(*extp);
3807 ++ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3808 ++ i, &dma_addr,
3809 ++ le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
3810 ++ le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
3811 ++ le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
3812 ++ le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
3813 ++ extp++;
3814 ++ }
3815 ++ } else if (desc_size == sizeof(struct dma_edesc)) {
3816 ++ struct dma_edesc *ep = (struct dma_edesc *)head;
3817 ++
3818 ++ for (i = 0; i < size; i++) {
3819 ++ dma_addr = dma_rx_phy + i * sizeof(*ep);
3820 ++ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
3821 ++ i, &dma_addr,
3822 ++ le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
3823 ++ le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
3824 ++ le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
3825 ++ le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
3826 ++ ep++;
3827 ++ }
3828 ++ } else {
3829 ++ pr_err("unsupported descriptor!");
3830 + }
3831 + }
3832 +
3833 +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
3834 +index d02cec296f51e..6650edfab5bc4 100644
3835 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
3836 ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
3837 +@@ -417,19 +417,22 @@ static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
3838 + }
3839 + }
3840 +
3841 +-static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
3842 ++static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
3843 ++ dma_addr_t dma_rx_phy, unsigned int desc_size)
3844 + {
3845 + struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3846 ++ dma_addr_t dma_addr;
3847 + int i;
3848 +
3849 + pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
3850 +
3851 + for (i = 0; i < size; i++) {
3852 + u64 x;
3853 ++ dma_addr = dma_rx_phy + i * sizeof(*ep);
3854 +
3855 + x = *(u64 *)ep;
3856 +- pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3857 +- i, (unsigned int)virt_to_phys(ep),
3858 ++ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
3859 ++ i, &dma_addr,
3860 + (unsigned int)x, (unsigned int)(x >> 32),
3861 + ep->basic.des2, ep->basic.des3);
3862 + ep++;
3863 +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
3864 +index afe7ec496545a..b0b84244ef107 100644
3865 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
3866 ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
3867 +@@ -78,7 +78,8 @@ struct stmmac_desc_ops {
3868 + /* get rx timestamp status */
3869 + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
3870 + /* Display ring */
3871 +- void (*display_ring)(void *head, unsigned int size, bool rx);
3872 ++ void (*display_ring)(void *head, unsigned int size, bool rx,
3873 ++ dma_addr_t dma_rx_phy, unsigned int desc_size);
3874 + /* set MSS via context descriptor */
3875 + void (*set_mss)(struct dma_desc *p, unsigned int mss);
3876 + /* get descriptor skbuff address */
3877 +diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
3878 +index f083360e4ba67..98ef43f35802a 100644
3879 +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
3880 ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
3881 +@@ -269,19 +269,22 @@ static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
3882 + return 1;
3883 + }
3884 +
3885 +-static void ndesc_display_ring(void *head, unsigned int size, bool rx)
3886 ++static void ndesc_display_ring(void *head, unsigned int size, bool rx,
3887 ++ dma_addr_t dma_rx_phy, unsigned int desc_size)
3888 + {
3889 + struct dma_desc *p = (struct dma_desc *)head;
3890 ++ dma_addr_t dma_addr;
3891 + int i;
3892 +
3893 + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
3894 +
3895 + for (i = 0; i < size; i++) {
3896 + u64 x;
3897 ++ dma_addr = dma_rx_phy + i * sizeof(*p);
3898 +
3899 + x = *(u64 *)p;
3900 +- pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
3901 +- i, (unsigned int)virt_to_phys(p),
3902 ++ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x",
3903 ++ i, &dma_addr,
3904 + (unsigned int)x, (unsigned int)(x >> 32),
3905 + p->des2, p->des3);
3906 + p++;
3907 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3908 +index 7d01c5cf60c96..6012eadae4604 100644
3909 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3910 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3911 +@@ -1109,6 +1109,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
3912 + static void stmmac_display_rx_rings(struct stmmac_priv *priv)
3913 + {
3914 + u32 rx_cnt = priv->plat->rx_queues_to_use;
3915 ++ unsigned int desc_size;
3916 + void *head_rx;
3917 + u32 queue;
3918 +
3919 +@@ -1118,19 +1119,24 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
3920 +
3921 + pr_info("\tRX Queue %u rings\n", queue);
3922 +
3923 +- if (priv->extend_desc)
3924 ++ if (priv->extend_desc) {
3925 + head_rx = (void *)rx_q->dma_erx;
3926 +- else
3927 ++ desc_size = sizeof(struct dma_extended_desc);
3928 ++ } else {
3929 + head_rx = (void *)rx_q->dma_rx;
3930 ++ desc_size = sizeof(struct dma_desc);
3931 ++ }
3932 +
3933 + /* Display RX ring */
3934 +- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
3935 ++ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
3936 ++ rx_q->dma_rx_phy, desc_size);
3937 + }
3938 + }
3939 +
3940 + static void stmmac_display_tx_rings(struct stmmac_priv *priv)
3941 + {
3942 + u32 tx_cnt = priv->plat->tx_queues_to_use;
3943 ++ unsigned int desc_size;
3944 + void *head_tx;
3945 + u32 queue;
3946 +
3947 +@@ -1140,14 +1146,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
3948 +
3949 + pr_info("\tTX Queue %d rings\n", queue);
3950 +
3951 +- if (priv->extend_desc)
3952 ++ if (priv->extend_desc) {
3953 + head_tx = (void *)tx_q->dma_etx;
3954 +- else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3955 ++ desc_size = sizeof(struct dma_extended_desc);
3956 ++ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
3957 + head_tx = (void *)tx_q->dma_entx;
3958 +- else
3959 ++ desc_size = sizeof(struct dma_edesc);
3960 ++ } else {
3961 + head_tx = (void *)tx_q->dma_tx;
3962 ++ desc_size = sizeof(struct dma_desc);
3963 ++ }
3964 +
3965 +- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
3966 ++ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
3967 ++ tx_q->dma_tx_phy, desc_size);
3968 + }
3969 + }
3970 +
3971 +@@ -3710,18 +3721,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3972 + unsigned int count = 0, error = 0, len = 0;
3973 + int status = 0, coe = priv->hw->rx_csum;
3974 + unsigned int next_entry = rx_q->cur_rx;
3975 ++ unsigned int desc_size;
3976 + struct sk_buff *skb = NULL;
3977 +
3978 + if (netif_msg_rx_status(priv)) {
3979 + void *rx_head;
3980 +
3981 + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3982 +- if (priv->extend_desc)
3983 ++ if (priv->extend_desc) {
3984 + rx_head = (void *)rx_q->dma_erx;
3985 +- else
3986 ++ desc_size = sizeof(struct dma_extended_desc);
3987 ++ } else {
3988 + rx_head = (void *)rx_q->dma_rx;
3989 ++ desc_size = sizeof(struct dma_desc);
3990 ++ }
3991 +
3992 +- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
3993 ++ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3994 ++ rx_q->dma_rx_phy, desc_size);
3995 + }
3996 + while (count < limit) {
3997 + unsigned int buf1_len = 0, buf2_len = 0;
3998 +@@ -4289,24 +4305,27 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3999 + static struct dentry *stmmac_fs_dir;
4000 +
4001 + static void sysfs_display_ring(void *head, int size, int extend_desc,
4002 +- struct seq_file *seq)
4003 ++ struct seq_file *seq, dma_addr_t dma_phy_addr)
4004 + {
4005 + int i;
4006 + struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4007 + struct dma_desc *p = (struct dma_desc *)head;
4008 ++ dma_addr_t dma_addr;
4009 +
4010 + for (i = 0; i < size; i++) {
4011 + if (extend_desc) {
4012 +- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4013 +- i, (unsigned int)virt_to_phys(ep),
4014 ++ dma_addr = dma_phy_addr + i * sizeof(*ep);
4015 ++ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4016 ++ i, &dma_addr,
4017 + le32_to_cpu(ep->basic.des0),
4018 + le32_to_cpu(ep->basic.des1),
4019 + le32_to_cpu(ep->basic.des2),
4020 + le32_to_cpu(ep->basic.des3));
4021 + ep++;
4022 + } else {
4023 +- seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4024 +- i, (unsigned int)virt_to_phys(p),
4025 ++ dma_addr = dma_phy_addr + i * sizeof(*p);
4026 ++ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4027 ++ i, &dma_addr,
4028 + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4029 + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4030 + p++;
4031 +@@ -4334,11 +4353,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4032 + if (priv->extend_desc) {
4033 + seq_printf(seq, "Extended descriptor ring:\n");
4034 + sysfs_display_ring((void *)rx_q->dma_erx,
4035 +- priv->dma_rx_size, 1, seq);
4036 ++ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
4037 + } else {
4038 + seq_printf(seq, "Descriptor ring:\n");
4039 + sysfs_display_ring((void *)rx_q->dma_rx,
4040 +- priv->dma_rx_size, 0, seq);
4041 ++ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
4042 + }
4043 + }
4044 +
4045 +@@ -4350,11 +4369,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4046 + if (priv->extend_desc) {
4047 + seq_printf(seq, "Extended descriptor ring:\n");
4048 + sysfs_display_ring((void *)tx_q->dma_etx,
4049 +- priv->dma_tx_size, 1, seq);
4050 ++ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4051 + } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4052 + seq_printf(seq, "Descriptor ring:\n");
4053 + sysfs_display_ring((void *)tx_q->dma_tx,
4054 +- priv->dma_tx_size, 0, seq);
4055 ++ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4056 + }
4057 + }
4058 +
4059 +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
4060 +index 68695d4afacd5..707ccdd03b19e 100644
4061 +--- a/drivers/net/ethernet/sun/niu.c
4062 ++++ b/drivers/net/ethernet/sun/niu.c
4063 +@@ -3931,8 +3931,6 @@ static void niu_xmac_interrupt(struct niu *np)
4064 + mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
4065 + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
4066 + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
4067 +- if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
4068 +- mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
4069 + if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
4070 + mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
4071 + if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
4072 +diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
4073 +index b8f4f419173f9..d054c6e83b1c9 100644
4074 +--- a/drivers/net/ethernet/tehuti/tehuti.c
4075 ++++ b/drivers/net/ethernet/tehuti/tehuti.c
4076 +@@ -2044,6 +2044,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4077 + /*bdx_hw_reset(priv); */
4078 + if (bdx_read_mac(priv)) {
4079 + pr_err("load MAC address failed\n");
4080 ++ err = -EFAULT;
4081 + goto err_out_iomap;
4082 + }
4083 + SET_NETDEV_DEV(ndev, &pdev->dev);
4084 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
4085 +index f34c7903ff524..7326ad4d5e1c7 100644
4086 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
4087 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
4088 +@@ -419,6 +419,9 @@ struct axienet_local {
4089 + struct phylink *phylink;
4090 + struct phylink_config phylink_config;
4091 +
4092 ++ /* Reference to PCS/PMA PHY if used */
4093 ++ struct mdio_device *pcs_phy;
4094 ++
4095 + /* Clock for AXI bus */
4096 + struct clk *clk;
4097 +
4098 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
4099 +index eea0bb7c23ede..69c79cc24e6e4 100644
4100 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
4101 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
4102 +@@ -1517,10 +1517,27 @@ static void axienet_validate(struct phylink_config *config,
4103 +
4104 + phylink_set(mask, Asym_Pause);
4105 + phylink_set(mask, Pause);
4106 +- phylink_set(mask, 1000baseX_Full);
4107 +- phylink_set(mask, 10baseT_Full);
4108 +- phylink_set(mask, 100baseT_Full);
4109 +- phylink_set(mask, 1000baseT_Full);
4110 ++
4111 ++ switch (state->interface) {
4112 ++ case PHY_INTERFACE_MODE_NA:
4113 ++ case PHY_INTERFACE_MODE_1000BASEX:
4114 ++ case PHY_INTERFACE_MODE_SGMII:
4115 ++ case PHY_INTERFACE_MODE_GMII:
4116 ++ case PHY_INTERFACE_MODE_RGMII:
4117 ++ case PHY_INTERFACE_MODE_RGMII_ID:
4118 ++ case PHY_INTERFACE_MODE_RGMII_RXID:
4119 ++ case PHY_INTERFACE_MODE_RGMII_TXID:
4120 ++ phylink_set(mask, 1000baseX_Full);
4121 ++ phylink_set(mask, 1000baseT_Full);
4122 ++ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
4123 ++ break;
4124 ++ fallthrough;
4125 ++ case PHY_INTERFACE_MODE_MII:
4126 ++ phylink_set(mask, 100baseT_Full);
4127 ++ phylink_set(mask, 10baseT_Full);
4128 ++ default:
4129 ++ break;
4130 ++ }
4131 +
4132 + bitmap_and(supported, supported, mask,
4133 + __ETHTOOL_LINK_MODE_MASK_NBITS);
4134 +@@ -1533,38 +1550,46 @@ static void axienet_mac_pcs_get_state(struct phylink_config *config,
4135 + {
4136 + struct net_device *ndev = to_net_dev(config->dev);
4137 + struct axienet_local *lp = netdev_priv(ndev);
4138 +- u32 emmc_reg, fcc_reg;
4139 +-
4140 +- state->interface = lp->phy_mode;
4141 +-
4142 +- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
4143 +- if (emmc_reg & XAE_EMMC_LINKSPD_1000)
4144 +- state->speed = SPEED_1000;
4145 +- else if (emmc_reg & XAE_EMMC_LINKSPD_100)
4146 +- state->speed = SPEED_100;
4147 +- else
4148 +- state->speed = SPEED_10;
4149 +-
4150 +- state->pause = 0;
4151 +- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
4152 +- if (fcc_reg & XAE_FCC_FCTX_MASK)
4153 +- state->pause |= MLO_PAUSE_TX;
4154 +- if (fcc_reg & XAE_FCC_FCRX_MASK)
4155 +- state->pause |= MLO_PAUSE_RX;
4156 +
4157 +- state->an_complete = 0;
4158 +- state->duplex = 1;
4159 ++ switch (state->interface) {
4160 ++ case PHY_INTERFACE_MODE_SGMII:
4161 ++ case PHY_INTERFACE_MODE_1000BASEX:
4162 ++ phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
4163 ++ break;
4164 ++ default:
4165 ++ break;
4166 ++ }
4167 + }
4168 +
4169 + static void axienet_mac_an_restart(struct phylink_config *config)
4170 + {
4171 +- /* Unsupported, do nothing */
4172 ++ struct net_device *ndev = to_net_dev(config->dev);
4173 ++ struct axienet_local *lp = netdev_priv(ndev);
4174 ++
4175 ++ phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
4176 + }
4177 +
4178 + static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
4179 + const struct phylink_link_state *state)
4180 + {
4181 +- /* nothing meaningful to do */
4182 ++ struct net_device *ndev = to_net_dev(config->dev);
4183 ++ struct axienet_local *lp = netdev_priv(ndev);
4184 ++ int ret;
4185 ++
4186 ++ switch (state->interface) {
4187 ++ case PHY_INTERFACE_MODE_SGMII:
4188 ++ case PHY_INTERFACE_MODE_1000BASEX:
4189 ++ ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
4190 ++ state->interface,
4191 ++ state->advertising);
4192 ++ if (ret < 0)
4193 ++ netdev_warn(ndev, "Failed to configure PCS: %d\n",
4194 ++ ret);
4195 ++ break;
4196 ++
4197 ++ default:
4198 ++ break;
4199 ++ }
4200 + }
4201 +
4202 + static void axienet_mac_link_down(struct phylink_config *config,
4203 +@@ -1823,7 +1848,7 @@ static int axienet_probe(struct platform_device *pdev)
4204 + if (IS_ERR(lp->regs)) {
4205 + dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
4206 + ret = PTR_ERR(lp->regs);
4207 +- goto free_netdev;
4208 ++ goto cleanup_clk;
4209 + }
4210 + lp->regs_start = ethres->start;
4211 +
4212 +@@ -1898,12 +1923,12 @@ static int axienet_probe(struct platform_device *pdev)
4213 + break;
4214 + default:
4215 + ret = -EINVAL;
4216 +- goto free_netdev;
4217 ++ goto cleanup_clk;
4218 + }
4219 + } else {
4220 + ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
4221 + if (ret)
4222 +- goto free_netdev;
4223 ++ goto cleanup_clk;
4224 + }
4225 +
4226 + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
4227 +@@ -1916,7 +1941,7 @@ static int axienet_probe(struct platform_device *pdev)
4228 + dev_err(&pdev->dev,
4229 + "unable to get DMA resource\n");
4230 + of_node_put(np);
4231 +- goto free_netdev;
4232 ++ goto cleanup_clk;
4233 + }
4234 + lp->dma_regs = devm_ioremap_resource(&pdev->dev,
4235 + &dmares);
4236 +@@ -1936,12 +1961,12 @@ static int axienet_probe(struct platform_device *pdev)
4237 + if (IS_ERR(lp->dma_regs)) {
4238 + dev_err(&pdev->dev, "could not map DMA regs\n");
4239 + ret = PTR_ERR(lp->dma_regs);
4240 +- goto free_netdev;
4241 ++ goto cleanup_clk;
4242 + }
4243 + if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
4244 + dev_err(&pdev->dev, "could not determine irqs\n");
4245 + ret = -ENOMEM;
4246 +- goto free_netdev;
4247 ++ goto cleanup_clk;
4248 + }
4249 +
4250 + /* Autodetect the need for 64-bit DMA pointers.
4251 +@@ -1971,7 +1996,7 @@ static int axienet_probe(struct platform_device *pdev)
4252 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
4253 + if (ret) {
4254 + dev_err(&pdev->dev, "No suitable DMA available\n");
4255 +- goto free_netdev;
4256 ++ goto cleanup_clk;
4257 + }
4258 +
4259 + /* Check for Ethernet core IRQ (optional) */
4260 +@@ -1997,6 +2022,20 @@ static int axienet_probe(struct platform_device *pdev)
4261 + dev_warn(&pdev->dev,
4262 + "error registering MDIO bus: %d\n", ret);
4263 + }
4264 ++ if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
4265 ++ lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
4266 ++ if (!lp->phy_node) {
4267 ++ dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
4268 ++ ret = -EINVAL;
4269 ++ goto cleanup_mdio;
4270 ++ }
4271 ++ lp->pcs_phy = of_mdio_find_device(lp->phy_node);
4272 ++ if (!lp->pcs_phy) {
4273 ++ ret = -EPROBE_DEFER;
4274 ++ goto cleanup_mdio;
4275 ++ }
4276 ++ lp->phylink_config.pcs_poll = true;
4277 ++ }
4278 +
4279 + lp->phylink_config.dev = &ndev->dev;
4280 + lp->phylink_config.type = PHYLINK_NETDEV;
4281 +@@ -2007,17 +2046,30 @@ static int axienet_probe(struct platform_device *pdev)
4282 + if (IS_ERR(lp->phylink)) {
4283 + ret = PTR_ERR(lp->phylink);
4284 + dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
4285 +- goto free_netdev;
4286 ++ goto cleanup_mdio;
4287 + }
4288 +
4289 + ret = register_netdev(lp->ndev);
4290 + if (ret) {
4291 + dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
4292 +- goto free_netdev;
4293 ++ goto cleanup_phylink;
4294 + }
4295 +
4296 + return 0;
4297 +
4298 ++cleanup_phylink:
4299 ++ phylink_destroy(lp->phylink);
4300 ++
4301 ++cleanup_mdio:
4302 ++ if (lp->pcs_phy)
4303 ++ put_device(&lp->pcs_phy->dev);
4304 ++ if (lp->mii_bus)
4305 ++ axienet_mdio_teardown(lp);
4306 ++ of_node_put(lp->phy_node);
4307 ++
4308 ++cleanup_clk:
4309 ++ clk_disable_unprepare(lp->clk);
4310 ++
4311 + free_netdev:
4312 + free_netdev(ndev);
4313 +
4314 +@@ -2034,6 +2086,9 @@ static int axienet_remove(struct platform_device *pdev)
4315 + if (lp->phylink)
4316 + phylink_destroy(lp->phylink);
4317 +
4318 ++ if (lp->pcs_phy)
4319 ++ put_device(&lp->pcs_phy->dev);
4320 ++
4321 + axienet_mdio_teardown(lp);
4322 +
4323 + clk_disable_unprepare(lp->clk);
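The probe rework above converts every failure branch from a bare free_netdev to staged labels that release the clock, the MDIO bus, and the phylink instance in reverse acquisition order. A minimal user-space sketch of that goto-unwind idiom, with stand-in allocations rather than the driver's real resources:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(int fail_step)
    {
        int ret = 0;
        char *clk, *mdio, *phylink;

        clk = malloc(16);                       /* stand-in for clk_prepare_enable() */
        if (!clk)
            return -1;

        mdio = (fail_step == 2) ? NULL : malloc(16);    /* stand-in for mdio setup */
        if (!mdio) {
            ret = -1;
            goto cleanup_clk;
        }

        phylink = (fail_step == 3) ? NULL : malloc(16); /* stand-in for phylink_create() */
        if (!phylink) {
            ret = -1;
            goto cleanup_mdio;
        }

        if (fail_step == 4) {                   /* pretend register_netdev() failed */
            ret = -1;
            goto cleanup_phylink;
        }

        free(phylink); free(mdio); free(clk);   /* success: normal teardown elsewhere */
        return 0;

    cleanup_phylink:
        free(phylink);
    cleanup_mdio:
        free(mdio);
    cleanup_clk:
        free(clk);
        return ret;
    }

    int main(void)
    {
        printf("fail at step 3 -> %d\n", probe(3));
        printf("fail at step 4 -> %d\n", probe(4));
        printf("all steps ok   -> %d\n", probe(0));
        return 0;
    }

Each label frees exactly what was acquired before the jump, so no path leaks and no path double-frees.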
4324 +diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
4325 +index 5090f0f923ad5..1a87a49538c50 100644
4326 +--- a/drivers/net/ipa/ipa_qmi.c
4327 ++++ b/drivers/net/ipa/ipa_qmi.c
4328 +@@ -249,6 +249,7 @@ static struct qmi_msg_handler ipa_server_msg_handlers[] = {
4329 + .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
4330 + .fn = ipa_server_driver_init_complete,
4331 + },
4332 ++ { },
4333 + };
4334 +
4335 + /* Handle an INIT_DRIVER response message from the modem. */
4336 +@@ -269,6 +270,7 @@ static struct qmi_msg_handler ipa_client_msg_handlers[] = {
4337 + .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
4338 + .fn = ipa_client_init_driver,
4339 + },
4340 ++ { },
4341 + };
4342 +
4343 + /* Return a pointer to an init modem driver request structure, which contains
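The ipa_qmi hunks add an empty "{ }" entry to each handler array because the QMI core walks such tables until it reaches an all-zero sentinel; without it, iteration runs off the end of the array. A small sketch of the sentinel-terminated-array pattern, using illustrative types rather than the real QMI structures:

    #include <stdio.h>

    struct msg_handler {
        int msg_id;
        void (*fn)(void);
    };

    static void hello(void) { puts("msg 1 handled"); }

    static const struct msg_handler handlers[] = {
        { .msg_id = 1, .fn = hello },
        { },                    /* sentinel: terminates the walk */
    };

    static const struct msg_handler *lookup(const struct msg_handler *h, int id)
    {
        for (; h->fn; h++)      /* stop at the zeroed sentinel */
            if (h->msg_id == id)
                return h;
        return NULL;
    }

    int main(void)
    {
        const struct msg_handler *h = lookup(handlers, 1);
        if (h)
            h->fn();
        return 0;
    }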
4344 +diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
4345 +index cd271de9609be..dbed15dc0fe77 100644
4346 +--- a/drivers/net/phy/broadcom.c
4347 ++++ b/drivers/net/phy/broadcom.c
4348 +@@ -26,7 +26,46 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
4349 + MODULE_AUTHOR("Maciej W. Rozycki");
4350 + MODULE_LICENSE("GPL");
4351 +
4352 +-static int bcm54xx_config_clock_delay(struct phy_device *phydev);
4353 ++static int bcm54xx_config_clock_delay(struct phy_device *phydev)
4354 ++{
4355 ++ int rc, val;
4356 ++
4357 ++ /* handling PHY's internal RX clock delay */
4358 ++ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
4359 ++ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
4360 ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
4361 ++ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4362 ++ /* Disable RGMII RXC-RXD skew */
4363 ++ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
4364 ++ }
4365 ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
4366 ++ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
4367 ++ /* Enable RGMII RXC-RXD skew */
4368 ++ val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
4369 ++ }
4370 ++ rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
4371 ++ val);
4372 ++ if (rc < 0)
4373 ++ return rc;
4374 ++
4375 ++ /* handling PHY's internal TX clock delay */
4376 ++ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
4377 ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
4378 ++ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
4379 ++ /* Disable internal TX clock delay */
4380 ++ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
4381 ++ }
4382 ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
4383 ++ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4384 ++ /* Enable internal TX clock delay */
4385 ++ val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
4386 ++ }
4387 ++ rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
4388 ++ if (rc < 0)
4389 ++ return rc;
4390 ++
4391 ++ return 0;
4392 ++}
4393 +
4394 + static int bcm54210e_config_init(struct phy_device *phydev)
4395 + {
4396 +@@ -64,45 +103,62 @@ static int bcm54612e_config_init(struct phy_device *phydev)
4397 + return 0;
4398 + }
4399 +
4400 +-static int bcm54xx_config_clock_delay(struct phy_device *phydev)
4401 ++static int bcm54616s_config_init(struct phy_device *phydev)
4402 + {
4403 + int rc, val;
4404 +
4405 +- /* handling PHY's internal RX clock delay */
4406 ++ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
4407 ++ phydev->interface != PHY_INTERFACE_MODE_1000BASEX)
4408 ++ return 0;
4409 ++
4410 ++ /* Ensure proper interface mode is selected. */
4411 ++ /* Disable RGMII mode */
4412 + val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
4413 ++ if (val < 0)
4414 ++ return val;
4415 ++ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN;
4416 + val |= MII_BCM54XX_AUXCTL_MISC_WREN;
4417 +- if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
4418 +- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4419 +- /* Disable RGMII RXC-RXD skew */
4420 +- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
4421 +- }
4422 +- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
4423 +- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
4424 +- /* Enable RGMII RXC-RXD skew */
4425 +- val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
4426 +- }
4427 + rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
4428 + val);
4429 + if (rc < 0)
4430 + return rc;
4431 +
4432 +- /* handling PHY's internal TX clock delay */
4433 +- val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
4434 +- if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
4435 +- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
4436 +- /* Disable internal TX clock delay */
4437 +- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
4438 +- }
4439 +- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
4440 +- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4441 +- /* Enable internal TX clock delay */
4442 +- val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
4443 +- }
4444 +- rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
4445 ++ /* Select 1000BASE-X register set (primary SerDes) */
4446 ++ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
4447 ++ if (val < 0)
4448 ++ return val;
4449 ++ val |= BCM54XX_SHD_MODE_1000BX;
4450 ++ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
4451 + if (rc < 0)
4452 + return rc;
4453 +
4454 +- return 0;
4455 ++ /* Power down SerDes interface */
4456 ++ rc = phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
4457 ++ if (rc < 0)
4458 ++ return rc;
4459 ++
4460 ++ /* Select proper interface mode */
4461 ++ val &= ~BCM54XX_SHD_INTF_SEL_MASK;
4462 ++ val |= phydev->interface == PHY_INTERFACE_MODE_SGMII ?
4463 ++ BCM54XX_SHD_INTF_SEL_SGMII :
4464 ++ BCM54XX_SHD_INTF_SEL_GBIC;
4465 ++ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
4466 ++ if (rc < 0)
4467 ++ return rc;
4468 ++
4469 ++ /* Power up SerDes interface */
4470 ++ rc = phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
4471 ++ if (rc < 0)
4472 ++ return rc;
4473 ++
4474 ++ /* Select copper register set */
4475 ++ val &= ~BCM54XX_SHD_MODE_1000BX;
4476 ++ rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
4477 ++ if (rc < 0)
4478 ++ return rc;
4479 ++
4480 ++ /* Power up copper interface */
4481 ++ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
4482 + }
4483 +
4484 + /* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
4485 +@@ -283,15 +339,21 @@ static int bcm54xx_config_init(struct phy_device *phydev)
4486 +
4487 + bcm54xx_adjust_rxrefclk(phydev);
4488 +
4489 +- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
4490 ++ switch (BRCM_PHY_MODEL(phydev)) {
4491 ++ case PHY_ID_BCM50610:
4492 ++ case PHY_ID_BCM50610M:
4493 ++ err = bcm54xx_config_clock_delay(phydev);
4494 ++ break;
4495 ++ case PHY_ID_BCM54210E:
4496 + err = bcm54210e_config_init(phydev);
4497 +- if (err)
4498 +- return err;
4499 +- } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) {
4500 ++ break;
4501 ++ case PHY_ID_BCM54612E:
4502 + err = bcm54612e_config_init(phydev);
4503 +- if (err)
4504 +- return err;
4505 +- } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
4506 ++ break;
4507 ++ case PHY_ID_BCM54616S:
4508 ++ err = bcm54616s_config_init(phydev);
4509 ++ break;
4510 ++ case PHY_ID_BCM54810:
4511 + /* For BCM54810, we need to disable BroadR-Reach function */
4512 + val = bcm_phy_read_exp(phydev,
4513 + BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
4514 +@@ -299,9 +361,10 @@ static int bcm54xx_config_init(struct phy_device *phydev)
4515 + err = bcm_phy_write_exp(phydev,
4516 + BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
4517 + val);
4518 +- if (err < 0)
4519 +- return err;
4520 ++ break;
4521 + }
4522 ++ if (err)
4523 ++ return err;
4524 +
4525 + bcm54xx_phydsp_config(phydev);
4526 +
4527 +@@ -332,6 +395,11 @@ static int bcm54xx_resume(struct phy_device *phydev)
4528 + if (ret < 0)
4529 + return ret;
4530 +
4531 ++ /* Upon exiting power down, the PHY remains in an internal reset state
4532 ++ * for 40us
4533 ++ */
4534 ++ fsleep(40);
4535 ++
4536 + return bcm54xx_config_init(phydev);
4537 + }
4538 +
4539 +@@ -475,7 +543,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
4540 +
4541 + static int bcm54616s_probe(struct phy_device *phydev)
4542 + {
4543 +- int val, intf_sel;
4544 ++ int val;
4545 +
4546 + val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
4547 + if (val < 0)
4548 +@@ -487,8 +555,7 @@ static int bcm54616s_probe(struct phy_device *phydev)
4549 + * RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
4550 + * support is still missing as of now.
4551 + */
4552 +- intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
4553 +- if (intf_sel == 1) {
4554 ++ if ((val & BCM54XX_SHD_INTF_SEL_MASK) == BCM54XX_SHD_INTF_SEL_RGMII) {
4555 + val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
4556 + if (val < 0)
4557 + return val;
4558 +@@ -500,6 +567,8 @@ static int bcm54616s_probe(struct phy_device *phydev)
4559 + */
4560 + if (!(val & BCM54616S_100FX_MODE))
4561 + phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
4562 ++
4563 ++ phydev->port = PORT_FIBRE;
4564 + }
4565 +
4566 + return 0;
4567 +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
4568 +index c162c9551bd11..a9b058bb1be87 100644
4569 +--- a/drivers/net/phy/dp83822.c
4570 ++++ b/drivers/net/phy/dp83822.c
4571 +@@ -534,6 +534,9 @@ static int dp83822_probe(struct phy_device *phydev)
4572 +
4573 + dp83822_of_init(phydev);
4574 +
4575 ++ if (dp83822->fx_enabled)
4576 ++ phydev->port = PORT_FIBRE;
4577 ++
4578 + return 0;
4579 + }
4580 +
4581 +diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
4582 +index cf6dec7b7d8e7..a9daff88006b3 100644
4583 +--- a/drivers/net/phy/dp83869.c
4584 ++++ b/drivers/net/phy/dp83869.c
4585 +@@ -821,6 +821,10 @@ static int dp83869_probe(struct phy_device *phydev)
4586 + if (ret)
4587 + return ret;
4588 +
4589 ++ if (dp83869->mode == DP83869_RGMII_100_BASE ||
4590 ++ dp83869->mode == DP83869_RGMII_1000_BASE)
4591 ++ phydev->port = PORT_FIBRE;
4592 ++
4593 + return dp83869_config_init(phydev);
4594 + }
4595 +
4596 +diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
4597 +index fec58ad69e02c..cb8e4f0215fe8 100644
4598 +--- a/drivers/net/phy/lxt.c
4599 ++++ b/drivers/net/phy/lxt.c
4600 +@@ -218,6 +218,7 @@ static int lxt973_probe(struct phy_device *phydev)
4601 + phy_write(phydev, MII_BMCR, val);
4602 + /* Remember that the port is in fiber mode. */
4603 + phydev->priv = lxt973_probe;
4604 ++ phydev->port = PORT_FIBRE;
4605 + } else {
4606 + phydev->priv = NULL;
4607 + }
4608 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
4609 +index 5aec673a0120b..5dbdaf0f5f09c 100644
4610 +--- a/drivers/net/phy/marvell.c
4611 ++++ b/drivers/net/phy/marvell.c
4612 +@@ -1449,6 +1449,7 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
4613 + phydev->asym_pause = 0;
4614 + phydev->speed = SPEED_UNKNOWN;
4615 + phydev->duplex = DUPLEX_UNKNOWN;
4616 ++ phydev->port = fiber ? PORT_FIBRE : PORT_TP;
4617 +
4618 + if (phydev->autoneg == AUTONEG_ENABLE)
4619 + err = marvell_read_status_page_an(phydev, fiber, status);
4620 +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
4621 +index 1901ba277413d..b1bb9b8e1e4ed 100644
4622 +--- a/drivers/net/phy/marvell10g.c
4623 ++++ b/drivers/net/phy/marvell10g.c
4624 +@@ -631,6 +631,7 @@ static int mv3310_read_status_10gbaser(struct phy_device *phydev)
4625 + phydev->link = 1;
4626 + phydev->speed = SPEED_10000;
4627 + phydev->duplex = DUPLEX_FULL;
4628 ++ phydev->port = PORT_FIBRE;
4629 +
4630 + return 0;
4631 + }
4632 +@@ -690,6 +691,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
4633 +
4634 + phydev->duplex = cssr1 & MV_PCS_CSSR1_DUPLEX_FULL ?
4635 + DUPLEX_FULL : DUPLEX_HALF;
4636 ++ phydev->port = PORT_TP;
4637 + phydev->mdix = cssr1 & MV_PCS_CSSR1_MDIX ?
4638 + ETH_TP_MDI_X : ETH_TP_MDI;
4639 +
4640 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
4641 +index 47ae1d1723c54..9b0bc8b74bc01 100644
4642 +--- a/drivers/net/phy/micrel.c
4643 ++++ b/drivers/net/phy/micrel.c
4644 +@@ -308,14 +308,19 @@ static int kszphy_config_init(struct phy_device *phydev)
4645 + return kszphy_config_reset(phydev);
4646 + }
4647 +
4648 ++static int ksz8041_fiber_mode(struct phy_device *phydev)
4649 ++{
4650 ++ struct device_node *of_node = phydev->mdio.dev.of_node;
4651 ++
4652 ++ return of_property_read_bool(of_node, "micrel,fiber-mode");
4653 ++}
4654 ++
4655 + static int ksz8041_config_init(struct phy_device *phydev)
4656 + {
4657 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
4658 +
4659 +- struct device_node *of_node = phydev->mdio.dev.of_node;
4660 +-
4661 + /* Limit supported and advertised modes in fiber mode */
4662 +- if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
4663 ++ if (ksz8041_fiber_mode(phydev)) {
4664 + phydev->dev_flags |= MICREL_PHY_FXEN;
4665 + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
4666 + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
4667 +@@ -1143,6 +1148,9 @@ static int kszphy_probe(struct phy_device *phydev)
4668 + }
4669 + }
4670 +
4671 ++ if (ksz8041_fiber_mode(phydev))
4672 ++ phydev->port = PORT_FIBRE;
4673 ++
4674 + /* Support legacy board-file configuration */
4675 + if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
4676 + priv->rmii_ref_clk_sel = true;
4677 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
4678 +index 49e96ca585fff..28ddaad721ed1 100644
4679 +--- a/drivers/net/phy/phy.c
4680 ++++ b/drivers/net/phy/phy.c
4681 +@@ -327,7 +327,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
4682 + if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
4683 + cmd->base.port = PORT_BNC;
4684 + else
4685 +- cmd->base.port = PORT_MII;
4686 ++ cmd->base.port = phydev->port;
4687 + cmd->base.transceiver = phy_is_internal(phydev) ?
4688 + XCVR_INTERNAL : XCVR_EXTERNAL;
4689 + cmd->base.phy_address = phydev->mdio.addr;
4690 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
4691 +index 2d4eed2d61ce9..85f3cde5ffd09 100644
4692 +--- a/drivers/net/phy/phy_device.c
4693 ++++ b/drivers/net/phy/phy_device.c
4694 +@@ -576,6 +576,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
4695 + dev->pause = 0;
4696 + dev->asym_pause = 0;
4697 + dev->link = 0;
4698 ++ dev->port = PORT_TP;
4699 + dev->interface = PHY_INTERFACE_MODE_GMII;
4700 +
4701 + dev->autoneg = AUTONEG_ENABLE;
4702 +@@ -1384,6 +1385,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
4703 +
4704 + phydev->state = PHY_READY;
4705 +
4706 ++ /* Port is set to PORT_TP by default and the actual PHY driver will set
4707 ++ * it to a different value depending on the PHY configuration. If we have
4708 ++ * the generic PHY driver we can't figure it out, thus set the old
4709 ++ * legacy PORT_MII value.
4710 ++ */
4711 ++ if (using_genphy)
4712 ++ phydev->port = PORT_MII;
4713 ++
4714 + /* Initial carrier state is off as the phy is about to be
4715 + * (re)initialized.
4716 + */
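The comment above describes a three-way policy: the core defaults every PHY to PORT_TP, a specific driver may override that (the other hunks in this patch set PORT_FIBRE where appropriate), and the generic driver, which cannot tell, falls back to the legacy PORT_MII. A toy model of that policy, with illustrative values only:

    #include <stdio.h>

    enum port { PORT_TP, PORT_MII, PORT_FIBRE };

    struct phy { enum port port; };

    static void attach(struct phy *p, int using_genphy, int driver_says_fiber)
    {
        p->port = PORT_TP;              /* core default at creation */
        if (driver_says_fiber)
            p->port = PORT_FIBRE;       /* specific driver overrides */
        if (using_genphy)
            p->port = PORT_MII;         /* generic driver: keep legacy value */
    }

    int main(void)
    {
        struct phy p;

        attach(&p, 0, 1);
        printf("specific fiber driver -> %d\n", p.port);
        attach(&p, 1, 0);
        printf("generic driver        -> %d\n", p.port);
        return 0;
    }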
4717 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
4718 +index fe2296fdda19d..6072e87ed6c3c 100644
4719 +--- a/drivers/net/phy/phylink.c
4720 ++++ b/drivers/net/phy/phylink.c
4721 +@@ -472,7 +472,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
4722 + err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
4723 + state->interface);
4724 + if (err < 0)
4725 +- phylink_err(pl, "mac_prepare failed: %pe\n",
4726 ++ phylink_err(pl, "mac_finish failed: %pe\n",
4727 + ERR_PTR(err));
4728 + }
4729 + }
4730 +diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
4731 +index dba847f280962..2520421946a6a 100644
4732 +--- a/drivers/net/usb/cdc-phonet.c
4733 ++++ b/drivers/net/usb/cdc-phonet.c
4734 +@@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
4735 +
4736 + err = register_netdev(dev);
4737 + if (err) {
4738 ++ /* Set disconnected flag so that disconnect() returns early. */
4739 ++ pnd->disconnected = 1;
4740 + usb_driver_release_interface(&usbpn_driver, data_intf);
4741 + goto out;
4742 + }
4743 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
4744 +index 88f177aca342e..f5010f8ac1ec7 100644
4745 +--- a/drivers/net/usb/r8152.c
4746 ++++ b/drivers/net/usb/r8152.c
4747 +@@ -3033,29 +3033,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
4748 + device_set_wakeup_enable(&tp->udev->dev, false);
4749 + }
4750 +
4751 +-static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
4752 +-{
4753 +- /* MAC clock speed down */
4754 +- if (enable) {
4755 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
4756 +- ALDPS_SPDWN_RATIO);
4757 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
4758 +- EEE_SPDWN_RATIO);
4759 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
4760 +- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
4761 +- U1U2_SPDWN_EN | L1_SPDWN_EN);
4762 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
4763 +- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
4764 +- TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
4765 +- TP1000_SPDWN_EN);
4766 +- } else {
4767 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
4768 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
4769 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
4770 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
4771 +- }
4772 +-}
4773 +-
4774 + static void r8153_u1u2en(struct r8152 *tp, bool enable)
4775 + {
4776 + u8 u1u2[8];
4777 +@@ -3355,11 +3332,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
4778 + if (enable) {
4779 + r8153_u1u2en(tp, false);
4780 + r8153_u2p3en(tp, false);
4781 +- r8153_mac_clk_spd(tp, true);
4782 + rtl_runtime_suspend_enable(tp, true);
4783 + } else {
4784 + rtl_runtime_suspend_enable(tp, false);
4785 +- r8153_mac_clk_spd(tp, false);
4786 +
4787 + switch (tp->version) {
4788 + case RTL_VER_03:
4789 +@@ -4695,7 +4670,6 @@ static void r8153_first_init(struct r8152 *tp)
4790 + {
4791 + u32 ocp_data;
4792 +
4793 +- r8153_mac_clk_spd(tp, false);
4794 + rxdy_gated_en(tp, true);
4795 + r8153_teredo_off(tp);
4796 +
4797 +@@ -4746,8 +4720,6 @@ static void r8153_enter_oob(struct r8152 *tp)
4798 + {
4799 + u32 ocp_data;
4800 +
4801 +- r8153_mac_clk_spd(tp, true);
4802 +-
4803 + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
4804 + ocp_data &= ~NOW_IS_OOB;
4805 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
4806 +@@ -5473,10 +5445,15 @@ static void r8153_init(struct r8152 *tp)
4807 +
4808 + ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
4809 +
4810 ++ /* MAC clock speed down */
4811 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
4812 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
4813 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
4814 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
4815 ++
4816 + r8153_power_cut_en(tp, false);
4817 + rtl_runtime_suspend_enable(tp, false);
4818 + r8153_u1u2en(tp, true);
4819 +- r8153_mac_clk_spd(tp, false);
4820 + usb_enable_lpm(tp->udev);
4821 +
4822 + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
4823 +@@ -6542,7 +6519,10 @@ static int rtl_ops_init(struct r8152 *tp)
4824 + ops->in_nway = rtl8153_in_nway;
4825 + ops->hw_phy_cfg = r8153_hw_phy_cfg;
4826 + ops->autosuspend_en = rtl8153_runtime_enable;
4827 +- tp->rx_buf_sz = 32 * 1024;
4828 ++ if (tp->udev->speed < USB_SPEED_SUPER)
4829 ++ tp->rx_buf_sz = 16 * 1024;
4830 ++ else
4831 ++ tp->rx_buf_sz = 32 * 1024;
4832 + tp->eee_en = true;
4833 + tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
4834 + break;
4835 +diff --git a/drivers/net/veth.c b/drivers/net/veth.c
4836 +index 8c737668008a0..be18b243642f0 100644
4837 +--- a/drivers/net/veth.c
4838 ++++ b/drivers/net/veth.c
4839 +@@ -301,8 +301,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
4840 + if (rxq < rcv->real_num_rx_queues) {
4841 + rq = &rcv_priv->rq[rxq];
4842 + rcv_xdp = rcu_access_pointer(rq->xdp_prog);
4843 +- if (rcv_xdp)
4844 +- skb_record_rx_queue(skb, rxq);
4845 ++ skb_record_rx_queue(skb, rxq);
4846 + }
4847 +
4848 + skb_tx_timestamp(skb);
4849 +diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
4850 +index dca97cd7c4e75..7eac6a3e1cdee 100644
4851 +--- a/drivers/net/wan/fsl_ucc_hdlc.c
4852 ++++ b/drivers/net/wan/fsl_ucc_hdlc.c
4853 +@@ -204,14 +204,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
4854 + priv->rx_skbuff = kcalloc(priv->rx_ring_size,
4855 + sizeof(*priv->rx_skbuff),
4856 + GFP_KERNEL);
4857 +- if (!priv->rx_skbuff)
4858 ++ if (!priv->rx_skbuff) {
4859 ++ ret = -ENOMEM;
4860 + goto free_ucc_pram;
4861 ++ }
4862 +
4863 + priv->tx_skbuff = kcalloc(priv->tx_ring_size,
4864 + sizeof(*priv->tx_skbuff),
4865 + GFP_KERNEL);
4866 +- if (!priv->tx_skbuff)
4867 ++ if (!priv->tx_skbuff) {
4868 ++ ret = -ENOMEM;
4869 + goto free_rx_skbuff;
4870 ++ }
4871 +
4872 + priv->skb_curtx = 0;
4873 + priv->skb_dirtytx = 0;
4874 +diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
4875 +index 34bc53facd11c..6938cb3bdf4e9 100644
4876 +--- a/drivers/net/wan/hdlc_x25.c
4877 ++++ b/drivers/net/wan/hdlc_x25.c
4878 +@@ -23,6 +23,8 @@
4879 +
4880 + struct x25_state {
4881 + x25_hdlc_proto settings;
4882 ++ bool up;
4883 ++ spinlock_t up_lock; /* Protects "up" */
4884 + };
4885 +
4886 + static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
4887 +@@ -105,6 +107,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
4888 +
4889 + static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
4890 + {
4891 ++ hdlc_device *hdlc = dev_to_hdlc(dev);
4892 ++ struct x25_state *x25st = state(hdlc);
4893 + int result;
4894 +
4895 + /* There should be a pseudo header of 1 byte added by upper layers.
4896 +@@ -115,12 +119,20 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
4897 + return NETDEV_TX_OK;
4898 + }
4899 +
4900 ++ spin_lock_bh(&x25st->up_lock);
4901 ++ if (!x25st->up) {
4902 ++ spin_unlock_bh(&x25st->up_lock);
4903 ++ kfree_skb(skb);
4904 ++ return NETDEV_TX_OK;
4905 ++ }
4906 ++
4907 + switch (skb->data[0]) {
4908 + case X25_IFACE_DATA: /* Data to be transmitted */
4909 + skb_pull(skb, 1);
4910 + skb_reset_network_header(skb);
4911 + if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
4912 + dev_kfree_skb(skb);
4913 ++ spin_unlock_bh(&x25st->up_lock);
4914 + return NETDEV_TX_OK;
4915 +
4916 + case X25_IFACE_CONNECT:
4917 +@@ -149,6 +161,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
4918 + break;
4919 + }
4920 +
4921 ++ spin_unlock_bh(&x25st->up_lock);
4922 + dev_kfree_skb(skb);
4923 + return NETDEV_TX_OK;
4924 + }
4925 +@@ -166,6 +179,7 @@ static int x25_open(struct net_device *dev)
4926 + .data_transmit = x25_data_transmit,
4927 + };
4928 + hdlc_device *hdlc = dev_to_hdlc(dev);
4929 ++ struct x25_state *x25st = state(hdlc);
4930 + struct lapb_parms_struct params;
4931 + int result;
4932 +
4933 +@@ -192,6 +206,10 @@ static int x25_open(struct net_device *dev)
4934 + if (result != LAPB_OK)
4935 + return -EINVAL;
4936 +
4937 ++ spin_lock_bh(&x25st->up_lock);
4938 ++ x25st->up = true;
4939 ++ spin_unlock_bh(&x25st->up_lock);
4940 ++
4941 + return 0;
4942 + }
4943 +
4944 +@@ -199,6 +217,13 @@ static int x25_open(struct net_device *dev)
4945 +
4946 + static void x25_close(struct net_device *dev)
4947 + {
4948 ++ hdlc_device *hdlc = dev_to_hdlc(dev);
4949 ++ struct x25_state *x25st = state(hdlc);
4950 ++
4951 ++ spin_lock_bh(&x25st->up_lock);
4952 ++ x25st->up = false;
4953 ++ spin_unlock_bh(&x25st->up_lock);
4954 ++
4955 + lapb_unregister(dev);
4956 + }
4957 +
4958 +@@ -207,15 +232,28 @@ static void x25_close(struct net_device *dev)
4959 + static int x25_rx(struct sk_buff *skb)
4960 + {
4961 + struct net_device *dev = skb->dev;
4962 ++ hdlc_device *hdlc = dev_to_hdlc(dev);
4963 ++ struct x25_state *x25st = state(hdlc);
4964 +
4965 + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
4966 + dev->stats.rx_dropped++;
4967 + return NET_RX_DROP;
4968 + }
4969 +
4970 +- if (lapb_data_received(dev, skb) == LAPB_OK)
4971 ++ spin_lock_bh(&x25st->up_lock);
4972 ++ if (!x25st->up) {
4973 ++ spin_unlock_bh(&x25st->up_lock);
4974 ++ kfree_skb(skb);
4975 ++ dev->stats.rx_dropped++;
4976 ++ return NET_RX_DROP;
4977 ++ }
4978 ++
4979 ++ if (lapb_data_received(dev, skb) == LAPB_OK) {
4980 ++ spin_unlock_bh(&x25st->up_lock);
4981 + return NET_RX_SUCCESS;
4982 ++ }
4983 +
4984 ++ spin_unlock_bh(&x25st->up_lock);
4985 + dev->stats.rx_errors++;
4986 + dev_kfree_skb_any(skb);
4987 + return NET_RX_DROP;
4988 +@@ -300,6 +338,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
4989 + return result;
4990 +
4991 + memcpy(&state(hdlc)->settings, &new_settings, size);
4992 ++ state(hdlc)->up = false;
4993 ++ spin_lock_init(&state(hdlc)->up_lock);
4994 +
4995 + /* There's no header_ops so hard_header_len should be 0. */
4996 + dev->hard_header_len = 0;
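The hdlc_x25 changes gate both the transmit and receive paths on an "up" flag taken under the new spinlock, so LAPB is never entered once x25_close() has run. A user-space sketch of the same gating, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t up_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool up;

    static void x25_open(void)
    {
        pthread_mutex_lock(&up_lock);
        up = true;
        pthread_mutex_unlock(&up_lock);
    }

    static void x25_close(void)
    {
        pthread_mutex_lock(&up_lock);
        up = false;
        pthread_mutex_unlock(&up_lock);
    }

    static int x25_xmit(const char *pkt)
    {
        int sent = 0;

        pthread_mutex_lock(&up_lock);
        if (up) {
            printf("tx: %s\n", pkt);    /* lapb_data_request() would go here */
            sent = 1;
        }                               /* else: frame is dropped */
        pthread_mutex_unlock(&up_lock);
        return sent;
    }

    int main(void)
    {
        x25_xmit("early frame");        /* dropped: not up yet */
        x25_open();
        x25_xmit("data");               /* delivered */
        x25_close();
        x25_xmit("late frame");         /* dropped again */
        return 0;
    }

Because both paths check the flag under the same lock, a frame can never race past a concurrent close.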
4997 +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
4998 +index 262c40dc14a63..665a03ebf9efd 100644
4999 +--- a/drivers/net/wireless/mediatek/mt76/dma.c
5000 ++++ b/drivers/net/wireless/mediatek/mt76/dma.c
5001 +@@ -355,7 +355,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
5002 + };
5003 + struct ieee80211_hw *hw;
5004 + int len, n = 0, ret = -ENOMEM;
5005 +- struct mt76_queue_entry e;
5006 + struct mt76_txwi_cache *t;
5007 + struct sk_buff *iter;
5008 + dma_addr_t addr;
5009 +@@ -397,6 +396,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
5010 + }
5011 + tx_info.nbuf = n;
5012 +
5013 ++ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
5014 ++ ret = -ENOMEM;
5015 ++ goto unmap;
5016 ++ }
5017 ++
5018 + dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
5019 + DMA_TO_DEVICE);
5020 + ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
5021 +@@ -405,11 +409,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
5022 + if (ret < 0)
5023 + goto unmap;
5024 +
5025 +- if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
5026 +- ret = -ENOMEM;
5027 +- goto unmap;
5028 +- }
5029 +-
5030 + return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
5031 + tx_info.info, tx_info.skb, t);
5032 +
5033 +@@ -425,9 +424,7 @@ free:
5034 + dev->test.tx_done--;
5035 + #endif
5036 +
5037 +- e.skb = tx_info.skb;
5038 +- e.txwi = t;
5039 +- dev->drv->tx_complete_skb(dev, &e);
5040 ++ dev_kfree_skb(tx_info.skb);
5041 + mt76_put_txwi(dev, t);
5042 + return ret;
5043 + }
5044 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5045 +index de846aaa8728b..610d2bc43ea2d 100644
5046 +--- a/drivers/nvme/host/core.c
5047 ++++ b/drivers/nvme/host/core.c
5048 +@@ -346,6 +346,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
5049 + return true;
5050 +
5051 + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
5052 ++ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
5053 + blk_mq_complete_request(req);
5054 + return true;
5055 + }
5056 +@@ -1371,7 +1372,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
5057 + goto out_free_id;
5058 + }
5059 +
5060 +- error = -ENODEV;
5061 ++ error = NVME_SC_INVALID_NS | NVME_SC_DNR;
5062 + if ((*id)->ncap == 0) /* namespace not allocated or attached */
5063 + goto out_free_id;
5064 +
5065 +@@ -3959,7 +3960,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
5066 + static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
5067 + {
5068 + struct nvme_id_ns *id;
5069 +- int ret = -ENODEV;
5070 ++ int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
5071 +
5072 + if (test_bit(NVME_NS_DEAD, &ns->flags))
5073 + goto out;
5074 +@@ -3968,7 +3969,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
5075 + if (ret)
5076 + goto out;
5077 +
5078 +- ret = -ENODEV;
5079 ++ ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
5080 + if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
5081 + dev_err(ns->ctrl->device,
5082 + "identifiers changed for nsid %d\n", ns->head->ns_id);
5083 +@@ -3986,7 +3987,7 @@ out:
5084 + *
5085 + * TODO: we should probably schedule a delayed retry here.
5086 + */
5087 +- if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
5088 ++ if (ret > 0 && (ret & NVME_SC_DNR))
5089 + nvme_ns_remove(ns);
5090 + else
5091 + revalidate_disk_size(ns->disk, true);
5092 +@@ -4018,6 +4019,12 @@ static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
5093 + nsid);
5094 + break;
5095 + }
5096 ++ if (!nvme_multi_css(ctrl)) {
5097 ++ dev_warn(ctrl->device,
5098 ++ "command set not reported for nsid: %d\n",
5099 ++ nsid);
5100 ++ break;
5101 ++ }
5102 + nvme_alloc_ns(ctrl, nsid, &ids);
5103 + break;
5104 + default:
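The nvme hunks switch several error paths from -ENODEV to a spec status word so the removal decision can key off the DNR (do-not-retry) bit alone: negative values are local errnos worth retrying, positive values are NVMe statuses, and only a status with DNR set removes the namespace. A sketch of that decision, assuming the standard bit layout; the surrounding code is illustrative:

    #include <stdio.h>

    #define NVME_SC_INVALID_NS 0x0b
    #define NVME_SC_DNR        0x4000      /* Do Not Retry bit */

    static int should_remove(int ret)
    {
        return ret > 0 && (ret & NVME_SC_DNR);
    }

    int main(void)
    {
        printf("errno -ENOMEM     -> remove? %d\n", should_remove(-12));
        printf("status w/o DNR    -> remove? %d\n",
               should_remove(NVME_SC_INVALID_NS));
        printf("status with DNR   -> remove? %d\n",
               should_remove(NVME_SC_INVALID_NS | NVME_SC_DNR));
        return 0;
    }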
5105 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
5106 +index fab068c8ba026..41257daf7464d 100644
5107 +--- a/drivers/nvme/host/fc.c
5108 ++++ b/drivers/nvme/host/fc.c
5109 +@@ -1956,7 +1956,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
5110 + sizeof(op->rsp_iu), DMA_FROM_DEVICE);
5111 +
5112 + if (opstate == FCPOP_STATE_ABORTED)
5113 +- status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
5114 ++ status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
5115 + else if (freq->status) {
5116 + status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
5117 + dev_info(ctrl->ctrl.device,
5118 +@@ -2443,6 +2443,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
5119 + struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
5120 + struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
5121 +
5122 ++ op->nreq.flags |= NVME_REQ_CANCELLED;
5123 + __nvme_fc_abort_op(ctrl, op);
5124 + return true;
5125 + }
5126 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5127 +index 99c59f93a0641..4dca58f4afdf7 100644
5128 +--- a/drivers/nvme/host/pci.c
5129 ++++ b/drivers/nvme/host/pci.c
5130 +@@ -3247,6 +3247,7 @@ static const struct pci_device_id nvme_id_table[] = {
5131 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
5132 + { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
5133 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
5134 ++ NVME_QUIRK_DISABLE_WRITE_ZEROES|
5135 + NVME_QUIRK_IGNORE_DEV_SUBNQN, },
5136 + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
5137 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
5138 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
5139 +index 06b6b742bb213..6c1f3ab7649c7 100644
5140 +--- a/drivers/nvme/target/rdma.c
5141 ++++ b/drivers/nvme/target/rdma.c
5142 +@@ -802,9 +802,8 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
5143 + nvmet_req_uninit(&rsp->req);
5144 + nvmet_rdma_release_rsp(rsp);
5145 + if (wc->status != IB_WC_WR_FLUSH_ERR) {
5146 +- pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
5147 +- wc->wr_cqe, ib_wc_status_msg(wc->status),
5148 +- wc->status);
5149 ++ pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
5150 ++ ib_wc_status_msg(wc->status), wc->status);
5151 + nvmet_rdma_error_comp(queue);
5152 + }
5153 + return;
5154 +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
5155 +index 30a9062d2b4b8..a90c32d072da3 100644
5156 +--- a/drivers/platform/x86/intel-vbtn.c
5157 ++++ b/drivers/platform/x86/intel-vbtn.c
5158 +@@ -47,8 +47,16 @@ static const struct key_entry intel_vbtn_keymap[] = {
5159 + };
5160 +
5161 + static const struct key_entry intel_vbtn_switchmap[] = {
5162 +- { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
5163 +- { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
5164 ++ /*
5165 ++ * SW_DOCK should only be reported for docking stations, but DSDTs using the
5166 ++ * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set
5167 ++ * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
5168 ++ * This causes userspace to think the laptop is docked to a port-replicator
5169 ++ * and to disable suspend-on-lid-close, which is undesirable.
5170 ++ * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
5171 ++ */
5172 ++ { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
5173 ++ { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
5174 + { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
5175 + { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
5176 + };
5177 +diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
5178 +index 52e4396d40717..c3036591b259a 100644
5179 +--- a/drivers/regulator/qcom-rpmh-regulator.c
5180 ++++ b/drivers/regulator/qcom-rpmh-regulator.c
5181 +@@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
5182 + static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
5183 + .regulator_type = VRM,
5184 + .ops = &rpmh_regulator_vrm_ops,
5185 +- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
5186 +- .n_voltages = 5,
5187 ++ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
5188 ++ .n_voltages = 236,
5189 + .pmic_mode_map = pmic_mode_map_pmic5_smps,
5190 + .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
5191 + };
5192 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
5193 +index bb940cbcbb5dd..ac25ec5f97388 100644
5194 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
5195 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
5196 +@@ -7358,14 +7358,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5197 + ioc->pend_os_device_add_sz++;
5198 + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
5199 + GFP_KERNEL);
5200 +- if (!ioc->pend_os_device_add)
5201 ++ if (!ioc->pend_os_device_add) {
5202 ++ r = -ENOMEM;
5203 + goto out_free_resources;
5204 ++ }
5205 +
5206 + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
5207 + ioc->device_remove_in_progress =
5208 + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
5209 +- if (!ioc->device_remove_in_progress)
5210 ++ if (!ioc->device_remove_in_progress) {
5211 ++ r = -ENOMEM;
5212 + goto out_free_resources;
5213 ++ }
5214 +
5215 + ioc->fwfault_debug = mpt3sas_fwfault_debug;
5216 +
5217 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
5218 +index 47ad64b066236..69c5b5ee2169b 100644
5219 +--- a/drivers/scsi/qedi/qedi_main.c
5220 ++++ b/drivers/scsi/qedi/qedi_main.c
5221 +@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
5222 + if (!qedi->global_queues[i]) {
5223 + QEDI_ERR(&qedi->dbg_ctx,
5224 + "Unable to allocation global queue %d.\n", i);
5225 ++ status = -ENOMEM;
5226 + goto mem_alloc_failure;
5227 + }
5228 +
5229 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
5230 +index a27a625839e68..dcae8f071c355 100644
5231 +--- a/drivers/scsi/qla2xxx/qla_target.c
5232 ++++ b/drivers/scsi/qla2xxx/qla_target.c
5233 +@@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
5234 + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
5235 + (cmd->sess && cmd->sess->deleted)) {
5236 + cmd->state = QLA_TGT_STATE_PROCESSED;
5237 +- res = 0;
5238 +- goto free;
5239 ++ return 0;
5240 + }
5241 +
5242 + ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
5243 +@@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
5244 +
5245 + res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
5246 + &full_req_cnt);
5247 +- if (unlikely(res != 0))
5248 +- goto free;
5249 ++ if (unlikely(res != 0)) {
5250 ++ return res;
5251 ++ }
5252 +
5253 + spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5254 +
5255 +@@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
5256 + vha->flags.online, qla2x00_reset_active(vha),
5257 + cmd->reset_count, qpair->chip_reset);
5258 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5259 +- res = 0;
5260 +- goto free;
5261 ++ return 0;
5262 + }
5263 +
5264 + /* Does F/W have an IOCBs for this request */
5265 +@@ -3359,8 +3358,6 @@ out_unmap_unlock:
5266 + qlt_unmap_sg(vha, cmd);
5267 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5268 +
5269 +-free:
5270 +- vha->hw->tgt.tgt_ops->free_cmd(cmd);
5271 + return res;
5272 + }
5273 + EXPORT_SYMBOL(qlt_xmit_response);
5274 +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
5275 +index 61017acd3458b..7405fab324c82 100644
5276 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
5277 ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
5278 +@@ -646,7 +646,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
5279 + {
5280 + struct qla_tgt_cmd *cmd = container_of(se_cmd,
5281 + struct qla_tgt_cmd, se_cmd);
5282 +- struct scsi_qla_host *vha = cmd->vha;
5283 +
5284 + if (cmd->aborted) {
5285 + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
5286 +@@ -659,7 +658,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
5287 + cmd->se_cmd.transport_state,
5288 + cmd->se_cmd.t_state,
5289 + cmd->se_cmd.se_cmd_flags);
5290 +- vha->hw->tgt.tgt_ops->free_cmd(cmd);
5291 + return 0;
5292 + }
5293 +
5294 +@@ -687,7 +685,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
5295 + {
5296 + struct qla_tgt_cmd *cmd = container_of(se_cmd,
5297 + struct qla_tgt_cmd, se_cmd);
5298 +- struct scsi_qla_host *vha = cmd->vha;
5299 + int xmit_type = QLA_TGT_XMIT_STATUS;
5300 +
5301 + if (cmd->aborted) {
5302 +@@ -701,7 +698,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
5303 + cmd, kref_read(&cmd->se_cmd.cmd_kref),
5304 + cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
5305 + cmd->se_cmd.se_cmd_flags);
5306 +- vha->hw->tgt.tgt_ops->free_cmd(cmd);
5307 + return 0;
5308 + }
5309 + cmd->bufflen = se_cmd->data_length;
5310 +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
5311 +index a244c8ae1b4eb..20182e39cb282 100644
5312 +--- a/drivers/scsi/ufs/ufs-qcom.c
5313 ++++ b/drivers/scsi/ufs/ufs-qcom.c
5314 +@@ -253,12 +253,17 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
5315 + {
5316 + int ret = 0;
5317 + struct ufs_qcom_host *host = ufshcd_get_variant(hba);
5318 ++ bool reenable_intr = false;
5319 +
5320 + if (!host->core_reset) {
5321 + dev_warn(hba->dev, "%s: reset control not set\n", __func__);
5322 + goto out;
5323 + }
5324 +
5325 ++ reenable_intr = hba->is_irq_enabled;
5326 ++ disable_irq(hba->irq);
5327 ++ hba->is_irq_enabled = false;
5328 ++
5329 + ret = reset_control_assert(host->core_reset);
5330 + if (ret) {
5331 + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
5332 +@@ -280,6 +285,11 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
5333 +
5334 + usleep_range(1000, 1100);
5335 +
5336 ++ if (reenable_intr) {
5337 ++ enable_irq(hba->irq);
5338 ++ hba->is_irq_enabled = true;
5339 ++ }
5340 ++
5341 + out:
5342 + return ret;
5343 + }
5344 +diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
5345 +index c8b14b3a171f7..fb067b5e4a977 100644
5346 +--- a/drivers/soc/ti/omap_prm.c
5347 ++++ b/drivers/soc/ti/omap_prm.c
5348 +@@ -522,8 +522,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
5349 + reset->prm->data->name, id);
5350 +
5351 + exit:
5352 +- if (reset->clkdm)
5353 ++ if (reset->clkdm) {
5354 ++ /* At least dra7 iva needs a delay before clkdm idle */
5355 ++ if (has_rstst)
5356 ++ udelay(1);
5357 + pdata->clkdm_allow_idle(reset->clkdm);
5358 ++ }
5359 +
5360 + return ret;
5361 + }
5362 +diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
5363 +index 03fcc23516fd3..6e7d84ac06f50 100644
5364 +--- a/drivers/staging/rtl8192e/Kconfig
5365 ++++ b/drivers/staging/rtl8192e/Kconfig
5366 +@@ -26,6 +26,7 @@ config RTLLIB_CRYPTO_CCMP
5367 + config RTLLIB_CRYPTO_TKIP
5368 + tristate "Support for rtllib TKIP crypto"
5369 + depends on RTLLIB
5370 ++ select CRYPTO
5371 + select CRYPTO_LIB_ARC4
5372 + select CRYPTO_MICHAEL_MIC
5373 + default y
5374 +diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
5375 +index 41645fe6ad48a..ea0efd290c372 100644
5376 +--- a/drivers/xen/Kconfig
5377 ++++ b/drivers/xen/Kconfig
5378 +@@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG
5379 +
5380 + SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
5381 +
5382 +-config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
5383 ++config XEN_MEMORY_HOTPLUG_LIMIT
5384 + int "Hotplugged memory limit (in GiB) for a PV guest"
5385 + default 512
5386 + depends on XEN_HAVE_PVMMU
5387 +- depends on XEN_BALLOON_MEMORY_HOTPLUG
5388 ++ depends on MEMORY_HOTPLUG
5389 + help
5390 + Maximum amount of memory (in GiB) that a PV guest can be
5391 + expanded to when using memory hotplug.
5392 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
5393 +index cd9b1a16489b4..4bac32a274ceb 100644
5394 +--- a/fs/btrfs/qgroup.c
5395 ++++ b/fs/btrfs/qgroup.c
5396 +@@ -226,7 +226,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
5397 + {
5398 + struct btrfs_qgroup_list *list;
5399 +
5400 +- btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
5401 + list_del(&qgroup->dirty);
5402 + while (!list_empty(&qgroup->groups)) {
5403 + list = list_first_entry(&qgroup->groups,
5404 +@@ -243,7 +242,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
5405 + list_del(&list->next_member);
5406 + kfree(list);
5407 + }
5408 +- kfree(qgroup);
5409 + }
5410 +
5411 + /* must be called with qgroup_lock held */
5412 +@@ -569,6 +567,8 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
5413 + qgroup = rb_entry(n, struct btrfs_qgroup, node);
5414 + rb_erase(n, &fs_info->qgroup_tree);
5415 + __del_qgroup_rb(fs_info, qgroup);
5416 ++ btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
5417 ++ kfree(qgroup);
5418 + }
5419 + /*
5420 + * We call btrfs_free_qgroup_config() when unmounting
5421 +@@ -1580,6 +1580,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
5422 + spin_lock(&fs_info->qgroup_lock);
5423 + del_qgroup_rb(fs_info, qgroupid);
5424 + spin_unlock(&fs_info->qgroup_lock);
5425 ++
5426 ++ /*
5427 ++ * Remove the qgroup from sysfs now without holding the qgroup_lock
5428 ++ * spinlock, since the sysfs_remove_group() function needs to take
5429 ++ * the kernfs_mutex mutex via kernfs_remove_by_name_ns().
5430 ++ */
5431 ++ btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
5432 ++ kfree(qgroup);
5433 + out:
5434 + mutex_unlock(&fs_info->qgroup_ioctl_lock);
5435 + return ret;
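Per the comment, the qgroup fix unlinks the object from the shared tree while holding the qgroup_lock spinlock, but performs the sysfs removal and kfree() only after dropping it, because kernfs takes a mutex and may sleep. A user-space sketch of that unlink-then-cleanup ordering, with a pthread mutex in place of the spinlock and illustrative names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct qgroup { int id; struct qgroup *next; };

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qgroup *tree;

    static void remove_qgroup(int id)
    {
        struct qgroup **pp, *found = NULL;

        pthread_mutex_lock(&tree_lock);
        for (pp = &tree; *pp; pp = &(*pp)->next) {
            if ((*pp)->id == id) {
                found = *pp;
                *pp = found->next;      /* unlink under the hot lock */
                break;
            }
        }
        pthread_mutex_unlock(&tree_lock);

        if (found) {
            /* may-sleep cleanup happens outside the lock */
            printf("sysfs del + free qgroup %d\n", found->id);
            free(found);
        }
    }

    int main(void)
    {
        struct qgroup *q = malloc(sizeof(*q));

        q->id = 7;
        q->next = NULL;
        tree = q;
        remove_qgroup(7);
        return 0;
    }

Once the node is unlinked, no other thread can reach it through the tree, so the slow cleanup needs no lock at all.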
5436 +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
5437 +index e027c718ca01a..8ffc40e84a594 100644
5438 +--- a/fs/cachefiles/rdwr.c
5439 ++++ b/fs/cachefiles/rdwr.c
5440 +@@ -24,17 +24,16 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
5441 + container_of(wait, struct cachefiles_one_read, monitor);
5442 + struct cachefiles_object *object;
5443 + struct fscache_retrieval *op = monitor->op;
5444 +- struct wait_bit_key *key = _key;
5445 ++ struct wait_page_key *key = _key;
5446 + struct page *page = wait->private;
5447 +
5448 + ASSERT(key);
5449 +
5450 + _enter("{%lu},%u,%d,{%p,%u}",
5451 + monitor->netfs_page->index, mode, sync,
5452 +- key->flags, key->bit_nr);
5453 ++ key->page, key->bit_nr);
5454 +
5455 +- if (key->flags != &page->flags ||
5456 +- key->bit_nr != PG_locked)
5457 ++ if (key->page != page || key->bit_nr != PG_locked)
5458 + return 0;
5459 +
5460 + _debug("--- monitor %p %lx ---", page, page->flags);
5461 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5462 +index 3295516af2aec..248ee81e01516 100644
5463 +--- a/fs/cifs/cifsglob.h
5464 ++++ b/fs/cifs/cifsglob.h
5465 +@@ -1002,8 +1002,8 @@ struct cifs_ses {
5466 + bool binding:1; /* are we binding the session? */
5467 + __u16 session_flags;
5468 + __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
5469 +- __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
5470 +- __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
5471 ++ __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
5472 ++ __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
5473 + __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
5474 +
5475 + __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
5476 +diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
5477 +index 593d826820c34..a843422942b1d 100644
5478 +--- a/fs/cifs/cifspdu.h
5479 ++++ b/fs/cifs/cifspdu.h
5480 +@@ -147,6 +147,11 @@
5481 + */
5482 + #define SMB3_SIGN_KEY_SIZE (16)
5483 +
5484 ++/*
5485 ++ * Size of the smb3 encryption/decryption keys
5486 ++ */
5487 ++#define SMB3_ENC_DEC_KEY_SIZE (32)
5488 ++
5489 + #define CIFS_CLIENT_CHALLENGE_SIZE (8)
5490 + #define CIFS_SERVER_CHALLENGE_SIZE (8)
5491 + #define CIFS_HMAC_MD5_HASH_SIZE (16)
5492 +diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
5493 +index 99a1951a01ec2..d9a990c991213 100644
5494 +--- a/fs/cifs/smb2glob.h
5495 ++++ b/fs/cifs/smb2glob.h
5496 +@@ -58,6 +58,7 @@
5497 + #define SMB2_HMACSHA256_SIZE (32)
5498 + #define SMB2_CMACAES_SIZE (16)
5499 + #define SMB3_SIGNKEY_SIZE (16)
5500 ++#define SMB3_GCM128_CRYPTKEY_SIZE (16)
5501 + #define SMB3_GCM256_CRYPTKEY_SIZE (32)
5502 +
5503 + /* Maximum buffer size value we can send with 1 credit */
5504 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5505 +index 02998c79bb907..46d247c722eec 100644
5506 +--- a/fs/cifs/smb2ops.c
5507 ++++ b/fs/cifs/smb2ops.c
5508 +@@ -1980,6 +1980,7 @@ smb2_duplicate_extents(const unsigned int xid,
5509 + {
5510 + int rc;
5511 + unsigned int ret_data_len;
5512 ++ struct inode *inode;
5513 + struct duplicate_extents_to_file dup_ext_buf;
5514 + struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
5515 +
5516 +@@ -1996,10 +1997,21 @@ smb2_duplicate_extents(const unsigned int xid,
5517 + cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
5518 + src_off, dest_off, len);
5519 +
5520 +- rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
5521 +- if (rc)
5522 +- goto duplicate_extents_out;
5523 ++ inode = d_inode(trgtfile->dentry);
5524 ++ if (inode->i_size < dest_off + len) {
5525 ++ rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
5526 ++ if (rc)
5527 ++ goto duplicate_extents_out;
5528 +
5529 ++ /*
5530 ++ * Although we could also set a plausible allocation size (i_blocks)
5531 ++ * here in addition to setting the file size, in reflink it is
5532 ++ * likely that the target file is sparse. Its allocation size will
5533 ++ * be queried on the next revalidate, but it is important to make
5534 ++ * sure that the file's cached size is updated immediately.
5535 ++ */
5536 ++ cifs_setsize(inode, dest_off + len);
5537 ++ }
5538 + rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
5539 + trgtfile->fid.volatile_fid,
5540 + FSCTL_DUPLICATE_EXTENTS_TO_FILE,
5541 +@@ -4056,7 +4068,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
5542 + if (ses->Suid == ses_id) {
5543 + ses_enc_key = enc ? ses->smb3encryptionkey :
5544 + ses->smb3decryptionkey;
5545 +- memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
5546 ++ memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
5547 + spin_unlock(&cifs_tcp_ses_lock);
5548 + return 0;
5549 + }
5550 +@@ -4083,7 +4095,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
5551 + int rc = 0;
5552 + struct scatterlist *sg;
5553 + u8 sign[SMB2_SIGNATURE_SIZE] = {};
5554 +- u8 key[SMB3_SIGN_KEY_SIZE];
5555 ++ u8 key[SMB3_ENC_DEC_KEY_SIZE];
5556 + struct aead_request *req;
5557 + char *iv;
5558 + unsigned int iv_len;
5559 +@@ -4107,10 +4119,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
5560 + tfm = enc ? server->secmech.ccmaesencrypt :
5561 + server->secmech.ccmaesdecrypt;
5562 +
5563 +- if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
5564 ++ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
5565 ++ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
5566 + rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
5567 + else
5568 +- rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
5569 ++ rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
5570 +
5571 + if (rc) {
5572 + cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
5573 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5574 +index c6f8bc6729aa1..d1d550647cd64 100644
5575 +--- a/fs/cifs/smb2pdu.c
5576 ++++ b/fs/cifs/smb2pdu.c
5577 +@@ -4032,8 +4032,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
5578 + if (rdata->credits.value > 0) {
5579 + shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
5580 + SMB2_MAX_BUFFER_SIZE));
5581 +- shdr->CreditRequest =
5582 +- cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
5583 ++ shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
5584 +
5585 + rc = adjust_credits(server, &rdata->credits, rdata->bytes);
5586 + if (rc)
5587 +@@ -4339,8 +4338,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
5588 + if (wdata->credits.value > 0) {
5589 + shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
5590 + SMB2_MAX_BUFFER_SIZE));
5591 +- shdr->CreditRequest =
5592 +- cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
5593 ++ shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
5594 +
5595 + rc = adjust_credits(server, &wdata->credits, wdata->bytes);
5596 + if (rc)
5597 +diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
5598 +index ebccd71cc60a3..e6fa76ab70be7 100644
5599 +--- a/fs/cifs/smb2transport.c
5600 ++++ b/fs/cifs/smb2transport.c
5601 +@@ -298,7 +298,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
5602 + {
5603 + unsigned char zero = 0x0;
5604 + __u8 i[4] = {0, 0, 0, 1};
5605 +- __u8 L[4] = {0, 0, 0, 128};
5606 ++ __u8 L128[4] = {0, 0, 0, 128};
5607 ++ __u8 L256[4] = {0, 0, 1, 0};
5608 + int rc = 0;
5609 + unsigned char prfhash[SMB2_HMACSHA256_SIZE];
5610 + unsigned char *hashptr = prfhash;
5611 +@@ -354,8 +355,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
5612 + goto smb3signkey_ret;
5613 + }
5614 +
5615 +- rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
5616 +- L, 4);
5617 ++ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
5618 ++ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
5619 ++ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
5620 ++ L256, 4);
5621 ++ } else {
5622 ++ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
5623 ++ L128, 4);
5624 ++ }
5625 + if (rc) {
5626 + cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
5627 + goto smb3signkey_ret;
5628 +@@ -390,6 +397,9 @@ generate_smb3signingkey(struct cifs_ses *ses,
5629 + const struct derivation_triplet *ptriplet)
5630 + {
5631 + int rc;
5632 ++#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
5633 ++ struct TCP_Server_Info *server = ses->server;
5634 ++#endif
5635 +
5636 + /*
5637 + * All channels use the same encryption/decryption keys but
5638 +@@ -422,11 +432,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
5639 + rc = generate_key(ses, ptriplet->encryption.label,
5640 + ptriplet->encryption.context,
5641 + ses->smb3encryptionkey,
5642 +- SMB3_SIGN_KEY_SIZE);
5643 ++ SMB3_ENC_DEC_KEY_SIZE);
5644 + rc = generate_key(ses, ptriplet->decryption.label,
5645 + ptriplet->decryption.context,
5646 + ses->smb3decryptionkey,
5647 +- SMB3_SIGN_KEY_SIZE);
5648 ++ SMB3_ENC_DEC_KEY_SIZE);
5649 + if (rc)
5650 + return rc;
5651 + }
5652 +@@ -442,14 +452,23 @@ generate_smb3signingkey(struct cifs_ses *ses,
5653 + */
5654 + cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
5655 + &ses->Suid);
5656 ++ cifs_dbg(VFS, "Cipher type %d\n", server->cipher_type);
5657 + cifs_dbg(VFS, "Session Key %*ph\n",
5658 + SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
5659 + cifs_dbg(VFS, "Signing Key %*ph\n",
5660 + SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
5661 +- cifs_dbg(VFS, "ServerIn Key %*ph\n",
5662 +- SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
5663 +- cifs_dbg(VFS, "ServerOut Key %*ph\n",
5664 +- SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
5665 ++ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
5666 ++ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
5667 ++ cifs_dbg(VFS, "ServerIn Key %*ph\n",
5668 ++ SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
5669 ++ cifs_dbg(VFS, "ServerOut Key %*ph\n",
5670 ++ SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
5671 ++ } else {
5672 ++ cifs_dbg(VFS, "ServerIn Key %*ph\n",
5673 ++ SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
5674 ++ cifs_dbg(VFS, "ServerOut Key %*ph\n",
5675 ++ SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
5676 ++ }
5677 + #endif
5678 + return rc;
5679 + }
5680 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
5681 +index 7b45b3b79df56..503a0056b60f2 100644
5682 +--- a/fs/cifs/transport.c
5683 ++++ b/fs/cifs/transport.c
5684 +@@ -1170,7 +1170,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
5685 + }
5686 + if (rc != 0) {
5687 + for (; i < num_rqst; i++) {
5688 +- cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
5689 ++ cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
5690 + midQ[i]->mid, le16_to_cpu(midQ[i]->command));
5691 + send_cancel(server, &rqst[i], midQ[i]);
5692 + spin_lock(&GlobalMid_Lock);
5693 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
5694 +index e67d5de6f28ca..b6229fe1aa233 100644
5695 +--- a/fs/ext4/mballoc.c
5696 ++++ b/fs/ext4/mballoc.c
5697 +@@ -2732,8 +2732,15 @@ static int ext4_mb_init_backend(struct super_block *sb)
5698 + }
5699 +
5700 + if (ext4_has_feature_flex_bg(sb)) {
5701 +- /* a single flex group is supposed to be read by a single IO */
5702 +- sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
5703 ++ /* a single flex group is supposed to be read by a single IO.
5704 ++ * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
5705 ++ * an unsigned integer, so the maximum shift is 32.
5706 ++ */
5707 ++ if (sbi->s_es->s_log_groups_per_flex >= 32) {
5708 ++ ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
5709 ++ goto err_freesgi;
5710 ++ }
5711 ++ sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
5712 + BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
5713 + sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
5714 + } else {
5715 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
5716 +index 4698471795732..5462f26907c19 100644
5717 +--- a/fs/ext4/xattr.c
5718 ++++ b/fs/ext4/xattr.c
5719 +@@ -1459,6 +1459,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
5720 + if (!ce)
5721 + return NULL;
5722 +
5723 ++ WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
5724 ++ !(current->flags & PF_MEMALLOC_NOFS));
5725 ++
5726 + ea_data = kvmalloc(value_len, GFP_KERNEL);
5727 + if (!ea_data) {
5728 + mb_cache_entry_put(ea_inode_cache, ce);
5729 +@@ -2325,6 +2328,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
5730 + error = -ENOSPC;
5731 + goto cleanup;
5732 + }
5733 ++ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
5734 + }
5735 +
5736 + error = ext4_reserve_inode_write(handle, inode, &is.iloc);
5737 +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
5738 +index 2e9314091c81d..1955dea999f79 100644
5739 +--- a/fs/gfs2/log.c
5740 ++++ b/fs/gfs2/log.c
5741 +@@ -935,12 +935,16 @@ static void trans_drain(struct gfs2_trans *tr)
5742 + while (!list_empty(head)) {
5743 + bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
5744 + list_del_init(&bd->bd_list);
5745 ++ if (!list_empty(&bd->bd_ail_st_list))
5746 ++ gfs2_remove_from_ail(bd);
5747 + kmem_cache_free(gfs2_bufdata_cachep, bd);
5748 + }
5749 + head = &tr->tr_databuf;
5750 + while (!list_empty(head)) {
5751 + bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
5752 + list_del_init(&bd->bd_list);
5753 ++ if (!list_empty(&bd->bd_ail_st_list))
5754 ++ gfs2_remove_from_ail(bd);
5755 + kmem_cache_free(gfs2_bufdata_cachep, bd);
5756 + }
5757 + }
5758 +diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
5759 +index 6d4bf7ea7b3be..7f850ff6a05de 100644
5760 +--- a/fs/gfs2/trans.c
5761 ++++ b/fs/gfs2/trans.c
5762 +@@ -134,6 +134,8 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
5763 + bd->bd_bh = bh;
5764 + bd->bd_gl = gl;
5765 + INIT_LIST_HEAD(&bd->bd_list);
5766 ++ INIT_LIST_HEAD(&bd->bd_ail_st_list);
5767 ++ INIT_LIST_HEAD(&bd->bd_ail_gl_list);
5768 + bh->b_private = bd;
5769 + return bd;
5770 + }
5771 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5772 +index 06e9c21819957..dde290eb7dd0c 100644
5773 +--- a/fs/io_uring.c
5774 ++++ b/fs/io_uring.c
5775 +@@ -3996,6 +3996,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
5776 + static int io_provide_buffers_prep(struct io_kiocb *req,
5777 + const struct io_uring_sqe *sqe)
5778 + {
5779 ++ unsigned long size;
5780 + struct io_provide_buf *p = &req->pbuf;
5781 + u64 tmp;
5782 +
5783 +@@ -4009,7 +4010,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
5784 + p->addr = READ_ONCE(sqe->addr);
5785 + p->len = READ_ONCE(sqe->len);
5786 +
5787 +- if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
5788 ++ size = (unsigned long)p->len * p->nbufs;
5789 ++ if (!access_ok(u64_to_user_ptr(p->addr), size))
5790 + return -EFAULT;
5791 +
5792 + p->bgid = READ_ONCE(sqe->buf_group);
5793 +diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
5794 +index e2a488d403a61..14a72224b6571 100644
5795 +--- a/fs/nfs/Kconfig
5796 ++++ b/fs/nfs/Kconfig
5797 +@@ -127,7 +127,7 @@ config PNFS_BLOCK
5798 + config PNFS_FLEXFILE_LAYOUT
5799 + tristate
5800 + depends on NFS_V4_1 && NFS_V3
5801 +- default m
5802 ++ default NFS_V4
5803 +
5804 + config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
5805 + string "NFSv4.1 Implementation ID Domain"
5806 +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
5807 +index 69971f6c840d2..dff6b52d26a85 100644
5808 +--- a/fs/nfs/nfs3xdr.c
5809 ++++ b/fs/nfs/nfs3xdr.c
5810 +@@ -35,6 +35,7 @@
5811 + */
5812 + #define NFS3_fhandle_sz (1+16)
5813 + #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
5814 ++#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
5815 + #define NFS3_sattr_sz (15)
5816 + #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
5817 + #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
5818 +@@ -72,7 +73,7 @@
5819 + #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+1)
5820 + #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+1)
5821 + #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
5822 +-#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
5823 ++#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
5824 + #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
5825 + #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
5826 + #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+1)
5827 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5828 +index ba2dfba4854bf..15ac6b6893e76 100644
5829 +--- a/fs/nfs/nfs4proc.c
5830 ++++ b/fs/nfs/nfs4proc.c
5831 +@@ -5891,6 +5891,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
5832 + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5833 + int ret, i;
5834 +
5835 ++ /* You can't remove system.nfs4_acl: */
5836 ++ if (buflen == 0)
5837 ++ return -EINVAL;
5838 + if (!nfs4_server_supports_acls(server))
5839 + return -EOPNOTSUPP;
5840 + if (npages > ARRAY_SIZE(pages))
5841 +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
5842 +index eb02072d28dd6..723763746238d 100644
5843 +--- a/fs/squashfs/export.c
5844 ++++ b/fs/squashfs/export.c
5845 +@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
5846 + start = le64_to_cpu(table[n]);
5847 + end = le64_to_cpu(table[n + 1]);
5848 +
5849 +- if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
5850 ++ if (start >= end
5851 ++ || (end - start) >
5852 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5853 + kfree(table);
5854 + return ERR_PTR(-EINVAL);
5855 + }
5856 + }
5857 +
5858 + start = le64_to_cpu(table[indexes - 1]);
5859 +- if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
5860 ++ if (start >= lookup_table_start ||
5861 ++ (lookup_table_start - start) >
5862 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5863 + kfree(table);
5864 + return ERR_PTR(-EINVAL);
5865 + }
5866 +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
5867 +index 11581bf31af41..ea5387679723f 100644
5868 +--- a/fs/squashfs/id.c
5869 ++++ b/fs/squashfs/id.c
5870 +@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
5871 + start = le64_to_cpu(table[n]);
5872 + end = le64_to_cpu(table[n + 1]);
5873 +
5874 +- if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
5875 ++ if (start >= end || (end - start) >
5876 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5877 + kfree(table);
5878 + return ERR_PTR(-EINVAL);
5879 + }
5880 + }
5881 +
5882 + start = le64_to_cpu(table[indexes - 1]);
5883 +- if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
5884 ++ if (start >= id_table_start || (id_table_start - start) >
5885 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5886 + kfree(table);
5887 + return ERR_PTR(-EINVAL);
5888 + }
5889 +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
5890 +index 8d64edb80ebf0..b3fdc8212c5f5 100644
5891 +--- a/fs/squashfs/squashfs_fs.h
5892 ++++ b/fs/squashfs/squashfs_fs.h
5893 +@@ -17,6 +17,7 @@
5894 +
5895 + /* size of metadata (inode and directory) blocks */
5896 + #define SQUASHFS_METADATA_SIZE 8192
5897 ++#define SQUASHFS_BLOCK_OFFSET 2
5898 +
5899 + /* default size of block device I/O */
5900 + #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
5901 +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
5902 +index ead66670b41a5..087cab8c78f4e 100644
5903 +--- a/fs/squashfs/xattr_id.c
5904 ++++ b/fs/squashfs/xattr_id.c
5905 +@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
5906 + start = le64_to_cpu(table[n]);
5907 + end = le64_to_cpu(table[n + 1]);
5908 +
5909 +- if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
5910 ++ if (start >= end || (end - start) >
5911 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5912 + kfree(table);
5913 + return ERR_PTR(-EINVAL);
5914 + }
5915 + }
5916 +
5917 + start = le64_to_cpu(table[indexes - 1]);
5918 +- if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
5919 ++ if (start >= table_start || (table_start - start) >
5920 ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
5921 + kfree(table);
5922 + return ERR_PTR(-EINVAL);
5923 + }
5924 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
5925 +index 6d1879bf94403..37dac195adbb4 100644
5926 +--- a/include/acpi/acpi_bus.h
5927 ++++ b/include/acpi/acpi_bus.h
5928 +@@ -233,6 +233,7 @@ struct acpi_pnp_type {
5929 +
5930 + struct acpi_device_pnp {
5931 + acpi_bus_id bus_id; /* Object name */
5932 ++ int instance_no; /* Instance number of this object */
5933 + struct acpi_pnp_type type; /* ID type */
5934 + acpi_bus_address bus_address; /* _ADR */
5935 + char *unique_id; /* _UID */
5936 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
5937 +index 34d8287cd7749..d7efbc5490e8c 100644
5938 +--- a/include/asm-generic/vmlinux.lds.h
5939 ++++ b/include/asm-generic/vmlinux.lds.h
5940 +@@ -393,7 +393,10 @@
5941 + . = ALIGN(8); \
5942 + __start_static_call_sites = .; \
5943 + KEEP(*(.static_call_sites)) \
5944 +- __stop_static_call_sites = .;
5945 ++ __stop_static_call_sites = .; \
5946 ++ __start_static_call_tramp_key = .; \
5947 ++ KEEP(*(.static_call_tramp_key)) \
5948 ++ __stop_static_call_tramp_key = .;
5949 +
5950 + /*
5951 + * Allow architectures to handle ro_after_init data on their
5952 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
5953 +index 76322b6452c80..dd236ef59db3e 100644
5954 +--- a/include/linux/bpf.h
5955 ++++ b/include/linux/bpf.h
5956 +@@ -1059,7 +1059,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
5957 + struct bpf_prog *include_prog,
5958 + struct bpf_prog_array **new_array);
5959 +
5960 +-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
5961 ++#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
5962 + ({ \
5963 + struct bpf_prog_array_item *_item; \
5964 + struct bpf_prog *_prog; \
5965 +@@ -1072,7 +1072,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
5966 + goto _out; \
5967 + _item = &_array->items[0]; \
5968 + while ((_prog = READ_ONCE(_item->prog))) { \
5969 +- bpf_cgroup_storage_set(_item->cgroup_storage); \
5970 ++ if (set_cg_storage) \
5971 ++ bpf_cgroup_storage_set(_item->cgroup_storage); \
5972 + _ret &= func(_prog, ctx); \
5973 + _item++; \
5974 + } \
5975 +@@ -1133,10 +1134,10 @@ _out: \
5976 + })
5977 +
5978 + #define BPF_PROG_RUN_ARRAY(array, ctx, func) \
5979 +- __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
5980 ++ __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
5981 +
5982 + #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
5983 +- __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
5984 ++ __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
5985 +
5986 + #ifdef CONFIG_BPF_SYSCALL
5987 + DECLARE_PER_CPU(int, bpf_prog_active);
5988 +diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
5989 +index d0bd226d6bd96..54665952d6ade 100644
5990 +--- a/include/linux/brcmphy.h
5991 ++++ b/include/linux/brcmphy.h
5992 +@@ -136,6 +136,7 @@
5993 +
5994 + #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
5995 + #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
5996 ++#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
5997 + #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
5998 + #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
5999 + #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
6000 +@@ -222,6 +223,9 @@
6001 + /* 11111: Mode Control Register */
6002 + #define BCM54XX_SHD_MODE 0x1f
6003 + #define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
6004 ++#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
6005 ++#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
6006 ++#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
6007 + #define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
6008 +
6009 + /*
6010 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
6011 +index d2d7f9b6a2761..50cc070cb1f7c 100644
6012 +--- a/include/linux/device-mapper.h
6013 ++++ b/include/linux/device-mapper.h
6014 +@@ -246,7 +246,11 @@ struct target_type {
6015 + #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
6016 +
6017 + /*
6018 +- * Indicates that a target supports host-managed zoned block devices.
6019 ++ * Indicates support for zoned block devices:
6020 ++ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
6021 ++ * block devices but does not support combining different zoned models.
6022 ++ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
6023 ++ * devices with different zoned models.
6024 + */
6025 + #define DM_TARGET_ZONED_HM 0x00000040
6026 + #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
6027 +@@ -257,6 +261,15 @@ struct target_type {
6028 + #define DM_TARGET_NOWAIT 0x00000080
6029 + #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
6030 +
6031 ++#ifdef CONFIG_BLK_DEV_ZONED
6032 ++#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
6033 ++#define dm_target_supports_mixed_zoned_model(type) \
6034 ++ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
6035 ++#else
6036 ++#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
6037 ++#define dm_target_supports_mixed_zoned_model(type) (false)
6038 ++#endif
6039 ++
6040 + struct dm_target {
6041 + struct dm_table *table;
6042 + struct target_type *type;
6043 +diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
6044 +index 2ad6e92f124ad..0bff345c4bc68 100644
6045 +--- a/include/linux/hugetlb_cgroup.h
6046 ++++ b/include/linux/hugetlb_cgroup.h
6047 +@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
6048 + return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
6049 + }
6050 +
6051 ++static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
6052 ++{
6053 ++ css_put(&h_cg->css);
6054 ++}
6055 ++
6056 + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
6057 + struct hugetlb_cgroup **ptr);
6058 + extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
6059 +@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
6060 +
6061 + extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
6062 + struct file_region *rg,
6063 +- unsigned long nr_pages);
6064 ++ unsigned long nr_pages,
6065 ++ bool region_del);
6066 +
6067 + extern void hugetlb_cgroup_file_init(void) __init;
6068 + extern void hugetlb_cgroup_migrate(struct page *oldhpage,
6069 +@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
6070 + #else
6071 + static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
6072 + struct file_region *rg,
6073 +- unsigned long nr_pages)
6074 ++ unsigned long nr_pages,
6075 ++ bool region_del)
6076 + {
6077 + }
6078 +
6079 +@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
6080 + return true;
6081 + }
6082 +
6083 ++static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
6084 ++{
6085 ++}
6086 ++
6087 + static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
6088 + struct hugetlb_cgroup **ptr)
6089 + {
6090 +diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
6091 +index a367ead4bf4bb..e11555989090c 100644
6092 +--- a/include/linux/if_macvlan.h
6093 ++++ b/include/linux/if_macvlan.h
6094 +@@ -42,13 +42,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
6095 + if (likely(success)) {
6096 + struct vlan_pcpu_stats *pcpu_stats;
6097 +
6098 +- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
6099 ++ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
6100 + u64_stats_update_begin(&pcpu_stats->syncp);
6101 + pcpu_stats->rx_packets++;
6102 + pcpu_stats->rx_bytes += len;
6103 + if (multicast)
6104 + pcpu_stats->rx_multicast++;
6105 + u64_stats_update_end(&pcpu_stats->syncp);
6106 ++ put_cpu_ptr(vlan->pcpu_stats);
6107 + } else {
6108 + this_cpu_inc(vlan->pcpu_stats->rx_errors);
6109 + }
6110 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
6111 +index 922a7f6004658..c691b1ac95f88 100644
6112 +--- a/include/linux/memcontrol.h
6113 ++++ b/include/linux/memcontrol.h
6114 +@@ -937,9 +937,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
6115 + rcu_read_unlock();
6116 + }
6117 +
6118 +-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6119 +-void mem_cgroup_split_huge_fixup(struct page *head);
6120 +-#endif
6121 ++void split_page_memcg(struct page *head, unsigned int nr);
6122 +
6123 + #else /* CONFIG_MEMCG */
6124 +
6125 +@@ -1267,7 +1265,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
6126 + return 0;
6127 + }
6128 +
6129 +-static inline void mem_cgroup_split_huge_fixup(struct page *head)
6130 ++static inline void split_page_memcg(struct page *head, unsigned int nr)
6131 + {
6132 + }
6133 +
6134 +diff --git a/include/linux/mm.h b/include/linux/mm.h
6135 +index b8eadd9f96802..08a48d3eaeaae 100644
6136 +--- a/include/linux/mm.h
6137 ++++ b/include/linux/mm.h
6138 +@@ -1414,13 +1414,26 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
6139 + #endif /* CONFIG_NUMA_BALANCING */
6140 +
6141 + #ifdef CONFIG_KASAN_SW_TAGS
6142 ++
6143 ++/*
6144 ++ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to set
6145 ++ * tags for all pages to the native kernel tag value 0xff, as the default
6146 ++ * value 0x00 maps to 0xff.
6147 ++ */
6148 ++
6149 + static inline u8 page_kasan_tag(const struct page *page)
6150 + {
6151 +- return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
6152 ++ u8 tag;
6153 ++
6154 ++ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
6155 ++ tag ^= 0xff;
6156 ++
6157 ++ return tag;
6158 + }
6159 +
6160 + static inline void page_kasan_tag_set(struct page *page, u8 tag)
6161 + {
6162 ++ tag ^= 0xff;
6163 + page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
6164 + page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
6165 + }
6166 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
6167 +index 915f4f100383b..3433ecc9c1f74 100644
6168 +--- a/include/linux/mm_types.h
6169 ++++ b/include/linux/mm_types.h
6170 +@@ -23,6 +23,7 @@
6171 + #endif
6172 + #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
6173 +
6174 ++#define INIT_PASID 0
6175 +
6176 + struct address_space;
6177 + struct mem_cgroup;
6178 +diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
6179 +index b8200782dedeb..1a6a9eb6d3fac 100644
6180 +--- a/include/linux/mmu_notifier.h
6181 ++++ b/include/linux/mmu_notifier.h
6182 +@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
6183 + * the last refcount is dropped.
6184 + *
6185 + * If blockable argument is set to false then the callback cannot
6186 +- * sleep and has to return with -EAGAIN. 0 should be returned
6187 +- * otherwise. Please note that if invalidate_range_start approves
6188 +- * a non-blocking behavior then the same applies to
6189 +- * invalidate_range_end.
6190 +- *
6191 ++ * sleep and has to return with -EAGAIN if sleeping would be required.
6192 ++ * 0 should be returned otherwise. Please note that notifiers that can
6193 ++ * fail invalidate_range_start are not allowed to implement
6194 ++ * invalidate_range_end, as there is no mechanism for informing the
6195 ++ * notifier that its start failed.
6196 + */
6197 + int (*invalidate_range_start)(struct mmu_notifier *subscription,
6198 + const struct mmu_notifier_range *range);
6199 +diff --git a/include/linux/mutex.h b/include/linux/mutex.h
6200 +index dcd185cbfe793..4d671fba3cab4 100644
6201 +--- a/include/linux/mutex.h
6202 ++++ b/include/linux/mutex.h
6203 +@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
6204 + # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
6205 + # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
6206 + # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
6207 +-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
6208 ++# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
6209 + #endif
6210 +
6211 + /*
6212 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
6213 +index 8ebb641937571..8ec48466410a6 100644
6214 +--- a/include/linux/netfilter/x_tables.h
6215 ++++ b/include/linux/netfilter/x_tables.h
6216 +@@ -227,7 +227,7 @@ struct xt_table {
6217 + unsigned int valid_hooks;
6218 +
6219 + /* Man behind the curtain... */
6220 +- struct xt_table_info __rcu *private;
6221 ++ struct xt_table_info *private;
6222 +
6223 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
6224 + struct module *me;
6225 +@@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
6226 + * since addend is most likely 1
6227 + */
6228 + __this_cpu_add(xt_recseq.sequence, addend);
6229 +- smp_wmb();
6230 ++ smp_mb();
6231 +
6232 + return addend;
6233 + }
6234 +@@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
6235 +
6236 + struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
6237 +
6238 +-struct xt_table_info
6239 +-*xt_table_get_private_protected(const struct xt_table *table);
6240 +-
6241 + #ifdef CONFIG_COMPAT
6242 + #include <net/compat.h>
6243 +
6244 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
6245 +index d5570deff4003..b032f094a7827 100644
6246 +--- a/include/linux/pagemap.h
6247 ++++ b/include/linux/pagemap.h
6248 +@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
6249 + return pgoff;
6250 + }
6251 +
6252 +-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
6253 + struct wait_page_key {
6254 + struct page *page;
6255 + int bit_nr;
6256 +diff --git a/include/linux/phy.h b/include/linux/phy.h
6257 +index 56563e5e0dc75..08725a262f320 100644
6258 +--- a/include/linux/phy.h
6259 ++++ b/include/linux/phy.h
6260 +@@ -499,6 +499,7 @@ struct macsec_ops;
6261 + *
6262 + * @speed: Current link speed
6263 + * @duplex: Current duplex
6264 ++ * @port: Current port
6265 + * @pause: Current pause
6266 + * @asym_pause: Current asymmetric pause
6267 + * @supported: Combined MAC/PHY supported linkmodes
6268 +@@ -577,6 +578,7 @@ struct phy_device {
6269 + */
6270 + int speed;
6271 + int duplex;
6272 ++ int port;
6273 + int pause;
6274 + int asym_pause;
6275 + u8 master_slave_get;
6276 +diff --git a/include/linux/static_call.h b/include/linux/static_call.h
6277 +index 695da4c9b3381..04e6042d252d3 100644
6278 +--- a/include/linux/static_call.h
6279 ++++ b/include/linux/static_call.h
6280 +@@ -107,26 +107,10 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
6281 +
6282 + #define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
6283 +
6284 +-/*
6285 +- * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
6286 +- * the symbol table so that objtool can reference it when it generates the
6287 +- * .static_call_sites section.
6288 +- */
6289 +-#define __static_call(name) \
6290 +-({ \
6291 +- __ADDRESSABLE(STATIC_CALL_KEY(name)); \
6292 +- &STATIC_CALL_TRAMP(name); \
6293 +-})
6294 +-
6295 + #else
6296 + #define STATIC_CALL_TRAMP_ADDR(name) NULL
6297 + #endif
6298 +
6299 +-
6300 +-#define DECLARE_STATIC_CALL(name, func) \
6301 +- extern struct static_call_key STATIC_CALL_KEY(name); \
6302 +- extern typeof(func) STATIC_CALL_TRAMP(name);
6303 +-
6304 + #define static_call_update(name, func) \
6305 + ({ \
6306 + BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \
6307 +@@ -154,6 +138,12 @@ struct static_call_key {
6308 + };
6309 + };
6310 +
6311 ++/* For finding the key associated with a trampoline */
6312 ++struct static_call_tramp_key {
6313 ++ s32 tramp;
6314 ++ s32 key;
6315 ++};
6316 ++
6317 + extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
6318 + extern int static_call_mod_init(struct module *mod);
6319 + extern int static_call_text_reserved(void *start, void *end);
6320 +@@ -174,17 +164,23 @@ extern int static_call_text_reserved(void *start, void *end);
6321 + }; \
6322 + ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
6323 +
6324 +-#define static_call(name) __static_call(name)
6325 + #define static_call_cond(name) (void)__static_call(name)
6326 +
6327 + #define EXPORT_STATIC_CALL(name) \
6328 + EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
6329 + EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
6330 +-
6331 + #define EXPORT_STATIC_CALL_GPL(name) \
6332 + EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
6333 + EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
6334 +
6335 ++/* Leave the key unexported, so modules can't change static call targets: */
6336 ++#define EXPORT_STATIC_CALL_TRAMP(name) \
6337 ++ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)); \
6338 ++ ARCH_ADD_TRAMP_KEY(name)
6339 ++#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
6340 ++ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)); \
6341 ++ ARCH_ADD_TRAMP_KEY(name)
6342 ++
6343 + #elif defined(CONFIG_HAVE_STATIC_CALL)
6344 +
6345 + static inline int static_call_init(void) { return 0; }
6346 +@@ -207,7 +203,6 @@ struct static_call_key {
6347 + }; \
6348 + ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
6349 +
6350 +-#define static_call(name) __static_call(name)
6351 + #define static_call_cond(name) (void)__static_call(name)
6352 +
6353 + static inline
6354 +@@ -227,11 +222,16 @@ static inline int static_call_text_reserved(void *start, void *end)
6355 + #define EXPORT_STATIC_CALL(name) \
6356 + EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
6357 + EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
6358 +-
6359 + #define EXPORT_STATIC_CALL_GPL(name) \
6360 + EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
6361 + EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
6362 +
6363 ++/* Leave the key unexported, so modules can't change static call targets: */
6364 ++#define EXPORT_STATIC_CALL_TRAMP(name) \
6365 ++ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
6366 ++#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
6367 ++ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
6368 ++
6369 + #else /* Generic implementation */
6370 +
6371 + static inline int static_call_init(void) { return 0; }
6372 +@@ -252,9 +252,6 @@ struct static_call_key {
6373 + .func = NULL, \
6374 + }
6375 +
6376 +-#define static_call(name) \
6377 +- ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
6378 +-
6379 + static inline void __static_call_nop(void) { }
6380 +
6381 + /*
6382 +diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
6383 +index 89135bb35bf76..ae5662d368b98 100644
6384 +--- a/include/linux/static_call_types.h
6385 ++++ b/include/linux/static_call_types.h
6386 +@@ -4,11 +4,13 @@
6387 +
6388 + #include <linux/types.h>
6389 + #include <linux/stringify.h>
6390 ++#include <linux/compiler.h>
6391 +
6392 + #define STATIC_CALL_KEY_PREFIX __SCK__
6393 + #define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
6394 + #define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
6395 + #define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
6396 ++#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
6397 +
6398 + #define STATIC_CALL_TRAMP_PREFIX __SCT__
6399 + #define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
6400 +@@ -32,4 +34,52 @@ struct static_call_site {
6401 + s32 key;
6402 + };
6403 +
6404 ++#define DECLARE_STATIC_CALL(name, func) \
6405 ++ extern struct static_call_key STATIC_CALL_KEY(name); \
6406 ++ extern typeof(func) STATIC_CALL_TRAMP(name);
6407 ++
6408 ++#ifdef CONFIG_HAVE_STATIC_CALL
6409 ++
6410 ++#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
6411 ++
6412 ++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
6413 ++
6414 ++/*
6415 ++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
6416 ++ * the symbol table so that objtool can reference it when it generates the
6417 ++ * .static_call_sites section.
6418 ++ */
6419 ++#define __STATIC_CALL_ADDRESSABLE(name) \
6420 ++ __ADDRESSABLE(STATIC_CALL_KEY(name))
6421 ++
6422 ++#define __static_call(name) \
6423 ++({ \
6424 ++ __STATIC_CALL_ADDRESSABLE(name); \
6425 ++ __raw_static_call(name); \
6426 ++})
6427 ++
6428 ++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
6429 ++
6430 ++#define __STATIC_CALL_ADDRESSABLE(name)
6431 ++#define __static_call(name) __raw_static_call(name)
6432 ++
6433 ++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
6434 ++
6435 ++#ifdef MODULE
6436 ++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
6437 ++#define static_call_mod(name) __raw_static_call(name)
6438 ++#else
6439 ++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
6440 ++#define static_call_mod(name) __static_call(name)
6441 ++#endif
6442 ++
6443 ++#define static_call(name) __static_call(name)
6444 ++
6445 ++#else
6446 ++
6447 ++#define static_call(name) \
6448 ++ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
6449 ++
6450 ++#endif /* CONFIG_HAVE_STATIC_CALL */
6451 ++
6452 + #endif /* _STATIC_CALL_TYPES_H */
6453 +diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
6454 +index c6abb79501b33..e81856c0ba134 100644
6455 +--- a/include/linux/u64_stats_sync.h
6456 ++++ b/include/linux/u64_stats_sync.h
6457 +@@ -115,12 +115,13 @@ static inline void u64_stats_inc(u64_stats_t *p)
6458 + }
6459 + #endif
6460 +
6461 ++#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
6462 ++#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
6463 ++#else
6464 + static inline void u64_stats_init(struct u64_stats_sync *syncp)
6465 + {
6466 +-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
6467 +- seqcount_init(&syncp->seq);
6468 +-#endif
6469 + }
6470 ++#endif
6471 +
6472 + static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
6473 + {
6474 +diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
6475 +index 073a9e0ec07d0..ad970416260dd 100644
6476 +--- a/include/linux/usermode_driver.h
6477 ++++ b/include/linux/usermode_driver.h
6478 +@@ -14,5 +14,6 @@ struct umd_info {
6479 + int umd_load_blob(struct umd_info *info, const void *data, size_t len);
6480 + int umd_unload_blob(struct umd_info *info);
6481 + int fork_usermode_driver(struct umd_info *info);
6482 ++void umd_cleanup_helper(struct umd_info *info);
6483 +
6484 + #endif /* __LINUX_USERMODE_DRIVER_H__ */
6485 +diff --git a/include/net/dst.h b/include/net/dst.h
6486 +index 8ea8812b0b418..acd15c544cf37 100644
6487 +--- a/include/net/dst.h
6488 ++++ b/include/net/dst.h
6489 +@@ -535,4 +535,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
6490 + dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
6491 + }
6492 +
6493 ++struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
6494 ++void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
6495 ++ struct sk_buff *skb, u32 mtu, bool confirm_neigh);
6496 ++void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
6497 ++ struct sk_buff *skb);
6498 ++u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
6499 ++struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
6500 ++ struct sk_buff *skb,
6501 ++ const void *daddr);
6502 ++unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
6503 ++
6504 + #endif /* _NET_DST_H */
6505 +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
6506 +index 111d7771b2081..aa92af3dd444d 100644
6507 +--- a/include/net/inet_connection_sock.h
6508 ++++ b/include/net/inet_connection_sock.h
6509 +@@ -284,7 +284,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
6510 + return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
6511 + }
6512 +
6513 +-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
6514 ++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
6515 + void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
6516 +
6517 + static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
6518 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
6519 +index c1c0a4ff92ae9..ed4a9d098164f 100644
6520 +--- a/include/net/netfilter/nf_tables.h
6521 ++++ b/include/net/netfilter/nf_tables.h
6522 +@@ -1508,6 +1508,7 @@ struct nft_trans_flowtable {
6523 + struct nft_flowtable *flowtable;
6524 + bool update;
6525 + struct list_head hook_list;
6526 ++ u32 flags;
6527 + };
6528 +
6529 + #define nft_trans_flowtable(trans) \
6530 +@@ -1516,6 +1517,8 @@ struct nft_trans_flowtable {
6531 + (((struct nft_trans_flowtable *)trans->data)->update)
6532 + #define nft_trans_flowtable_hooks(trans) \
6533 + (((struct nft_trans_flowtable *)trans->data)->hook_list)
6534 ++#define nft_trans_flowtable_flags(trans) \
6535 ++ (((struct nft_trans_flowtable *)trans->data)->flags)
6536 +
6537 + int __init nft_chain_filter_init(void);
6538 + void nft_chain_filter_fini(void);
6539 +diff --git a/include/net/nexthop.h b/include/net/nexthop.h
6540 +index 2fd76a9b6dc8b..4c8c9fe9a3f0e 100644
6541 +--- a/include/net/nexthop.h
6542 ++++ b/include/net/nexthop.h
6543 +@@ -362,6 +362,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
6544 + int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
6545 + struct netlink_ext_ack *extack);
6546 +
6547 ++/* Caller should either hold rcu_read_lock(), or RTNL. */
6548 + static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
6549 + {
6550 + struct nh_info *nhi;
6551 +@@ -382,6 +383,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
6552 + return NULL;
6553 + }
6554 +
6555 ++/* Variant of nexthop_fib6_nh().
6556 ++ * Caller should either hold rcu_read_lock_bh(), or RTNL.
6557 ++ */
6558 ++static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
6559 ++{
6560 ++ struct nh_info *nhi;
6561 ++
6562 ++ if (nh->is_group) {
6563 ++ struct nh_group *nh_grp;
6564 ++
6565 ++ nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
6566 ++ nh = nexthop_mpath_select(nh_grp, 0);
6567 ++ if (!nh)
6568 ++ return NULL;
6569 ++ }
6570 ++
6571 ++ nhi = rcu_dereference_bh_rtnl(nh->nh_info);
6572 ++ if (nhi->family == AF_INET6)
6573 ++ return &nhi->fib6_nh;
6574 ++
6575 ++ return NULL;
6576 ++}
6577 ++
6578 + static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
6579 + {
6580 + struct fib6_nh *fib6_nh;
6581 +diff --git a/include/net/red.h b/include/net/red.h
6582 +index 932f0d79d60cb..9e6647c4ccd1f 100644
6583 +--- a/include/net/red.h
6584 ++++ b/include/net/red.h
6585 +@@ -168,7 +168,8 @@ static inline void red_set_vars(struct red_vars *v)
6586 + v->qcount = -1;
6587 + }
6588 +
6589 +-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
6590 ++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
6591 ++ u8 Scell_log, u8 *stab)
6592 + {
6593 + if (fls(qth_min) + Wlog > 32)
6594 + return false;
6595 +@@ -178,6 +179,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
6596 + return false;
6597 + if (qth_max < qth_min)
6598 + return false;
6599 ++ if (stab) {
6600 ++ int i;
6601 ++
6602 ++ for (i = 0; i < RED_STAB_SIZE; i++)
6603 ++ if (stab[i] >= 32)
6604 ++ return false;
6605 ++ }
6606 + return true;
6607 + }
6608 +
6609 +diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
6610 +index e2091bb2b3a8e..4da61c950e931 100644
6611 +--- a/include/net/rtnetlink.h
6612 ++++ b/include/net/rtnetlink.h
6613 +@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
6614 + *
6615 + * @list: Used internally
6616 + * @kind: Identifier
6617 ++ * @netns_refund: Physical device, move to init_net on netns exit
6618 + * @maxtype: Highest device specific netlink attribute number
6619 + * @policy: Netlink policy for device specific attribute validation
6620 + * @validate: Optional validation function for netlink/changelink parameters
6621 +@@ -64,6 +65,7 @@ struct rtnl_link_ops {
6622 + size_t priv_size;
6623 + void (*setup)(struct net_device *dev);
6624 +
6625 ++ bool netns_refund;
6626 + unsigned int maxtype;
6627 + const struct nla_policy *policy;
6628 + int (*validate)(struct nlattr *tb[],
6629 +diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
6630 +index aea26ab1431c1..bff5032c98df4 100644
6631 +--- a/include/uapi/linux/psample.h
6632 ++++ b/include/uapi/linux/psample.h
6633 +@@ -3,7 +3,6 @@
6634 + #define __UAPI_PSAMPLE_H
6635 +
6636 + enum {
6637 +- /* sampled packet metadata */
6638 + PSAMPLE_ATTR_IIFINDEX,
6639 + PSAMPLE_ATTR_OIFINDEX,
6640 + PSAMPLE_ATTR_ORIGSIZE,
6641 +@@ -11,10 +10,8 @@ enum {
6642 + PSAMPLE_ATTR_GROUP_SEQ,
6643 + PSAMPLE_ATTR_SAMPLE_RATE,
6644 + PSAMPLE_ATTR_DATA,
6645 +- PSAMPLE_ATTR_TUNNEL,
6646 +-
6647 +- /* commands attributes */
6648 + PSAMPLE_ATTR_GROUP_REFCOUNT,
6649 ++ PSAMPLE_ATTR_TUNNEL,
6650 +
6651 + __PSAMPLE_ATTR_MAX
6652 + };
6653 +diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
6654 +index c2a501cd90eba..a4ac48c7dada1 100644
6655 +--- a/kernel/bpf/bpf_inode_storage.c
6656 ++++ b/kernel/bpf/bpf_inode_storage.c
6657 +@@ -109,7 +109,7 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
6658 + fd = *(int *)key;
6659 + f = fget_raw(fd);
6660 + if (!f)
6661 +- return NULL;
6662 ++ return ERR_PTR(-EBADF);
6663 +
6664 + sdata = inode_storage_lookup(f->f_inode, map, true);
6665 + fput(f);
6666 +diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
6667 +index 79c5772465f14..53736e52c1dfa 100644
6668 +--- a/kernel/bpf/preload/bpf_preload_kern.c
6669 ++++ b/kernel/bpf/preload/bpf_preload_kern.c
6670 +@@ -60,9 +60,12 @@ static int finish(void)
6671 + &magic, sizeof(magic), &pos);
6672 + if (n != sizeof(magic))
6673 + return -EPIPE;
6674 ++
6675 + tgid = umd_ops.info.tgid;
6676 +- wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
6677 +- umd_ops.info.tgid = NULL;
6678 ++ if (tgid) {
6679 ++ wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
6680 ++ umd_cleanup_helper(&umd_ops.info);
6681 ++ }
6682 + return 0;
6683 + }
6684 +
6685 +@@ -80,10 +83,18 @@ static int __init load_umd(void)
6686 +
6687 + static void __exit fini_umd(void)
6688 + {
6689 ++ struct pid *tgid;
6690 ++
6691 + bpf_preload_ops = NULL;
6692 ++
6693 + /* kill UMD in case it's still there due to earlier error */
6694 +- kill_pid(umd_ops.info.tgid, SIGKILL, 1);
6695 +- umd_ops.info.tgid = NULL;
6696 ++ tgid = umd_ops.info.tgid;
6697 ++ if (tgid) {
6698 ++ kill_pid(tgid, SIGKILL, 1);
6699 ++
6700 ++ wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
6701 ++ umd_cleanup_helper(&umd_ops.info);
6702 ++ }
6703 + umd_unload_blob(&umd_ops.info);
6704 + }
6705 + late_initcall(load_umd);
6706 +diff --git a/kernel/fork.c b/kernel/fork.c
6707 +index c675fdbd3dce1..7c044d377926c 100644
6708 +--- a/kernel/fork.c
6709 ++++ b/kernel/fork.c
6710 +@@ -992,6 +992,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
6711 + #endif
6712 + }
6713 +
6714 ++static void mm_init_pasid(struct mm_struct *mm)
6715 ++{
6716 ++#ifdef CONFIG_IOMMU_SUPPORT
6717 ++ mm->pasid = INIT_PASID;
6718 ++#endif
6719 ++}
6720 ++
6721 + static void mm_init_uprobes_state(struct mm_struct *mm)
6722 + {
6723 + #ifdef CONFIG_UPROBES
6724 +@@ -1022,6 +1029,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
6725 + mm_init_cpumask(mm);
6726 + mm_init_aio(mm);
6727 + mm_init_owner(mm, p);
6728 ++ mm_init_pasid(mm);
6729 + RCU_INIT_POINTER(mm->exe_file, NULL);
6730 + mmu_notifier_subscriptions_init(mm);
6731 + init_tlb_flush_pending(mm);
6732 +diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
6733 +index c94b820a1b62c..8743150db2acc 100644
6734 +--- a/kernel/gcov/clang.c
6735 ++++ b/kernel/gcov/clang.c
6736 +@@ -75,7 +75,9 @@ struct gcov_fn_info {
6737 +
6738 + u32 num_counters;
6739 + u64 *counters;
6740 ++#if CONFIG_CLANG_VERSION < 110000
6741 + const char *function_name;
6742 ++#endif
6743 + };
6744 +
6745 + static struct gcov_info *current_info;
6746 +@@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
6747 + }
6748 + EXPORT_SYMBOL(llvm_gcov_init);
6749 +
6750 ++#if CONFIG_CLANG_VERSION < 110000
6751 + void llvm_gcda_start_file(const char *orig_filename, const char version[4],
6752 + u32 checksum)
6753 + {
6754 +@@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
6755 + current_info->checksum = checksum;
6756 + }
6757 + EXPORT_SYMBOL(llvm_gcda_start_file);
6758 ++#else
6759 ++void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
6760 ++{
6761 ++ current_info->filename = orig_filename;
6762 ++ current_info->version = version;
6763 ++ current_info->checksum = checksum;
6764 ++}
6765 ++EXPORT_SYMBOL(llvm_gcda_start_file);
6766 ++#endif
6767 +
6768 ++#if CONFIG_CLANG_VERSION < 110000
6769 + void llvm_gcda_emit_function(u32 ident, const char *function_name,
6770 + u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
6771 + {
6772 +@@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
6773 + list_add_tail(&info->head, &current_info->functions);
6774 + }
6775 + EXPORT_SYMBOL(llvm_gcda_emit_function);
6776 ++#else
6777 ++void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
6778 ++ u8 use_extra_checksum, u32 cfg_checksum)
6779 ++{
6780 ++ struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
6781 ++
6782 ++ if (!info)
6783 ++ return;
6784 ++
6785 ++ INIT_LIST_HEAD(&info->head);
6786 ++ info->ident = ident;
6787 ++ info->checksum = func_checksum;
6788 ++ info->use_extra_checksum = use_extra_checksum;
6789 ++ info->cfg_checksum = cfg_checksum;
6790 ++ list_add_tail(&info->head, &current_info->functions);
6791 ++}
6792 ++EXPORT_SYMBOL(llvm_gcda_emit_function);
6793 ++#endif
6794 +
6795 + void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
6796 + {
6797 +@@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
6798 + }
6799 + }
6800 +
6801 ++#if CONFIG_CLANG_VERSION < 110000
6802 + static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
6803 + {
6804 + size_t cv_size; /* counter values size */
6805 +@@ -322,6 +354,28 @@ err_name:
6806 + kfree(fn_dup);
6807 + return NULL;
6808 + }
6809 ++#else
6810 ++static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
6811 ++{
6812 ++ size_t cv_size; /* counter values size */
6813 ++ struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
6814 ++ GFP_KERNEL);
6815 ++ if (!fn_dup)
6816 ++ return NULL;
6817 ++ INIT_LIST_HEAD(&fn_dup->head);
6818 ++
6819 ++ cv_size = fn->num_counters * sizeof(fn->counters[0]);
6820 ++ fn_dup->counters = vmalloc(cv_size);
6821 ++ if (!fn_dup->counters) {
6822 ++ kfree(fn_dup);
6823 ++ return NULL;
6824 ++ }
6825 ++
6826 ++ memcpy(fn_dup->counters, fn->counters, cv_size);
6827 ++
6828 ++ return fn_dup;
6829 ++}
6830 ++#endif
6831 +
6832 + /**
6833 + * gcov_info_dup - duplicate profiling data set
6834 +@@ -362,6 +416,7 @@ err:
6835 + * gcov_info_free - release memory for profiling data set duplicate
6836 + * @info: profiling data set duplicate to free
6837 + */
6838 ++#if CONFIG_CLANG_VERSION < 110000
6839 + void gcov_info_free(struct gcov_info *info)
6840 + {
6841 + struct gcov_fn_info *fn, *tmp;
6842 +@@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
6843 + kfree(info->filename);
6844 + kfree(info);
6845 + }
6846 ++#else
6847 ++void gcov_info_free(struct gcov_info *info)
6848 ++{
6849 ++ struct gcov_fn_info *fn, *tmp;
6850 ++
6851 ++ list_for_each_entry_safe(fn, tmp, &info->functions, head) {
6852 ++ vfree(fn->counters);
6853 ++ list_del(&fn->head);
6854 ++ kfree(fn);
6855 ++ }
6856 ++ kfree(info->filename);
6857 ++ kfree(info);
6858 ++}
6859 ++#endif
6860 +
6861 + #define ITER_STRIDE PAGE_SIZE
6862 +
6863 +diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
6864 +index c1ff7fa030abe..994ca8353543a 100644
6865 +--- a/kernel/power/energy_model.c
6866 ++++ b/kernel/power/energy_model.c
6867 +@@ -85,7 +85,7 @@ static int __init em_debug_init(void)
6868 +
6869 + return 0;
6870 + }
6871 +-core_initcall(em_debug_init);
6872 ++fs_initcall(em_debug_init);
6873 + #else /* CONFIG_DEBUG_FS */
6874 + static void em_debug_create_pd(struct device *dev) {}
6875 + static void em_debug_remove_pd(struct device *dev) {}
6876 +diff --git a/kernel/static_call.c b/kernel/static_call.c
6877 +index db914da6e7854..49efbdc5b4800 100644
6878 +--- a/kernel/static_call.c
6879 ++++ b/kernel/static_call.c
6880 +@@ -12,6 +12,8 @@
6881 +
6882 + extern struct static_call_site __start_static_call_sites[],
6883 + __stop_static_call_sites[];
6884 ++extern struct static_call_tramp_key __start_static_call_tramp_key[],
6885 ++ __stop_static_call_tramp_key[];
6886 +
6887 + static bool static_call_initialized;
6888 +
6889 +@@ -33,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
6890 + return (void *)((long)site->addr + (long)&site->addr);
6891 + }
6892 +
6893 ++static inline unsigned long __static_call_key(const struct static_call_site *site)
6894 ++{
6895 ++ return (long)site->key + (long)&site->key;
6896 ++}
6897 +
6898 + static inline struct static_call_key *static_call_key(const struct static_call_site *site)
6899 + {
6900 +- return (struct static_call_key *)
6901 +- (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
6902 ++ return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
6903 + }
6904 +
6905 + /* These assume the key is word-aligned. */
6906 + static inline bool static_call_is_init(struct static_call_site *site)
6907 + {
6908 +- return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
6909 ++ return __static_call_key(site) & STATIC_CALL_SITE_INIT;
6910 + }
6911 +
6912 + static inline bool static_call_is_tail(struct static_call_site *site)
6913 + {
6914 +- return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
6915 ++ return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
6916 + }
6917 +
6918 + static inline void static_call_set_init(struct static_call_site *site)
6919 + {
6920 +- site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
6921 ++ site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
6922 + (long)&site->key;
6923 + }
6924 +
6925 +@@ -197,7 +202,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
6926 + }
6927 +
6928 + arch_static_call_transform(site_addr, NULL, func,
6929 +- static_call_is_tail(site));
6930 ++ static_call_is_tail(site));
6931 + }
6932 + }
6933 +
6934 +@@ -332,10 +337,60 @@ static int __static_call_mod_text_reserved(void *start, void *end)
6935 + return ret;
6936 + }
6937 +
6938 ++static unsigned long tramp_key_lookup(unsigned long addr)
6939 ++{
6940 ++ struct static_call_tramp_key *start = __start_static_call_tramp_key;
6941 ++ struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
6942 ++ struct static_call_tramp_key *tramp_key;
6943 ++
6944 ++ for (tramp_key = start; tramp_key != stop; tramp_key++) {
6945 ++ unsigned long tramp;
6946 ++
6947 ++ tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
6948 ++ if (tramp == addr)
6949 ++ return (long)tramp_key->key + (long)&tramp_key->key;
6950 ++ }
6951 ++
6952 ++ return 0;
6953 ++}
6954 ++
6955 + static int static_call_add_module(struct module *mod)
6956 + {
6957 +- return __static_call_init(mod, mod->static_call_sites,
6958 +- mod->static_call_sites + mod->num_static_call_sites);
6959 ++ struct static_call_site *start = mod->static_call_sites;
6960 ++ struct static_call_site *stop = start + mod->num_static_call_sites;
6961 ++ struct static_call_site *site;
6962 ++
6963 ++ for (site = start; site != stop; site++) {
6964 ++ unsigned long s_key = __static_call_key(site);
6965 ++ unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
6966 ++ unsigned long key;
6967 ++
6968 ++ /*
6969 ++ * If the key is exported, 'addr' points to the key, which
6970 ++ * means modules are allowed to call static_call_update() on
6971 ++ * it.
6972 ++ *
6973 ++ * Otherwise, the key isn't exported, and 'addr' points to the
6974 ++ * trampoline so we need to look up the key.
6975 ++ *
6976 ++ * We go through this dance to prevent crazy modules from
6977 ++ * abusing sensitive static calls.
6978 ++ */
6979 ++ if (!kernel_text_address(addr))
6980 ++ continue;
6981 ++
6982 ++ key = tramp_key_lookup(addr);
6983 ++ if (!key) {
6984 ++ pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
6985 ++ static_call_addr(site));
6986 ++ return -EINVAL;
6987 ++ }
6988 ++
6989 ++ key |= s_key & STATIC_CALL_SITE_FLAGS;
6990 ++ site->key = key - (long)&site->key;
6991 ++ }
6992 ++
6993 ++ return __static_call_init(mod, start, stop);
6994 + }
6995 +
6996 + static void static_call_del_module(struct module *mod)
6997 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
6998 +index 9c1bba8cc51b0..82041bbf8fc2d 100644
6999 +--- a/kernel/trace/ftrace.c
7000 ++++ b/kernel/trace/ftrace.c
7001 +@@ -5045,6 +5045,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
7002 + return NULL;
7003 + }
7004 +
7005 ++static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
7006 ++{
7007 ++ struct ftrace_direct_func *direct;
7008 ++
7009 ++ direct = kmalloc(sizeof(*direct), GFP_KERNEL);
7010 ++ if (!direct)
7011 ++ return NULL;
7012 ++ direct->addr = addr;
7013 ++ direct->count = 0;
7014 ++ list_add_rcu(&direct->next, &ftrace_direct_funcs);
7015 ++ ftrace_direct_func_count++;
7016 ++ return direct;
7017 ++}
7018 ++
7019 + /**
7020 + * register_ftrace_direct - Call a custom trampoline directly
7021 + * @ip: The address of the nop at the beginning of a function
7022 +@@ -5120,15 +5134,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
7023 +
7024 + direct = ftrace_find_direct_func(addr);
7025 + if (!direct) {
7026 +- direct = kmalloc(sizeof(*direct), GFP_KERNEL);
7027 ++ direct = ftrace_alloc_direct_func(addr);
7028 + if (!direct) {
7029 + kfree(entry);
7030 + goto out_unlock;
7031 + }
7032 +- direct->addr = addr;
7033 +- direct->count = 0;
7034 +- list_add_rcu(&direct->next, &ftrace_direct_funcs);
7035 +- ftrace_direct_func_count++;
7036 + }
7037 +
7038 + entry->ip = ip;
7039 +@@ -5329,6 +5339,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
7040 + int modify_ftrace_direct(unsigned long ip,
7041 + unsigned long old_addr, unsigned long new_addr)
7042 + {
7043 ++ struct ftrace_direct_func *direct, *new_direct = NULL;
7044 + struct ftrace_func_entry *entry;
7045 + struct dyn_ftrace *rec;
7046 + int ret = -ENODEV;
7047 +@@ -5344,6 +5355,20 @@ int modify_ftrace_direct(unsigned long ip,
7048 + if (entry->direct != old_addr)
7049 + goto out_unlock;
7050 +
7051 ++ direct = ftrace_find_direct_func(old_addr);
7052 ++ if (WARN_ON(!direct))
7053 ++ goto out_unlock;
7054 ++ if (direct->count > 1) {
7055 ++ ret = -ENOMEM;
7056 ++ new_direct = ftrace_alloc_direct_func(new_addr);
7057 ++ if (!new_direct)
7058 ++ goto out_unlock;
7059 ++ direct->count--;
7060 ++ new_direct->count++;
7061 ++ } else {
7062 ++ direct->addr = new_addr;
7063 ++ }
7064 ++
7065 + /*
7066 + * If there's no other ftrace callback on the rec->ip location,
7067 + * then it can be changed directly by the architecture.
7068 +@@ -5357,6 +5382,14 @@ int modify_ftrace_direct(unsigned long ip,
7069 + ret = 0;
7070 + }
7071 +
7072 ++ if (unlikely(ret && new_direct)) {
7073 ++ direct->count++;
7074 ++ list_del_rcu(&new_direct->next);
7075 ++ synchronize_rcu_tasks();
7076 ++ kfree(new_direct);
7077 ++ ftrace_direct_func_count--;
7078 ++ }
7079 ++
7080 + out_unlock:
7081 + mutex_unlock(&ftrace_lock);
7082 + mutex_unlock(&direct_mutex);
7083 +diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c
7084 +index 0b35212ffc3d0..bb7bb3b478abf 100644
7085 +--- a/kernel/usermode_driver.c
7086 ++++ b/kernel/usermode_driver.c
7087 +@@ -139,13 +139,22 @@ static void umd_cleanup(struct subprocess_info *info)
7088 + struct umd_info *umd_info = info->data;
7089 +
7090 + /* cleanup if umh_setup() was successful but exec failed */
7091 +- if (info->retval) {
7092 +- fput(umd_info->pipe_to_umh);
7093 +- fput(umd_info->pipe_from_umh);
7094 +- put_pid(umd_info->tgid);
7095 +- umd_info->tgid = NULL;
7096 +- }
7097 ++ if (info->retval)
7098 ++ umd_cleanup_helper(umd_info);
7099 ++}
7100 ++
7101 ++/**
7102 ++ * umd_cleanup_helper - release the resources which were allocated in umd_setup
7103 ++ * @info: information about usermode driver
7104 ++ */
7105 ++void umd_cleanup_helper(struct umd_info *info)
7106 ++{
7107 ++ fput(info->pipe_to_umh);
7108 ++ fput(info->pipe_from_umh);
7109 ++ put_pid(info->tgid);
7110 ++ info->tgid = NULL;
7111 + }
7112 ++EXPORT_SYMBOL_GPL(umd_cleanup_helper);
7113 +
7114 + /**
7115 + * fork_usermode_driver - fork a usermode driver
7116 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
7117 +index 4a78514830d5a..d9ade23ac2b22 100644
7118 +--- a/mm/huge_memory.c
7119 ++++ b/mm/huge_memory.c
7120 +@@ -2433,7 +2433,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
7121 + lruvec = mem_cgroup_page_lruvec(head, pgdat);
7122 +
7123 + /* complete memcg works before add pages to LRU */
7124 +- mem_cgroup_split_huge_fixup(head);
7125 ++ split_page_memcg(head, nr);
7126 +
7127 + if (PageAnon(head) && PageSwapCache(head)) {
7128 + swp_entry_t entry = { .val = page_private(head) };
7129 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
7130 +index 94c6b3d4df96a..573f1a0183be6 100644
7131 +--- a/mm/hugetlb.c
7132 ++++ b/mm/hugetlb.c
7133 +@@ -285,6 +285,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
7134 + nrg->reservation_counter =
7135 + &h_cg->rsvd_hugepage[hstate_index(h)];
7136 + nrg->css = &h_cg->css;
7137 ++ /*
7138 ++ * The caller will hold exactly one h_cg->css reference for the
7139 ++ * whole contiguous reservation region. But this area might be
7140 ++ * scattered when there are already some file_regions residing in
7141 ++ * it. As a result, many file_regions may share only one css
7142 ++ * reference. In order to ensure that each file_region holds
7143 ++ * exactly one h_cg->css reference, we should do css_get for
7144 ++ * each file_region and leave the reference held by the caller
7145 ++ * untouched.
7146 ++ */
7147 ++ css_get(&h_cg->css);
7148 + if (!resv->pages_per_hpage)
7149 + resv->pages_per_hpage = pages_per_huge_page(h);
7150 + /* pages_per_hpage should be the same for all entries in
7151 +@@ -298,6 +309,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
7152 + #endif
7153 + }
7154 +
7155 ++static void put_uncharge_info(struct file_region *rg)
7156 ++{
7157 ++#ifdef CONFIG_CGROUP_HUGETLB
7158 ++ if (rg->css)
7159 ++ css_put(rg->css);
7160 ++#endif
7161 ++}
7162 ++
7163 + static bool has_same_uncharge_info(struct file_region *rg,
7164 + struct file_region *org)
7165 + {
7166 +@@ -321,6 +340,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
7167 + prg->to = rg->to;
7168 +
7169 + list_del(&rg->link);
7170 ++ put_uncharge_info(rg);
7171 + kfree(rg);
7172 +
7173 + rg = prg;
7174 +@@ -332,6 +352,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
7175 + nrg->from = rg->from;
7176 +
7177 + list_del(&rg->link);
7178 ++ put_uncharge_info(rg);
7179 + kfree(rg);
7180 + }
7181 + }
7182 +@@ -664,7 +685,7 @@ retry:
7183 +
7184 + del += t - f;
7185 + hugetlb_cgroup_uncharge_file_region(
7186 +- resv, rg, t - f);
7187 ++ resv, rg, t - f, false);
7188 +
7189 + /* New entry for end of split region */
7190 + nrg->from = t;
7191 +@@ -685,7 +706,7 @@ retry:
7192 + if (f <= rg->from && t >= rg->to) { /* Remove entire region */
7193 + del += rg->to - rg->from;
7194 + hugetlb_cgroup_uncharge_file_region(resv, rg,
7195 +- rg->to - rg->from);
7196 ++ rg->to - rg->from, true);
7197 + list_del(&rg->link);
7198 + kfree(rg);
7199 + continue;
7200 +@@ -693,13 +714,13 @@ retry:
7201 +
7202 + if (f <= rg->from) { /* Trim beginning of region */
7203 + hugetlb_cgroup_uncharge_file_region(resv, rg,
7204 +- t - rg->from);
7205 ++ t - rg->from, false);
7206 +
7207 + del += t - rg->from;
7208 + rg->from = t;
7209 + } else { /* Trim end of region */
7210 + hugetlb_cgroup_uncharge_file_region(resv, rg,
7211 +- rg->to - f);
7212 ++ rg->to - f, false);
7213 +
7214 + del += rg->to - f;
7215 + rg->to = f;
7216 +@@ -5189,6 +5210,10 @@ int hugetlb_reserve_pages(struct inode *inode,
7217 + */
7218 + long rsv_adjust;
7219 +
7220 ++ /*
7221 ++ * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
7222 ++ * reference to h_cg->css. See the comment below for details.
7223 ++ */
7224 + hugetlb_cgroup_uncharge_cgroup_rsvd(
7225 + hstate_index(h),
7226 + (chg - add) * pages_per_huge_page(h), h_cg);
7227 +@@ -5196,6 +5221,14 @@ int hugetlb_reserve_pages(struct inode *inode,
7228 + rsv_adjust = hugepage_subpool_put_pages(spool,
7229 + chg - add);
7230 + hugetlb_acct_memory(h, -rsv_adjust);
7231 ++ } else if (h_cg) {
7232 ++ /*
7233 ++ * The file_regions will hold their own reference to
7234 ++ * h_cg->css. So we should release the reference held
7235 ++ * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
7236 ++ * done.
7237 ++ */
7238 ++ hugetlb_cgroup_put_rsvd_cgroup(h_cg);
7239 + }
7240 + }
7241 + return 0;
7242 +diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
7243 +index 9182848dda3e0..1348819f546cb 100644
7244 +--- a/mm/hugetlb_cgroup.c
7245 ++++ b/mm/hugetlb_cgroup.c
7246 +@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
7247 +
7248 + void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
7249 + struct file_region *rg,
7250 +- unsigned long nr_pages)
7251 ++ unsigned long nr_pages,
7252 ++ bool region_del)
7253 + {
7254 + if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
7255 + return;
7256 +@@ -400,7 +401,12 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
7257 + !resv->reservation_counter) {
7258 + page_counter_uncharge(rg->reservation_counter,
7259 + nr_pages * resv->pages_per_hpage);
7260 +- css_put(rg->css);
7261 ++ /*
7262 ++ * Only do css_put(rg->css) when we delete the entire region
7263 ++ * because one file_region must hold exactly one css reference.
7264 ++ */
7265 ++ if (region_del)
7266 ++ css_put(rg->css);
7267 + }
7268 + }
7269 +
7270 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
7271 +index d6966f1ebc7af..d72d2b90474a4 100644
7272 +--- a/mm/memcontrol.c
7273 ++++ b/mm/memcontrol.c
7274 +@@ -3268,26 +3268,25 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
7275 +
7276 + #endif /* CONFIG_MEMCG_KMEM */
7277 +
7278 +-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7279 +-
7280 + /*
7281 +- * Because tail pages are not marked as "used", set it. We're under
7282 +- * pgdat->lru_lock and migration entries setup in all page mappings.
7283 ++ * Because head->mem_cgroup is not set on tails, set it now.
7284 + */
7285 +-void mem_cgroup_split_huge_fixup(struct page *head)
7286 ++void split_page_memcg(struct page *head, unsigned int nr)
7287 + {
7288 + struct mem_cgroup *memcg = head->mem_cgroup;
7289 ++ int kmemcg = PageKmemcg(head);
7290 + int i;
7291 +
7292 +- if (mem_cgroup_disabled())
7293 ++ if (mem_cgroup_disabled() || !memcg)
7294 + return;
7295 +
7296 +- for (i = 1; i < HPAGE_PMD_NR; i++) {
7297 +- css_get(&memcg->css);
7298 ++ for (i = 1; i < nr; i++) {
7299 + head[i].mem_cgroup = memcg;
7300 ++ if (kmemcg)
7301 ++ __SetPageKmemcg(head + i);
7302 + }
7303 ++ css_get_many(&memcg->css, nr - 1);
7304 + }
7305 +-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
7306 +
7307 + #ifdef CONFIG_MEMCG_SWAP
7308 + /**
7309 +diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
7310 +index 5654dd19addc0..07f42a7a60657 100644
7311 +--- a/mm/mmu_notifier.c
7312 ++++ b/mm/mmu_notifier.c
7313 +@@ -501,10 +501,33 @@ static int mn_hlist_invalidate_range_start(
7314 + "");
7315 + WARN_ON(mmu_notifier_range_blockable(range) ||
7316 + _ret != -EAGAIN);
7317 ++ /*
7318 ++ * We call all the notifiers on any EAGAIN;
7319 ++ * there is no way for a notifier to know if
7320 ++ * its start method failed, thus a start that
7321 ++ * does EAGAIN can't also do end.
7322 ++ */
7323 ++ WARN_ON(ops->invalidate_range_end);
7324 + ret = _ret;
7325 + }
7326 + }
7327 + }
7328 ++
7329 ++ if (ret) {
7330 ++ /*
7331 ++ * Must be non-blocking to get here. If there are multiple
7332 ++ * notifiers and one or more failed to start, any that started
7333 ++ * successfully are expecting their end to be called. Do so now.
7334 ++ */
7335 ++ hlist_for_each_entry_rcu(subscription, &subscriptions->list,
7336 ++ hlist, srcu_read_lock_held(&srcu)) {
7337 ++ if (!subscription->ops->invalidate_range_end)
7338 ++ continue;
7339 ++
7340 ++ subscription->ops->invalidate_range_end(subscription,
7341 ++ range);
7342 ++ }
7343 ++ }
7344 + srcu_read_unlock(&srcu, id);
7345 +
7346 + return ret;
7347 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7348 +index 690f79c781cf7..7ffa706e5c305 100644
7349 +--- a/mm/page_alloc.c
7350 ++++ b/mm/page_alloc.c
7351 +@@ -3272,6 +3272,7 @@ void split_page(struct page *page, unsigned int order)
7352 + for (i = 1; i < (1 << order); i++)
7353 + set_page_refcounted(page + i);
7354 + split_page_owner(page, 1 << order);
7355 ++ split_page_memcg(page, 1 << order);
7356 + }
7357 + EXPORT_SYMBOL_GPL(split_page);
7358 +
7359 +diff --git a/mm/z3fold.c b/mm/z3fold.c
7360 +index 0152ad9931a87..8ae944eeb8e20 100644
7361 +--- a/mm/z3fold.c
7362 ++++ b/mm/z3fold.c
7363 +@@ -1350,8 +1350,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
7364 + page = list_entry(pos, struct page, lru);
7365 +
7366 + zhdr = page_address(page);
7367 +- if (test_bit(PAGE_HEADLESS, &page->private))
7368 ++ if (test_bit(PAGE_HEADLESS, &page->private)) {
7369 ++ /*
7370 ++ * For non-headless pages, we wait to do this
7371 ++ * until we have the page lock to avoid racing
7372 ++ * with __z3fold_alloc(). Headless pages don't
7373 ++ * have a lock (and __z3fold_alloc() will never
7374 ++ * see them), but we still need to test and set
7375 ++ * PAGE_CLAIMED to avoid racing with
7376 ++ * z3fold_free(), so just do it now before
7377 ++ * leaving the loop.
7378 ++ */
7379 ++ if (test_and_set_bit(PAGE_CLAIMED, &page->private))
7380 ++ continue;
7381 ++
7382 + break;
7383 ++ }
7384 +
7385 + if (kref_get_unless_zero(&zhdr->refcount) == 0) {
7386 + zhdr = NULL;
7387 +diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
7388 +index 015209bf44aa4..3c42095fa75fd 100644
7389 +--- a/net/bridge/br_switchdev.c
7390 ++++ b/net/bridge/br_switchdev.c
7391 +@@ -123,6 +123,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
7392 + {
7393 + if (!fdb->dst)
7394 + return;
7395 ++ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
7396 ++ return;
7397 +
7398 + switch (type) {
7399 + case RTM_DELNEIGH:
7400 +diff --git a/net/can/isotp.c b/net/can/isotp.c
7401 +index 8bd565f2073e7..ea1e227b8e544 100644
7402 +--- a/net/can/isotp.c
7403 ++++ b/net/can/isotp.c
7404 +@@ -196,7 +196,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
7405 + nskb->dev = dev;
7406 + can_skb_set_owner(nskb, sk);
7407 + ncf = (struct canfd_frame *)nskb->data;
7408 +- skb_put(nskb, so->ll.mtu);
7409 ++ skb_put_zero(nskb, so->ll.mtu);
7410 +
7411 + /* create & send flow control reply */
7412 + ncf->can_id = so->txid;
7413 +@@ -215,8 +215,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
7414 + if (ae)
7415 + ncf->data[0] = so->opt.ext_address;
7416 +
7417 +- if (so->ll.mtu == CANFD_MTU)
7418 +- ncf->flags = so->ll.tx_flags;
7419 ++ ncf->flags = so->ll.tx_flags;
7420 +
7421 + can_send_ret = can_send(nskb, 1);
7422 + if (can_send_ret)
7423 +@@ -780,7 +779,7 @@ isotp_tx_burst:
7424 + can_skb_prv(skb)->skbcnt = 0;
7425 +
7426 + cf = (struct canfd_frame *)skb->data;
7427 +- skb_put(skb, so->ll.mtu);
7428 ++ skb_put_zero(skb, so->ll.mtu);
7429 +
7430 + /* create consecutive frame */
7431 + isotp_fill_dataframe(cf, so, ae, 0);
7432 +@@ -790,8 +789,7 @@ isotp_tx_burst:
7433 + so->tx.sn %= 16;
7434 + so->tx.bs++;
7435 +
7436 +- if (so->ll.mtu == CANFD_MTU)
7437 +- cf->flags = so->ll.tx_flags;
7438 ++ cf->flags = so->ll.tx_flags;
7439 +
7440 + skb->dev = dev;
7441 + can_skb_set_owner(skb, sk);
7442 +@@ -889,7 +887,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
7443 + so->tx.idx = 0;
7444 +
7445 + cf = (struct canfd_frame *)skb->data;
7446 +- skb_put(skb, so->ll.mtu);
7447 ++ skb_put_zero(skb, so->ll.mtu);
7448 +
7449 + /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
7450 + off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
7451 +@@ -934,8 +932,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
7452 + }
7453 +
7454 + /* send the first or only CAN frame */
7455 +- if (so->ll.mtu == CANFD_MTU)
7456 +- cf->flags = so->ll.tx_flags;
7457 ++ cf->flags = so->ll.tx_flags;
7458 +
7459 + skb->dev = dev;
7460 + skb->sk = sk;
7461 +@@ -1212,7 +1209,8 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
7462 + if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
7463 + return -EINVAL;
7464 +
7465 +- if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
7466 ++ if (ll.mtu == CAN_MTU &&
7467 ++ (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
7468 + return -EINVAL;
7469 +
7470 + memcpy(&so->ll, &ll, sizeof(ll));
7471 +diff --git a/net/core/dev.c b/net/core/dev.c
7472 +index 75ca6c6d01d6e..62ff7121b22d3 100644
7473 +--- a/net/core/dev.c
7474 ++++ b/net/core/dev.c
7475 +@@ -1195,6 +1195,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
7476 + return -ENOMEM;
7477 +
7478 + for_each_netdev(net, d) {
7479 ++ struct netdev_name_node *name_node;
7480 ++ list_for_each_entry(name_node, &d->name_node->list, list) {
7481 ++ if (!sscanf(name_node->name, name, &i))
7482 ++ continue;
7483 ++ if (i < 0 || i >= max_netdevices)
7484 ++ continue;
7485 ++
7486 ++ /* avoid cases where sscanf is not an exact inverse of printf */
7487 ++ snprintf(buf, IFNAMSIZ, name, i);
7488 ++ if (!strncmp(buf, name_node->name, IFNAMSIZ))
7489 ++ set_bit(i, inuse);
7490 ++ }
7491 + if (!sscanf(d->name, name, &i))
7492 + continue;
7493 + if (i < 0 || i >= max_netdevices)
7494 +@@ -11094,7 +11106,7 @@ static void __net_exit default_device_exit(struct net *net)
7495 + continue;
7496 +
7497 + /* Leave virtual devices for the generic cleanup */
7498 +- if (dev->rtnl_link_ops)
7499 ++ if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
7500 + continue;
7501 +
7502 + /* Push remaining network devices to init_net */
7503 +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
7504 +index 571f191c06d94..db65ce62b625a 100644
7505 +--- a/net/core/drop_monitor.c
7506 ++++ b/net/core/drop_monitor.c
7507 +@@ -1053,6 +1053,20 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
7508 + return 0;
7509 +
7510 + err_module_put:
7511 ++ for_each_possible_cpu(cpu) {
7512 ++ struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
7513 ++ struct sk_buff *skb;
7514 ++
7515 ++ del_timer_sync(&hw_data->send_timer);
7516 ++ cancel_work_sync(&hw_data->dm_alert_work);
7517 ++ while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
7518 ++ struct devlink_trap_metadata *hw_metadata;
7519 ++
7520 ++ hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
7521 ++ net_dm_hw_metadata_free(hw_metadata);
7522 ++ consume_skb(skb);
7523 ++ }
7524 ++ }
7525 + module_put(THIS_MODULE);
7526 + return rc;
7527 + }
7528 +@@ -1134,6 +1148,15 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
7529 + err_unregister_trace:
7530 + unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
7531 + err_module_put:
7532 ++ for_each_possible_cpu(cpu) {
7533 ++ struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
7534 ++ struct sk_buff *skb;
7535 ++
7536 ++ del_timer_sync(&data->send_timer);
7537 ++ cancel_work_sync(&data->dm_alert_work);
7538 ++ while ((skb = __skb_dequeue(&data->drop_queue)))
7539 ++ consume_skb(skb);
7540 ++ }
7541 + module_put(THIS_MODULE);
7542 + return rc;
7543 + }
7544 +diff --git a/net/core/dst.c b/net/core/dst.c
7545 +index 0c01bd8d9d81e..fb3bcba87744d 100644
7546 +--- a/net/core/dst.c
7547 ++++ b/net/core/dst.c
7548 +@@ -237,37 +237,62 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
7549 + }
7550 + EXPORT_SYMBOL(__dst_destroy_metrics_generic);
7551 +
7552 +-static struct dst_ops md_dst_ops = {
7553 +- .family = AF_UNSPEC,
7554 +-};
7555 ++struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
7556 ++{
7557 ++ return NULL;
7558 ++}
7559 +
7560 +-static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
7561 ++u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
7562 + {
7563 +- WARN_ONCE(1, "Attempting to call output on metadata dst\n");
7564 +- kfree_skb(skb);
7565 +- return 0;
7566 ++ return NULL;
7567 + }
7568 +
7569 +-static int dst_md_discard(struct sk_buff *skb)
7570 ++struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
7571 ++ struct sk_buff *skb,
7572 ++ const void *daddr)
7573 + {
7574 +- WARN_ONCE(1, "Attempting to call input on metadata dst\n");
7575 +- kfree_skb(skb);
7576 +- return 0;
7577 ++ return NULL;
7578 ++}
7579 ++
7580 ++void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
7581 ++ struct sk_buff *skb, u32 mtu,
7582 ++ bool confirm_neigh)
7583 ++{
7584 ++}
7585 ++EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
7586 ++
7587 ++void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
7588 ++ struct sk_buff *skb)
7589 ++{
7590 ++}
7591 ++EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
7592 ++
7593 ++unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
7594 ++{
7595 ++ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
7596 ++
7597 ++ return mtu ? : dst->dev->mtu;
7598 + }
7599 ++EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
7600 ++
7601 ++static struct dst_ops dst_blackhole_ops = {
7602 ++ .family = AF_UNSPEC,
7603 ++ .neigh_lookup = dst_blackhole_neigh_lookup,
7604 ++ .check = dst_blackhole_check,
7605 ++ .cow_metrics = dst_blackhole_cow_metrics,
7606 ++ .update_pmtu = dst_blackhole_update_pmtu,
7607 ++ .redirect = dst_blackhole_redirect,
7608 ++ .mtu = dst_blackhole_mtu,
7609 ++};
7610 +
7611 + static void __metadata_dst_init(struct metadata_dst *md_dst,
7612 + enum metadata_type type, u8 optslen)
7613 +-
7614 + {
7615 + struct dst_entry *dst;
7616 +
7617 + dst = &md_dst->dst;
7618 +- dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
7619 ++ dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
7620 + DST_METADATA | DST_NOCOUNT);
7621 +-
7622 +- dst->input = dst_md_discard;
7623 +- dst->output = dst_md_discard_out;
7624 +-
7625 + memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
7626 + md_dst->type = type;
7627 + }
7628 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
7629 +index e21950a2c8970..c79be25b2e0c2 100644
7630 +--- a/net/core/flow_dissector.c
7631 ++++ b/net/core/flow_dissector.c
7632 +@@ -175,7 +175,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
7633 + * avoid confusion with packets without such field
7634 + */
7635 + if (icmp_has_id(ih->type))
7636 +- key_icmp->id = ih->un.echo.id ? : 1;
7637 ++ key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
7638 + else
7639 + key_icmp->id = 0;
7640 + }
7641 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
7642 +index 78ee1b5acf1f1..49f4034bf1263 100644
7643 +--- a/net/dccp/ipv6.c
7644 ++++ b/net/dccp/ipv6.c
7645 +@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
7646 + if (!ipv6_unicast_destination(skb))
7647 + return 0; /* discard, don't send a reset here */
7648 +
7649 ++ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
7650 ++ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
7651 ++ return 0;
7652 ++ }
7653 ++
7654 + if (dccp_bad_service_code(sk, service)) {
7655 + dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
7656 + goto drop;
7657 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
7658 +index 48d2b615edc26..1dfa561e8f981 100644
7659 +--- a/net/ipv4/inet_connection_sock.c
7660 ++++ b/net/ipv4/inet_connection_sock.c
7661 +@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
7662 + return found;
7663 + }
7664 +
7665 +-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
7666 ++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
7667 + {
7668 +- if (reqsk_queue_unlink(req)) {
7669 ++ bool unlinked = reqsk_queue_unlink(req);
7670 ++
7671 ++ if (unlinked) {
7672 + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
7673 + reqsk_put(req);
7674 + }
7675 ++ return unlinked;
7676 + }
7677 + EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
7678 +
7679 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
7680 +index c576a63d09db1..d1e04d2b5170e 100644
7681 +--- a/net/ipv4/netfilter/arp_tables.c
7682 ++++ b/net/ipv4/netfilter/arp_tables.c
7683 +@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
7684 +
7685 + local_bh_disable();
7686 + addend = xt_write_recseq_begin();
7687 +- private = rcu_access_pointer(table->private);
7688 ++ private = READ_ONCE(table->private); /* Address dependency. */
7689 + cpu = smp_processor_id();
7690 + table_base = private->entries;
7691 + jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
7692 +@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
7693 + {
7694 + unsigned int countersize;
7695 + struct xt_counters *counters;
7696 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7697 ++ const struct xt_table_info *private = table->private;
7698 +
7699 + /* We need atomic snapshot of counters: rest doesn't change
7700 + * (other than comefrom, which userspace doesn't care
7701 +@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
7702 + unsigned int off, num;
7703 + const struct arpt_entry *e;
7704 + struct xt_counters *counters;
7705 +- struct xt_table_info *private = xt_table_get_private_protected(table);
7706 ++ struct xt_table_info *private = table->private;
7707 + int ret = 0;
7708 + void *loc_cpu_entry;
7709 +
7710 +@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
7711 + t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
7712 + if (!IS_ERR(t)) {
7713 + struct arpt_getinfo info;
7714 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7715 ++ const struct xt_table_info *private = t->private;
7716 + #ifdef CONFIG_COMPAT
7717 + struct xt_table_info tmp;
7718 +
7719 +@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
7720 +
7721 + t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
7722 + if (!IS_ERR(t)) {
7723 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7724 ++ const struct xt_table_info *private = t->private;
7725 +
7726 + if (get.size == private->size)
7727 + ret = copy_entries_to_user(private->size,
7728 +@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
7729 + }
7730 +
7731 + local_bh_disable();
7732 +- private = xt_table_get_private_protected(t);
7733 ++ private = t->private;
7734 + if (private->number != tmp.num_counters) {
7735 + ret = -EINVAL;
7736 + goto unlock_up_free;
7737 +@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
7738 + void __user *userptr)
7739 + {
7740 + struct xt_counters *counters;
7741 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7742 ++ const struct xt_table_info *private = table->private;
7743 + void __user *pos;
7744 + unsigned int size;
7745 + int ret = 0;
7746 +@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
7747 + xt_compat_lock(NFPROTO_ARP);
7748 + t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
7749 + if (!IS_ERR(t)) {
7750 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7751 ++ const struct xt_table_info *private = t->private;
7752 + struct xt_table_info info;
7753 +
7754 + ret = compat_table_info(private, &info);
7755 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
7756 +index e8f6f9d862376..f15bc21d73016 100644
7757 +--- a/net/ipv4/netfilter/ip_tables.c
7758 ++++ b/net/ipv4/netfilter/ip_tables.c
7759 +@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
7760 + WARN_ON(!(table->valid_hooks & (1 << hook)));
7761 + local_bh_disable();
7762 + addend = xt_write_recseq_begin();
7763 +- private = rcu_access_pointer(table->private);
7764 ++ private = READ_ONCE(table->private); /* Address dependency. */
7765 + cpu = smp_processor_id();
7766 + table_base = private->entries;
7767 + jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
7768 +@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
7769 + {
7770 + unsigned int countersize;
7771 + struct xt_counters *counters;
7772 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7773 ++ const struct xt_table_info *private = table->private;
7774 +
7775 + /* We need atomic snapshot of counters: rest doesn't change
7776 + (other than comefrom, which userspace doesn't care
7777 +@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
7778 + unsigned int off, num;
7779 + const struct ipt_entry *e;
7780 + struct xt_counters *counters;
7781 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7782 ++ const struct xt_table_info *private = table->private;
7783 + int ret = 0;
7784 + const void *loc_cpu_entry;
7785 +
7786 +@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
7787 + t = xt_request_find_table_lock(net, AF_INET, name);
7788 + if (!IS_ERR(t)) {
7789 + struct ipt_getinfo info;
7790 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7791 ++ const struct xt_table_info *private = t->private;
7792 + #ifdef CONFIG_COMPAT
7793 + struct xt_table_info tmp;
7794 +
7795 +@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
7796 +
7797 + t = xt_find_table_lock(net, AF_INET, get.name);
7798 + if (!IS_ERR(t)) {
7799 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7800 ++ const struct xt_table_info *private = t->private;
7801 + if (get.size == private->size)
7802 + ret = copy_entries_to_user(private->size,
7803 + t, uptr->entrytable);
7804 +@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
7805 + }
7806 +
7807 + local_bh_disable();
7808 +- private = xt_table_get_private_protected(t);
7809 ++ private = t->private;
7810 + if (private->number != tmp.num_counters) {
7811 + ret = -EINVAL;
7812 + goto unlock_up_free;
7813 +@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
7814 + void __user *userptr)
7815 + {
7816 + struct xt_counters *counters;
7817 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7818 ++ const struct xt_table_info *private = table->private;
7819 + void __user *pos;
7820 + unsigned int size;
7821 + int ret = 0;
7822 +@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
7823 + xt_compat_lock(AF_INET);
7824 + t = xt_find_table_lock(net, AF_INET, get.name);
7825 + if (!IS_ERR(t)) {
7826 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7827 ++ const struct xt_table_info *private = t->private;
7828 + struct xt_table_info info;
7829 + ret = compat_table_info(private, &info);
7830 + if (!ret && get.size == info.size)
7831 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
7832 +index 9f43abeac3a8e..50a6d935376f5 100644
7833 +--- a/net/ipv4/route.c
7834 ++++ b/net/ipv4/route.c
7835 +@@ -2682,44 +2682,15 @@ out:
7836 + return rth;
7837 + }
7838 +
7839 +-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
7840 +-{
7841 +- return NULL;
7842 +-}
7843 +-
7844 +-static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
7845 +-{
7846 +- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
7847 +-
7848 +- return mtu ? : dst->dev->mtu;
7849 +-}
7850 +-
7851 +-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
7852 +- struct sk_buff *skb, u32 mtu,
7853 +- bool confirm_neigh)
7854 +-{
7855 +-}
7856 +-
7857 +-static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
7858 +- struct sk_buff *skb)
7859 +-{
7860 +-}
7861 +-
7862 +-static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
7863 +- unsigned long old)
7864 +-{
7865 +- return NULL;
7866 +-}
7867 +-
7868 + static struct dst_ops ipv4_dst_blackhole_ops = {
7869 +- .family = AF_INET,
7870 +- .check = ipv4_blackhole_dst_check,
7871 +- .mtu = ipv4_blackhole_mtu,
7872 +- .default_advmss = ipv4_default_advmss,
7873 +- .update_pmtu = ipv4_rt_blackhole_update_pmtu,
7874 +- .redirect = ipv4_rt_blackhole_redirect,
7875 +- .cow_metrics = ipv4_rt_blackhole_cow_metrics,
7876 +- .neigh_lookup = ipv4_neigh_lookup,
7877 ++ .family = AF_INET,
7878 ++ .default_advmss = ipv4_default_advmss,
7879 ++ .neigh_lookup = ipv4_neigh_lookup,
7880 ++ .check = dst_blackhole_check,
7881 ++ .cow_metrics = dst_blackhole_cow_metrics,
7882 ++ .update_pmtu = dst_blackhole_update_pmtu,
7883 ++ .redirect = dst_blackhole_redirect,
7884 ++ .mtu = dst_blackhole_mtu,
7885 + };
7886 +
7887 + struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
7888 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
7889 +index 495dda2449fe5..f0f67b25c97ab 100644
7890 +--- a/net/ipv4/tcp_minisocks.c
7891 ++++ b/net/ipv4/tcp_minisocks.c
7892 +@@ -804,8 +804,11 @@ embryonic_reset:
7893 + tcp_reset(sk);
7894 + }
7895 + if (!fastopen) {
7896 +- inet_csk_reqsk_queue_drop(sk, req);
7897 +- __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
7898 ++ bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
7899 ++
7900 ++ if (unlinked)
7901 ++ __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
7902 ++ *req_stolen = !unlinked;
7903 + }
7904 + return NULL;
7905 + }
7906 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
7907 +index f43e275557251..1fb79dbde0cb3 100644
7908 +--- a/net/ipv6/ip6_fib.c
7909 ++++ b/net/ipv6/ip6_fib.c
7910 +@@ -2485,7 +2485,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
7911 + const struct net_device *dev;
7912 +
7913 + if (rt->nh)
7914 +- fib6_nh = nexthop_fib6_nh(rt->nh);
7915 ++ fib6_nh = nexthop_fib6_nh_bh(rt->nh);
7916 +
7917 + seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
7918 +
7919 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
7920 +index e96304d8a4a7f..06d60662717d1 100644
7921 +--- a/net/ipv6/ip6_input.c
7922 ++++ b/net/ipv6/ip6_input.c
7923 +@@ -245,16 +245,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
7924 + if (ipv6_addr_is_multicast(&hdr->saddr))
7925 + goto err;
7926 +
7927 +- /* While RFC4291 is not explicit about v4mapped addresses
7928 +- * in IPv6 headers, it seems clear linux dual-stack
7929 +- * model can not deal properly with these.
7930 +- * Security models could be fooled by ::ffff:127.0.0.1 for example.
7931 +- *
7932 +- * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
7933 +- */
7934 +- if (ipv6_addr_v4mapped(&hdr->saddr))
7935 +- goto err;
7936 +-
7937 + skb->transport_header = skb->network_header + sizeof(*hdr);
7938 + IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
7939 +
7940 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
7941 +index 0d453fa9e327b..2e2119bfcf137 100644
7942 +--- a/net/ipv6/netfilter/ip6_tables.c
7943 ++++ b/net/ipv6/netfilter/ip6_tables.c
7944 +@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
7945 +
7946 + local_bh_disable();
7947 + addend = xt_write_recseq_begin();
7948 +- private = rcu_access_pointer(table->private);
7949 ++ private = READ_ONCE(table->private); /* Address dependency. */
7950 + cpu = smp_processor_id();
7951 + table_base = private->entries;
7952 + jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
7953 +@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
7954 + {
7955 + unsigned int countersize;
7956 + struct xt_counters *counters;
7957 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7958 ++ const struct xt_table_info *private = table->private;
7959 +
7960 + /* We need atomic snapshot of counters: rest doesn't change
7961 + (other than comefrom, which userspace doesn't care
7962 +@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
7963 + unsigned int off, num;
7964 + const struct ip6t_entry *e;
7965 + struct xt_counters *counters;
7966 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
7967 ++ const struct xt_table_info *private = table->private;
7968 + int ret = 0;
7969 + const void *loc_cpu_entry;
7970 +
7971 +@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
7972 + t = xt_request_find_table_lock(net, AF_INET6, name);
7973 + if (!IS_ERR(t)) {
7974 + struct ip6t_getinfo info;
7975 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
7976 ++ const struct xt_table_info *private = t->private;
7977 + #ifdef CONFIG_COMPAT
7978 + struct xt_table_info tmp;
7979 +
7980 +@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
7981 +
7982 + t = xt_find_table_lock(net, AF_INET6, get.name);
7983 + if (!IS_ERR(t)) {
7984 +- struct xt_table_info *private = xt_table_get_private_protected(t);
7985 ++ struct xt_table_info *private = t->private;
7986 + if (get.size == private->size)
7987 + ret = copy_entries_to_user(private->size,
7988 + t, uptr->entrytable);
7989 +@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
7990 + }
7991 +
7992 + local_bh_disable();
7993 +- private = xt_table_get_private_protected(t);
7994 ++ private = t->private;
7995 + if (private->number != tmp.num_counters) {
7996 + ret = -EINVAL;
7997 + goto unlock_up_free;
7998 +@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
7999 + void __user *userptr)
8000 + {
8001 + struct xt_counters *counters;
8002 +- const struct xt_table_info *private = xt_table_get_private_protected(table);
8003 ++ const struct xt_table_info *private = table->private;
8004 + void __user *pos;
8005 + unsigned int size;
8006 + int ret = 0;
8007 +@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
8008 + xt_compat_lock(AF_INET6);
8009 + t = xt_find_table_lock(net, AF_INET6, get.name);
8010 + if (!IS_ERR(t)) {
8011 +- const struct xt_table_info *private = xt_table_get_private_protected(t);
8012 ++ const struct xt_table_info *private = t->private;
8013 + struct xt_table_info info;
8014 + ret = compat_table_info(private, &info);
8015 + if (!ret && get.size == info.size)
8016 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
8017 +index 7e0ce7af82346..fa276448d5a2a 100644
8018 +--- a/net/ipv6/route.c
8019 ++++ b/net/ipv6/route.c
8020 +@@ -258,34 +258,16 @@ static struct dst_ops ip6_dst_ops_template = {
8021 + .confirm_neigh = ip6_confirm_neigh,
8022 + };
8023 +
8024 +-static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
8025 +-{
8026 +- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
8027 +-
8028 +- return mtu ? : dst->dev->mtu;
8029 +-}
8030 +-
8031 +-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
8032 +- struct sk_buff *skb, u32 mtu,
8033 +- bool confirm_neigh)
8034 +-{
8035 +-}
8036 +-
8037 +-static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
8038 +- struct sk_buff *skb)
8039 +-{
8040 +-}
8041 +-
8042 + static struct dst_ops ip6_dst_blackhole_ops = {
8043 +- .family = AF_INET6,
8044 +- .destroy = ip6_dst_destroy,
8045 +- .check = ip6_dst_check,
8046 +- .mtu = ip6_blackhole_mtu,
8047 +- .default_advmss = ip6_default_advmss,
8048 +- .update_pmtu = ip6_rt_blackhole_update_pmtu,
8049 +- .redirect = ip6_rt_blackhole_redirect,
8050 +- .cow_metrics = dst_cow_metrics_generic,
8051 +- .neigh_lookup = ip6_dst_neigh_lookup,
8052 ++ .family = AF_INET6,
8053 ++ .default_advmss = ip6_default_advmss,
8054 ++ .neigh_lookup = ip6_dst_neigh_lookup,
8055 ++ .check = ip6_dst_check,
8056 ++ .destroy = ip6_dst_destroy,
8057 ++ .cow_metrics = dst_cow_metrics_generic,
8058 ++ .update_pmtu = dst_blackhole_update_pmtu,
8059 ++ .redirect = dst_blackhole_redirect,
8060 ++ .mtu = dst_blackhole_mtu,
8061 + };
8062 +
8063 + static const u32 ip6_template_metrics[RTAX_MAX] = {
8064 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
8065 +index 991dc36f95ff0..3f9bb6dd1f986 100644
8066 +--- a/net/ipv6/tcp_ipv6.c
8067 ++++ b/net/ipv6/tcp_ipv6.c
8068 +@@ -1170,6 +1170,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
8069 + if (!ipv6_unicast_destination(skb))
8070 + goto drop;
8071 +
8072 ++ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
8073 ++ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
8074 ++ return 0;
8075 ++ }
8076 ++
8077 + return tcp_conn_request(&tcp6_request_sock_ops,
8078 + &tcp_request_sock_ipv6_ops, sk, skb);
8079 +
8080 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
8081 +index 7276e66ae435e..2bf6271d9e3f6 100644
8082 +--- a/net/mac80211/cfg.c
8083 ++++ b/net/mac80211/cfg.c
8084 +@@ -2961,14 +2961,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
8085 + continue;
8086 +
8087 + for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
8088 +- if (~sdata->rc_rateidx_mcs_mask[i][j]) {
8089 ++ if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
8090 + sdata->rc_has_mcs_mask[i] = true;
8091 + break;
8092 + }
8093 + }
8094 +
8095 + for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
8096 +- if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
8097 ++ if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
8098 + sdata->rc_has_vht_mcs_mask[i] = true;
8099 + break;
8100 + }
8101 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
8102 +index 1f552f374e97d..a7ac53a2f00d8 100644
8103 +--- a/net/mac80211/ibss.c
8104 ++++ b/net/mac80211/ibss.c
8105 +@@ -1874,6 +1874,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
8106 +
8107 + /* remove beacon */
8108 + kfree(sdata->u.ibss.ie);
8109 ++ sdata->u.ibss.ie = NULL;
8110 ++ sdata->u.ibss.ie_len = 0;
8111 +
8112 + /* on the next join, re-program HT parameters */
8113 + memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
8114 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
8115 +index 6adfcb9c06dcc..3f483e84d5df3 100644
8116 +--- a/net/mac80211/mlme.c
8117 ++++ b/net/mac80211/mlme.c
8118 +@@ -5023,7 +5023,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
8119 + he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
8120 + ies->data, ies->len);
8121 + if (he_oper_ie &&
8122 +- he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
8123 ++ he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3]))
8124 + he_oper = (void *)(he_oper_ie + 3);
8125 + else
8126 + he_oper = NULL;
8127 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
8128 +index 94e624e9439b7..d8f9fb0646a4d 100644
8129 +--- a/net/mac80211/util.c
8130 ++++ b/net/mac80211/util.c
8131 +@@ -967,7 +967,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
8132 + break;
8133 + case WLAN_EID_EXT_HE_OPERATION:
8134 + if (len >= sizeof(*elems->he_operation) &&
8135 +- len == ieee80211_he_oper_size(data) - 1) {
8136 ++ len >= ieee80211_he_oper_size(data) - 1) {
8137 + if (crc)
8138 + *crc = crc32_be(*crc, (void *)elem,
8139 + elem->datalen + 2);
8140 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
8141 +index 16adba172fb94..6317b9bc86815 100644
8142 +--- a/net/mptcp/subflow.c
8143 ++++ b/net/mptcp/subflow.c
8144 +@@ -398,6 +398,11 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
8145 + if (!ipv6_unicast_destination(skb))
8146 + goto drop;
8147 +
8148 ++ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
8149 ++ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
8150 ++ return 0;
8151 ++ }
8152 ++
8153 + return tcp_conn_request(&mptcp_subflow_request_sock_ops,
8154 + &subflow_request_sock_ipv6_ops, sk, skb);
8155 +
8156 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
8157 +index 3d0fd33be0186..c1bfd8181341a 100644
8158 +--- a/net/netfilter/nf_conntrack_netlink.c
8159 ++++ b/net/netfilter/nf_conntrack_netlink.c
8160 +@@ -2960,6 +2960,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
8161 + memset(&m, 0xFF, sizeof(m));
8162 + memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
8163 + m.src.u.all = mask->src.u.all;
8164 ++ m.src.l3num = tuple->src.l3num;
8165 + m.dst.protonum = tuple->dst.protonum;
8166 +
8167 + nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
8168 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
8169 +index 4a4acbba78ff7..b03feb6e1226a 100644
8170 +--- a/net/netfilter/nf_flow_table_core.c
8171 ++++ b/net/netfilter/nf_flow_table_core.c
8172 +@@ -506,7 +506,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
8173 + {
8174 + int err;
8175 +
8176 +- INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
8177 ++ INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
8178 + flow_block_init(&flowtable->flow_block);
8179 + init_rwsem(&flowtable->flow_block_lock);
8180 +
8181 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
8182 +index 8739ef135156b..978a968d7aeda 100644
8183 +--- a/net/netfilter/nf_tables_api.c
8184 ++++ b/net/netfilter/nf_tables_api.c
8185 +@@ -6632,6 +6632,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
8186 + struct nft_hook *hook, *next;
8187 + struct nft_trans *trans;
8188 + bool unregister = false;
8189 ++ u32 flags;
8190 + int err;
8191 +
8192 + err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
8193 +@@ -6646,6 +6647,17 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
8194 + }
8195 + }
8196 +
8197 ++ if (nla[NFTA_FLOWTABLE_FLAGS]) {
8198 ++ flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
8199 ++ if (flags & ~NFT_FLOWTABLE_MASK)
8200 ++ return -EOPNOTSUPP;
8201 ++ if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
8202 ++ (flags & NFT_FLOWTABLE_HW_OFFLOAD))
8203 ++ return -EOPNOTSUPP;
8204 ++ } else {
8205 ++ flags = flowtable->data.flags;
8206 ++ }
8207 ++
8208 + err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
8209 + &flowtable_hook.list, flowtable);
8210 + if (err < 0)
8211 +@@ -6659,6 +6671,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
8212 + goto err_flowtable_update_hook;
8213 + }
8214 +
8215 ++ nft_trans_flowtable_flags(trans) = flags;
8216 + nft_trans_flowtable(trans) = flowtable;
8217 + nft_trans_flowtable_update(trans) = true;
8218 + INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
8219 +@@ -6753,8 +6766,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
8220 + if (nla[NFTA_FLOWTABLE_FLAGS]) {
8221 + flowtable->data.flags =
8222 + ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
8223 +- if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
8224 ++ if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
8225 ++ err = -EOPNOTSUPP;
8226 + goto err3;
8227 ++ }
8228 + }
8229 +
8230 + write_pnet(&flowtable->data.net, net);
8231 +@@ -7966,6 +7981,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
8232 + break;
8233 + case NFT_MSG_NEWFLOWTABLE:
8234 + if (nft_trans_flowtable_update(trans)) {
8235 ++ nft_trans_flowtable(trans)->data.flags =
8236 ++ nft_trans_flowtable_flags(trans);
8237 + nf_tables_flowtable_notify(&trans->ctx,
8238 + nft_trans_flowtable(trans),
8239 + &nft_trans_flowtable_hooks(trans),
8240 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
8241 +index bce6ca203d462..6bd31a7a27fc5 100644
8242 +--- a/net/netfilter/x_tables.c
8243 ++++ b/net/netfilter/x_tables.c
8244 +@@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
8245 + }
8246 + EXPORT_SYMBOL(xt_counters_alloc);
8247 +
8248 +-struct xt_table_info
8249 +-*xt_table_get_private_protected(const struct xt_table *table)
8250 +-{
8251 +- return rcu_dereference_protected(table->private,
8252 +- mutex_is_locked(&xt[table->af].mutex));
8253 +-}
8254 +-EXPORT_SYMBOL(xt_table_get_private_protected);
8255 +-
8256 + struct xt_table_info *
8257 + xt_replace_table(struct xt_table *table,
8258 + unsigned int num_counters,
8259 +@@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
8260 + int *error)
8261 + {
8262 + struct xt_table_info *private;
8263 ++ unsigned int cpu;
8264 + int ret;
8265 +
8266 + ret = xt_jumpstack_alloc(newinfo);
8267 +@@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
8268 + }
8269 +
8270 + /* Do the substitution. */
8271 +- private = xt_table_get_private_protected(table);
8272 ++ local_bh_disable();
8273 ++ private = table->private;
8274 +
8275 + /* Check inside lock: is the old number correct? */
8276 + if (num_counters != private->number) {
8277 + pr_debug("num_counters != table->private->number (%u/%u)\n",
8278 + num_counters, private->number);
8279 ++ local_bh_enable();
8280 + *error = -EAGAIN;
8281 + return NULL;
8282 + }
8283 +
8284 + newinfo->initial_entries = private->initial_entries;
8285 ++ /*
8286 ++ * Ensure contents of newinfo are visible before assigning to
8287 ++ * private.
8288 ++ */
8289 ++ smp_wmb();
8290 ++ table->private = newinfo;
8291 ++
8292 ++ /* make sure all cpus see new ->private value */
8293 ++ smp_mb();
8294 +
8295 +- rcu_assign_pointer(table->private, newinfo);
8296 +- synchronize_rcu();
8297 ++ /*
7298 ++ * Even though table entries have now been swapped, other CPUs
8299 ++ * may still be using the old entries...
8300 ++ */
8301 ++ local_bh_enable();
8302 ++
8303 ++ /* ... so wait for even xt_recseq on all cpus */
8304 ++ for_each_possible_cpu(cpu) {
8305 ++ seqcount_t *s = &per_cpu(xt_recseq, cpu);
8306 ++ u32 seq = raw_read_seqcount(s);
8307 ++
8308 ++ if (seq & 1) {
8309 ++ do {
8310 ++ cond_resched();
8311 ++ cpu_relax();
8312 ++ } while (seq == raw_read_seqcount(s));
8313 ++ }
8314 ++ }
8315 +
8316 + audit_log_nfcfg(table->name, table->af, private->number,
8317 + !private->number ? AUDIT_XT_OP_REGISTER :
8318 +@@ -1424,12 +1444,12 @@ struct xt_table *xt_register_table(struct net *net,
8319 + }
8320 +
8321 + /* Simplifies replace_table code. */
8322 +- rcu_assign_pointer(table->private, bootstrap);
8323 ++ table->private = bootstrap;
8324 +
8325 + if (!xt_replace_table(table, 0, newinfo, &ret))
8326 + goto unlock;
8327 +
8328 +- private = xt_table_get_private_protected(table);
8329 ++ private = table->private;
8330 + pr_debug("table->private->number = %u\n", private->number);
8331 +
8332 + /* save number of initial entries */
8333 +@@ -1452,8 +1472,7 @@ void *xt_unregister_table(struct xt_table *table)
8334 + struct xt_table_info *private;
8335 +
8336 + mutex_lock(&xt[table->af].mutex);
8337 +- private = xt_table_get_private_protected(table);
8338 +- RCU_INIT_POINTER(table->private, NULL);
8339 ++ private = table->private;
8340 + list_del(&table->list);
8341 + mutex_unlock(&xt[table->af].mutex);
8342 + audit_log_nfcfg(table->name, table->af, private->number,
8343 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
8344 +index 54031ee079a2c..45fbf5f4dcd25 100644
8345 +--- a/net/qrtr/qrtr.c
8346 ++++ b/net/qrtr/qrtr.c
8347 +@@ -1035,6 +1035,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
8348 + rc = copied;
8349 +
8350 + if (addr) {
8351 ++ /* There is an anonymous 2-byte hole after sq_family;
8352 ++ * make sure to clear it.
8353 ++ */
8354 ++ memset(addr, 0, sizeof(*addr));
8355 ++
8356 + addr->sq_family = AF_QIPCRTR;
8357 + addr->sq_node = cb->src_node;
8358 + addr->sq_port = cb->src_port;
8359 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
8360 +index 46c1b3e9f66a5..14316ba9b3b32 100644
8361 +--- a/net/sched/cls_flower.c
8362 ++++ b/net/sched/cls_flower.c
8363 +@@ -1432,7 +1432,7 @@ static int fl_set_key_ct(struct nlattr **tb,
8364 + &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
8365 + sizeof(key->ct_state));
8366 +
8367 +- err = fl_validate_ct_state(mask->ct_state,
8368 ++ err = fl_validate_ct_state(key->ct_state & mask->ct_state,
8369 + tb[TCA_FLOWER_KEY_CT_STATE_MASK],
8370 + extack);
8371 + if (err)
8372 +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
8373 +index 50f680f03a547..2adbd945bf15a 100644
8374 +--- a/net/sched/sch_choke.c
8375 ++++ b/net/sched/sch_choke.c
8376 +@@ -345,6 +345,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
8377 + struct sk_buff **old = NULL;
8378 + unsigned int mask;
8379 + u32 max_P;
8380 ++ u8 *stab;
8381 +
8382 + if (opt == NULL)
8383 + return -EINVAL;
8384 +@@ -361,8 +362,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
8385 + max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
8386 +
8387 + ctl = nla_data(tb[TCA_CHOKE_PARMS]);
8388 +-
8389 +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
8390 ++ stab = nla_data(tb[TCA_CHOKE_STAB]);
8391 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
8392 + return -EINVAL;
8393 +
8394 + if (ctl->limit > CHOKE_MAX_QUEUE)
8395 +@@ -412,7 +413,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
8396 +
8397 + red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
8398 + ctl->Plog, ctl->Scell_log,
8399 +- nla_data(tb[TCA_CHOKE_STAB]),
8400 ++ stab,
8401 + max_P);
8402 + red_set_vars(&q->vars);
8403 +
8404 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
8405 +index e0bc77533acc3..f4132dc25ac05 100644
8406 +--- a/net/sched/sch_gred.c
8407 ++++ b/net/sched/sch_gred.c
8408 +@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
8409 + struct gred_sched *table = qdisc_priv(sch);
8410 + struct gred_sched_data *q = table->tab[dp];
8411 +
8412 +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
8413 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
8414 + NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
8415 + return -EINVAL;
8416 + }
8417 +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
8418 +index b4ae34d7aa965..40adf1f07a82d 100644
8419 +--- a/net/sched/sch_red.c
8420 ++++ b/net/sched/sch_red.c
8421 +@@ -242,6 +242,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
8422 + unsigned char flags;
8423 + int err;
8424 + u32 max_P;
8425 ++ u8 *stab;
8426 +
8427 + if (tb[TCA_RED_PARMS] == NULL ||
8428 + tb[TCA_RED_STAB] == NULL)
8429 +@@ -250,7 +251,9 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
8430 + max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
8431 +
8432 + ctl = nla_data(tb[TCA_RED_PARMS]);
8433 +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
8434 ++ stab = nla_data(tb[TCA_RED_STAB]);
8435 ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
8436 ++ ctl->Scell_log, stab))
8437 + return -EINVAL;
8438 +
8439 + err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
8440 +@@ -288,7 +291,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
8441 + red_set_parms(&q->parms,
8442 + ctl->qth_min, ctl->qth_max, ctl->Wlog,
8443 + ctl->Plog, ctl->Scell_log,
8444 +- nla_data(tb[TCA_RED_STAB]),
8445 ++ stab,
8446 + max_P);
8447 + red_set_vars(&q->vars);
8448 +
8449 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
8450 +index b25e51440623b..066754a18569b 100644
8451 +--- a/net/sched/sch_sfq.c
8452 ++++ b/net/sched/sch_sfq.c
8453 +@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
8454 + }
8455 +
8456 + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
8457 +- ctl_v1->Wlog, ctl_v1->Scell_log))
8458 ++ ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
8459 + return -EINVAL;
8460 + if (ctl_v1 && ctl_v1->qth_min) {
8461 + p = kmalloc(sizeof(*p), GFP_KERNEL);
8462 +diff --git a/net/tipc/node.c b/net/tipc/node.c
8463 +index 83978d5dae594..e4452d55851f9 100644
8464 +--- a/net/tipc/node.c
8465 ++++ b/net/tipc/node.c
8466 +@@ -2855,17 +2855,22 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
8467 +
8468 + #ifdef CONFIG_TIPC_CRYPTO
8469 + static int tipc_nl_retrieve_key(struct nlattr **attrs,
8470 +- struct tipc_aead_key **key)
8471 ++ struct tipc_aead_key **pkey)
8472 + {
8473 + struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
8474 ++ struct tipc_aead_key *key;
8475 +
8476 + if (!attr)
8477 + return -ENODATA;
8478 +
8479 +- *key = (struct tipc_aead_key *)nla_data(attr);
8480 +- if (nla_len(attr) < tipc_aead_key_size(*key))
8481 ++ if (nla_len(attr) < sizeof(*key))
8482 ++ return -EINVAL;
8483 ++ key = (struct tipc_aead_key *)nla_data(attr);
8484 ++ if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
8485 ++ nla_len(attr) < tipc_aead_key_size(key))
8486 + return -EINVAL;
8487 +
8488 ++ *pkey = key;
8489 + return 0;
8490 + }
8491 +
8492 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
8493 +index 791955f5e7ec0..cf86c1376b1a4 100644
8494 +--- a/net/vmw_vsock/af_vsock.c
8495 ++++ b/net/vmw_vsock/af_vsock.c
8496 +@@ -738,6 +738,7 @@ static struct sock *__vsock_create(struct net *net,
8497 + vsk->buffer_size = psk->buffer_size;
8498 + vsk->buffer_min_size = psk->buffer_min_size;
8499 + vsk->buffer_max_size = psk->buffer_max_size;
8500 ++ security_sk_clone(parent, sk);
8501 + } else {
8502 + vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
8503 + vsk->owner = get_current_cred();
8504 +diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
8505 +index 33487e99d83e5..11c9f045ee4b9 100755
8506 +--- a/scripts/dummy-tools/gcc
8507 ++++ b/scripts/dummy-tools/gcc
8508 +@@ -89,3 +89,8 @@ if arg_contain -print-file-name=plugin "$@"; then
8509 + echo $plugin_dir
8510 + exit 0
8511 + fi
8512 ++
8513 ++# inverted return value
8514 ++if arg_contain -D__SIZEOF_INT128__=0 "$@"; then
8515 ++ exit 1
8516 ++fi
8517 +diff --git a/security/integrity/iint.c b/security/integrity/iint.c
8518 +index 1d20003243c3f..0ba01847e836c 100644
8519 +--- a/security/integrity/iint.c
8520 ++++ b/security/integrity/iint.c
8521 +@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
8522 + struct rb_node *node, *parent = NULL;
8523 + struct integrity_iint_cache *iint, *test_iint;
8524 +
8525 ++ /*
8526 ++ * The integrity subsystem's "iint_cache" is initialized at
8527 ++ * security_init(), unless "integrity" is not included in the
8528 ++ * ordered list of LSMs enabled on the boot command line.
8529 ++ */
8530 ++ if (!iint_cache)
8531 ++ panic("%s: lsm=integrity required.\n", __func__);
8532 ++
8533 + iint = integrity_iint_find(inode);
8534 + if (iint)
8535 + return iint;
8536 +diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
8537 +index 3cc8bab31ea85..63ca6e79daeb9 100644
8538 +--- a/security/selinux/include/security.h
8539 ++++ b/security/selinux/include/security.h
8540 +@@ -219,14 +219,21 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
8541 + return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
8542 + }
8543 +
8544 ++struct selinux_policy_convert_data;
8545 ++
8546 ++struct selinux_load_state {
8547 ++ struct selinux_policy *policy;
8548 ++ struct selinux_policy_convert_data *convert_data;
8549 ++};
8550 ++
8551 + int security_mls_enabled(struct selinux_state *state);
8552 + int security_load_policy(struct selinux_state *state,
8553 +- void *data, size_t len,
8554 +- struct selinux_policy **newpolicyp);
8555 ++ void *data, size_t len,
8556 ++ struct selinux_load_state *load_state);
8557 + void selinux_policy_commit(struct selinux_state *state,
8558 +- struct selinux_policy *newpolicy);
8559 ++ struct selinux_load_state *load_state);
8560 + void selinux_policy_cancel(struct selinux_state *state,
8561 +- struct selinux_policy *policy);
8562 ++ struct selinux_load_state *load_state);
8563 + int security_read_policy(struct selinux_state *state,
8564 + void **data, size_t *len);
8565 +
8566 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
8567 +index 4bde570d56a2c..2b745ae8cb981 100644
8568 +--- a/security/selinux/selinuxfs.c
8569 ++++ b/security/selinux/selinuxfs.c
8570 +@@ -616,7 +616,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
8571 +
8572 + {
8573 + struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
8574 +- struct selinux_policy *newpolicy;
8575 ++ struct selinux_load_state load_state;
8576 + ssize_t length;
8577 + void *data = NULL;
8578 +
8579 +@@ -642,23 +642,22 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
8580 + if (copy_from_user(data, buf, count) != 0)
8581 + goto out;
8582 +
8583 +- length = security_load_policy(fsi->state, data, count, &newpolicy);
8584 ++ length = security_load_policy(fsi->state, data, count, &load_state);
8585 + if (length) {
8586 + pr_warn_ratelimited("SELinux: failed to load policy\n");
8587 + goto out;
8588 + }
8589 +
8590 +- length = sel_make_policy_nodes(fsi, newpolicy);
8591 ++ length = sel_make_policy_nodes(fsi, load_state.policy);
8592 + if (length) {
8593 +- selinux_policy_cancel(fsi->state, newpolicy);
8594 +- goto out1;
8595 ++ selinux_policy_cancel(fsi->state, &load_state);
8596 ++ goto out;
8597 + }
8598 +
8599 +- selinux_policy_commit(fsi->state, newpolicy);
8600 ++ selinux_policy_commit(fsi->state, &load_state);
8601 +
8602 + length = count;
8603 +
8604 +-out1:
8605 + audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
8606 + "auid=%u ses=%u lsm=selinux res=1",
8607 + from_kuid(&init_user_ns, audit_get_loginuid(current)),
8608 +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
8609 +index 9704c8a32303f..495fc865faf55 100644
8610 +--- a/security/selinux/ss/services.c
8611 ++++ b/security/selinux/ss/services.c
8612 +@@ -66,6 +66,17 @@
8613 + #include "audit.h"
8614 + #include "policycap_names.h"
8615 +
8616 ++struct convert_context_args {
8617 ++ struct selinux_state *state;
8618 ++ struct policydb *oldp;
8619 ++ struct policydb *newp;
8620 ++};
8621 ++
8622 ++struct selinux_policy_convert_data {
8623 ++ struct convert_context_args args;
8624 ++ struct sidtab_convert_params sidtab_params;
8625 ++};
8626 ++
8627 + /* Forward declaration. */
8628 + static int context_struct_to_string(struct policydb *policydb,
8629 + struct context *context,
8630 +@@ -1975,12 +1986,6 @@ static inline int convert_context_handle_invalid_context(
8631 + return 0;
8632 + }
8633 +
8634 +-struct convert_context_args {
8635 +- struct selinux_state *state;
8636 +- struct policydb *oldp;
8637 +- struct policydb *newp;
8638 +-};
8639 +-
8640 + /*
8641 + * Convert the values in the security context
8642 + * structure `oldc' from the values specified
8643 +@@ -2160,7 +2165,7 @@ static void selinux_policy_cond_free(struct selinux_policy *policy)
8644 + }
8645 +
8646 + void selinux_policy_cancel(struct selinux_state *state,
8647 +- struct selinux_policy *policy)
8648 ++ struct selinux_load_state *load_state)
8649 + {
8650 + struct selinux_policy *oldpolicy;
8651 +
8652 +@@ -2168,7 +2173,8 @@ void selinux_policy_cancel(struct selinux_state *state,
8653 + lockdep_is_held(&state->policy_mutex));
8654 +
8655 + sidtab_cancel_convert(oldpolicy->sidtab);
8656 +- selinux_policy_free(policy);
8657 ++ selinux_policy_free(load_state->policy);
8658 ++ kfree(load_state->convert_data);
8659 + }
8660 +
8661 + static void selinux_notify_policy_change(struct selinux_state *state,
8662 +@@ -2183,9 +2189,9 @@ static void selinux_notify_policy_change(struct selinux_state *state,
8663 + }
8664 +
8665 + void selinux_policy_commit(struct selinux_state *state,
8666 +- struct selinux_policy *newpolicy)
8667 ++ struct selinux_load_state *load_state)
8668 + {
8669 +- struct selinux_policy *oldpolicy;
8670 ++ struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
8671 + u32 seqno;
8672 +
8673 + oldpolicy = rcu_dereference_protected(state->policy,
8674 +@@ -2225,6 +2231,7 @@ void selinux_policy_commit(struct selinux_state *state,
8675 + /* Free the old policy */
8676 + synchronize_rcu();
8677 + selinux_policy_free(oldpolicy);
8678 ++ kfree(load_state->convert_data);
8679 +
8680 + /* Notify others of the policy change */
8681 + selinux_notify_policy_change(state, seqno);
8682 +@@ -2241,11 +2248,10 @@ void selinux_policy_commit(struct selinux_state *state,
8683 + * loading the new policy.
8684 + */
8685 + int security_load_policy(struct selinux_state *state, void *data, size_t len,
8686 +- struct selinux_policy **newpolicyp)
8687 ++ struct selinux_load_state *load_state)
8688 + {
8689 + struct selinux_policy *newpolicy, *oldpolicy;
8690 +- struct sidtab_convert_params convert_params;
8691 +- struct convert_context_args args;
8692 ++ struct selinux_policy_convert_data *convert_data;
8693 + int rc = 0;
8694 + struct policy_file file = { data, len }, *fp = &file;
8695 +
8696 +@@ -2275,10 +2281,10 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
8697 + goto err_mapping;
8698 + }
8699 +
8700 +-
8701 + if (!selinux_initialized(state)) {
8702 + /* First policy load, so no need to preserve state from old policy */
8703 +- *newpolicyp = newpolicy;
8704 ++ load_state->policy = newpolicy;
8705 ++ load_state->convert_data = NULL;
8706 + return 0;
8707 + }
8708 +
8709 +@@ -2292,29 +2298,38 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
8710 + goto err_free_isids;
8711 + }
8712 +
8713 ++ convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
8714 ++ if (!convert_data) {
8715 ++ rc = -ENOMEM;
8716 ++ goto err_free_isids;
8717 ++ }
8718 ++
8719 + /*
8720 + * Convert the internal representations of contexts
8721 + * in the new SID table.
8722 + */
8723 +- args.state = state;
8724 +- args.oldp = &oldpolicy->policydb;
8725 +- args.newp = &newpolicy->policydb;
8726 ++ convert_data->args.state = state;
8727 ++ convert_data->args.oldp = &oldpolicy->policydb;
8728 ++ convert_data->args.newp = &newpolicy->policydb;
8729 +
8730 +- convert_params.func = convert_context;
8731 +- convert_params.args = &args;
8732 +- convert_params.target = newpolicy->sidtab;
8733 ++ convert_data->sidtab_params.func = convert_context;
8734 ++ convert_data->sidtab_params.args = &convert_data->args;
8735 ++ convert_data->sidtab_params.target = newpolicy->sidtab;
8736 +
8737 +- rc = sidtab_convert(oldpolicy->sidtab, &convert_params);
8738 ++ rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
8739 + if (rc) {
8740 + pr_err("SELinux: unable to convert the internal"
8741 + " representation of contexts in the new SID"
8742 + " table\n");
8743 +- goto err_free_isids;
8744 ++ goto err_free_convert_data;
8745 + }
8746 +
8747 +- *newpolicyp = newpolicy;
8748 ++ load_state->policy = newpolicy;
8749 ++ load_state->convert_data = convert_data;
8750 + return 0;
8751 +
8752 ++err_free_convert_data:
8753 ++ kfree(convert_data);
8754 + err_free_isids:
8755 + sidtab_destroy(newpolicy->sidtab);
8756 + err_mapping:
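
The SELinux hunks above fix a use-after-scope: the conversion arguments and sidtab parameters used to live on security_load_policy()'s stack, yet the live sidtab conversion keeps referencing them until the load is later committed or cancelled. The patch bundles them into a heap-allocated selinux_policy_convert_data carried inside the load_state handle, and whichever of commit or cancel runs releases it. A minimal compilable sketch of that ownership pattern, with hypothetical names standing in for the kernel types:

    #include <stdlib.h>

    /* Hypothetical stand-ins for the kernel types; illustration only. */
    struct policy { int dummy; };
    struct convert_data { int args; };

    struct load_state {
        struct policy *policy;
        struct convert_data *convert_data; /* must outlive the load call */
    };

    /* Prepare: allocate conversion state on the heap, not the stack. */
    static int load_prepare(struct load_state *st)
    {
        st->convert_data = malloc(sizeof(*st->convert_data));
        if (!st->convert_data)
            return -1;
        /* ... start a conversion that keeps referencing convert_data ... */
        return 0;
    }

    /* Both terminal paths free the state exactly once. */
    static void load_commit(struct load_state *st)
    {
        /* ... swap in st->policy ... */
        free(st->convert_data);
    }

    static void load_cancel(struct load_state *st)
    {
        free(st->convert_data); /* free(NULL) is a no-op */
    }

    int main(void)
    {
        struct load_state st = { 0 };
        if (load_prepare(&st) == 0)
            load_commit(&st);
        else
            load_cancel(&st);
        return 0;
    }
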
8757 +diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
8758 +index d053beccfaec3..e2237239d922a 100644
8759 +--- a/sound/hda/intel-nhlt.c
8760 ++++ b/sound/hda/intel-nhlt.c
8761 +@@ -39,6 +39,11 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
8762 + if (!nhlt)
8763 + return 0;
8764 +
8765 ++ if (nhlt->header.length <= sizeof(struct acpi_table_header)) {
8766 ++ dev_warn(dev, "Invalid DMIC description table\n");
8767 ++ return 0;
8768 ++ }
8769 ++
8770 + for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
8771 + epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {
8772 +
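
The added check rejects an NHLT table whose reported length is no larger than a bare ACPI header, since walking endpoint descriptors in such a table would read past the end of the buffer. A sketch of the same validate-before-walk pattern against a hypothetical variable-length table layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: fixed header followed by variable-length entries. */
    struct table_header { uint32_t length; };
    struct entry { uint32_t length; /* size of this entry, header included */ };

    static int walk_entries(const uint8_t *buf)
    {
        const struct table_header *hdr = (const void *)buf;

        /* A length that cannot even hold the header means no entries. */
        if (hdr->length <= sizeof(*hdr)) {
            fprintf(stderr, "invalid table\n");
            return 0;
        }

        for (uint32_t off = sizeof(*hdr);
             off + sizeof(struct entry) <= hdr->length; ) {
            const struct entry *e = (const void *)(buf + off);
            if (e->length < sizeof(*e))
                break;          /* malformed entry; stop walking */
            off += e->length;   /* advance by the entry's own size */
        }
        return 0;
    }

    int main(void)
    {
        uint32_t raw = sizeof(struct table_header); /* length covers header only */
        return walk_entries((const uint8_t *)&raw); /* rejected as invalid */
    }
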
8773 +diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
8774 +index 89135bb35bf76..ae5662d368b98 100644
8775 +--- a/tools/include/linux/static_call_types.h
8776 ++++ b/tools/include/linux/static_call_types.h
8777 +@@ -4,11 +4,13 @@
8778 +
8779 + #include <linux/types.h>
8780 + #include <linux/stringify.h>
8781 ++#include <linux/compiler.h>
8782 +
8783 + #define STATIC_CALL_KEY_PREFIX __SCK__
8784 + #define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
8785 + #define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
8786 + #define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
8787 ++#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
8788 +
8789 + #define STATIC_CALL_TRAMP_PREFIX __SCT__
8790 + #define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
8791 +@@ -32,4 +34,52 @@ struct static_call_site {
8792 + s32 key;
8793 + };
8794 +
8795 ++#define DECLARE_STATIC_CALL(name, func) \
8796 ++ extern struct static_call_key STATIC_CALL_KEY(name); \
8797 ++ extern typeof(func) STATIC_CALL_TRAMP(name);
8798 ++
8799 ++#ifdef CONFIG_HAVE_STATIC_CALL
8800 ++
8801 ++#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
8802 ++
8803 ++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
8804 ++
8805 ++/*
8806 ++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
8807 ++ * the symbol table so that objtool can reference it when it generates the
8808 ++ * .static_call_sites section.
8809 ++ */
8810 ++#define __STATIC_CALL_ADDRESSABLE(name) \
8811 ++ __ADDRESSABLE(STATIC_CALL_KEY(name))
8812 ++
8813 ++#define __static_call(name) \
8814 ++({ \
8815 ++ __STATIC_CALL_ADDRESSABLE(name); \
8816 ++ __raw_static_call(name); \
8817 ++})
8818 ++
8819 ++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
8820 ++
8821 ++#define __STATIC_CALL_ADDRESSABLE(name)
8822 ++#define __static_call(name) __raw_static_call(name)
8823 ++
8824 ++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
8825 ++
8826 ++#ifdef MODULE
8827 ++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
8828 ++#define static_call_mod(name) __raw_static_call(name)
8829 ++#else
8830 ++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
8831 ++#define static_call_mod(name) __static_call(name)
8832 ++#endif
8833 ++
8834 ++#define static_call(name) __static_call(name)
8835 ++
8836 ++#else
8837 ++
8838 ++#define static_call(name) \
8839 ++ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
8840 ++
8841 ++#endif /* CONFIG_HAVE_STATIC_CALL */
8842 ++
8843 + #endif /* _STATIC_CALL_TYPES_H */
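
The block above ports the kernel's static_call() plumbing into the tools/ copy of the header: with CONFIG_HAVE_STATIC_CALL the call goes through the trampoline symbol (and __ADDRESSABLE() keeps the key in the symbol table so objtool can reference it), while without it static_call() collapses to an ordinary indirect call through the key's func pointer. A userspace sketch of that fallback shape, with hypothetical names; the object-to-function-pointer cast is assumed to behave as it does on Linux:

    #include <stdio.h>

    /* Hypothetical mirror of the !CONFIG_HAVE_STATIC_CALL fallback. */
    struct static_call_key { void *func; };

    static int add(int a, int b) { return a + b; }

    static struct static_call_key my_op_key = { .func = add };

    /* Degrades to a plain indirect call through the key's pointer,
     * like ((typeof(TRAMP)*)(KEY.func)) in the header above. */
    #define my_static_call(key, type) ((type)((key).func))

    int main(void)
    {
        int (*fn)(int, int) = my_static_call(my_op_key, int (*)(int, int));
        printf("%d\n", fn(2, 3)); /* prints 5 */
        return 0;
    }
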
8844 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
8845 +index 55bd78b3496fb..310f647c2d5b6 100644
8846 +--- a/tools/lib/bpf/Makefile
8847 ++++ b/tools/lib/bpf/Makefile
8848 +@@ -236,7 +236,7 @@ define do_install
8849 + if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
8850 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
8851 + fi; \
8852 +- $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
8853 ++ $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
8854 + endef
8855 +
8856 + install_lib: all_cmd
8857 +diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
8858 +index 2f9d685bd522c..0911aea4cdbe5 100644
8859 +--- a/tools/lib/bpf/btf_dump.c
8860 ++++ b/tools/lib/bpf/btf_dump.c
8861 +@@ -462,7 +462,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
8862 + return err;
8863 +
8864 + case BTF_KIND_ARRAY:
8865 +- return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
8866 ++ return btf_dump_order_type(d, btf_array(t)->type, false);
8867 +
8868 + case BTF_KIND_STRUCT:
8869 + case BTF_KIND_UNION: {
8870 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
8871 +index b954db52bb807..95eef7ebdac5c 100644
8872 +--- a/tools/lib/bpf/libbpf.c
8873 ++++ b/tools/lib/bpf/libbpf.c
8874 +@@ -1162,7 +1162,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
8875 + if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
8876 + pr_warn("elf: failed to get section names strings from %s: %s\n",
8877 + obj->path, elf_errmsg(-1));
8878 +- return -LIBBPF_ERRNO__FORMAT;
8879 ++ err = -LIBBPF_ERRNO__FORMAT;
8880 ++ goto errout;
8881 + }
8882 +
8883 + /* Old LLVM set e_machine to EM_NONE */
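
The libbpf hunk turns an early return into a jump to the function's shared errout label, so the already-opened ELF handle is released on this failure path too. The shape of that centralized-cleanup idiom, as a self-contained sketch with hypothetical resources:

    #include <stdio.h>
    #include <stdlib.h>

    /* All failure paths funnel through one label, so every resource
     * acquired so far is released exactly once. */
    char *read_block(const char *path)
    {
        FILE *f;
        char *buf = NULL;

        f = fopen(path, "rb");
        if (!f)
            return NULL;        /* nothing else acquired yet */

        buf = malloc(4096);
        if (!buf)
            goto errout;

        if (fread(buf, 1, 4096, f) != 4096)
            goto errout;

        fclose(f);
        return buf;             /* success: caller owns buf */

    errout:
        free(buf);              /* free(NULL) is a no-op */
        fclose(f);
        return NULL;
    }
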
8884 +diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
8885 +index 4dd73de00b6f1..d2cb28e9ef52e 100644
8886 +--- a/tools/lib/bpf/netlink.c
8887 ++++ b/tools/lib/bpf/netlink.c
8888 +@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
8889 + memset(&sa, 0, sizeof(sa));
8890 + sa.nl_family = AF_NETLINK;
8891 +
8892 +- sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
8893 ++ sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
8894 + if (sock < 0)
8895 + return -errno;
8896 +
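
Opening the netlink socket with SOCK_CLOEXEC sets close-on-exec atomically at creation, so the descriptor cannot leak into processes exec()ed by the library's caller. The same flag on an ordinary socket, as a sketch:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        /* OR the type with SOCK_CLOEXEC: the flag is applied at creation,
         * with no fcntl(F_SETFD) race window between socket() and exec(). */
        int fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* An exec'd child would no longer inherit fd. */
        close(fd);
        return 0;
    }
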
8897 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
8898 +index dc24aac08edd6..5c83f73ad6687 100644
8899 +--- a/tools/objtool/check.c
8900 ++++ b/tools/objtool/check.c
8901 +@@ -502,8 +502,21 @@ static int create_static_call_sections(struct objtool_file *file)
8902 +
8903 + key_sym = find_symbol_by_name(file->elf, tmp);
8904 + if (!key_sym) {
8905 +- WARN("static_call: can't find static_call_key symbol: %s", tmp);
8906 +- return -1;
8907 ++ if (!module) {
8908 ++ WARN("static_call: can't find static_call_key symbol: %s", tmp);
8909 ++ return -1;
8910 ++ }
8911 ++
8912 ++ /*
8913 ++ * For modules, the key might not be exported, which
8914 ++ * means the module can make static calls but isn't
8915 ++ * allowed to change them.
8916 ++ *
8917 ++ * In that case we temporarily set the key to be the
8918 ++ * trampoline address. This is fixed up in
8919 ++ * static_call_add_module().
8920 ++ */
8921 ++ key_sym = insn->call_dest;
8922 + }
8923 + free(key_name);
8924 +
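
The comment above describes objtool's fallback for modules: when a static_call key symbol is not exported, the generated site temporarily records the trampoline's own address in the key slot, and the kernel rewrites it later in static_call_add_module(). A rough userspace sketch of that placeholder-then-fixup idea, all names hypothetical:

    #include <stdio.h>

    struct site { void *key; void *tramp; };

    static void tramp_stub(void) { puts("trampoline"); }

    /* If the real key isn't visible, park the trampoline address there. */
    static struct site make_site(void *key_sym, void *tramp)
    {
        struct site s = { .key = key_sym ? key_sym : tramp, .tramp = tramp };
        return s;
    }

    /* Later, the loader detects the placeholder and patches the real key in. */
    static void fixup_at_module_load(struct site *s, void *real_key)
    {
        if (s->key == s->tramp)
            s->key = real_key;
    }

    int main(void)
    {
        int real_key = 42;
        struct site s = make_site(NULL, (void *)tramp_stub);
        fixup_at_module_load(&s, &real_key);
        printf("fixed up: %d\n", s.key == &real_key); /* prints 1 */
        return 0;
    }
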
8925 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
8926 +index 42a85c86421d7..d8ada6a3c555a 100644
8927 +--- a/tools/perf/util/auxtrace.c
8928 ++++ b/tools/perf/util/auxtrace.c
8929 +@@ -300,10 +300,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
8930 + queue->set = true;
8931 + queue->tid = buffer->tid;
8932 + queue->cpu = buffer->cpu;
8933 +- } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
8934 +- pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
8935 +- queue->cpu, queue->tid, buffer->cpu, buffer->tid);
8936 +- return -EINVAL;
8937 + }
8938 +
8939 + buffer->buffer_nr = queues->next_buffer_nr++;
8940 +diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
8941 +index d9c624377da73..b4cf6dd57dd6e 100644
8942 +--- a/tools/perf/util/synthetic-events.c
8943 ++++ b/tools/perf/util/synthetic-events.c
8944 +@@ -384,7 +384,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
8945 +
8946 + while (!io.eof) {
8947 + static const char anonstr[] = "//anon";
8948 +- size_t size;
8949 ++ size_t size, aligned_size;
8950 +
8951 + /* ensure null termination since stack will be reused. */
8952 + event->mmap2.filename[0] = '\0';
8953 +@@ -444,11 +444,12 @@ out:
8954 + }
8955 +
8956 + size = strlen(event->mmap2.filename) + 1;
8957 +- size = PERF_ALIGN(size, sizeof(u64));
8958 ++ aligned_size = PERF_ALIGN(size, sizeof(u64));
8959 + event->mmap2.len -= event->mmap.start;
8960 + event->mmap2.header.size = (sizeof(event->mmap2) -
8961 +- (sizeof(event->mmap2.filename) - size));
8962 +- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
8963 ++ (sizeof(event->mmap2.filename) - aligned_size));
8964 ++ memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
8965 ++ (aligned_size - size));
8966 + event->mmap2.header.size += machine->id_hdr_size;
8967 + event->mmap2.pid = tgid;
8968 + event->mmap2.tid = pid;
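
The perf fix separates the string's real size from its u64-aligned size: the record header is sized by the aligned value, so the padding bytes between the two (plus the id header area) must be zeroed explicitly, or stale stack contents would leak into the synthesized event. The alignment arithmetic in isolation, as a sketch mirroring the PERF_ALIGN() idea:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Round x up to the next multiple of a power-of-two alignment. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        char buf[32];
        const char *name = "//anon";

        memset(buf, 0xAA, sizeof(buf));     /* simulate a reused stack buffer */
        strcpy(buf, name);

        size_t size = strlen(buf) + 1;      /* 7: string plus NUL */
        size_t aligned_size = ALIGN_UP(size, sizeof(uint64_t)); /* 8 */

        /* Zero only the padding gap, not the string itself. */
        memset(buf + size, 0, aligned_size - size);

        printf("size=%zu aligned=%zu\n", size, aligned_size);
        return 0;
    }
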
8969 +diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
8970 +index b2282be6f9384..612d3899614ac 100644
8971 +--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
8972 ++++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
8973 +@@ -332,5 +332,5 @@ int main(void)
8974 +
8975 + ksft_print_cnts();
8976 +
8977 +- return 0;
8978 ++ return ret;
8979 + }
8980 +diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
8981 +index 9afe947cfae95..ba6eadfec5653 100644
8982 +--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
8983 ++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
8984 +@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
8985 + }
8986 +
8987 + ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
8988 +- if (ret < 0) {
8989 +- ERROR(ret);
8990 +- return TC_ACT_SHOT;
8991 +- }
8992 ++ if (ret < 0)
8993 ++ gopt.opt_class = 0;
8994 +
8995 + bpf_trace_printk(fmt, sizeof(fmt),
8996 + key.tunnel_id, key.remote_ipv4, gopt.opt_class);
8997 +diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
8998 +index ce6bea9675c07..0ccb1dda099ae 100755
8999 +--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
9000 ++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
9001 +@@ -658,7 +658,7 @@ test_ecn_decap()
9002 + # In accordance with INET_ECN_decapsulate()
9003 + __test_ecn_decap 00 00 0x00
9004 + __test_ecn_decap 01 01 0x01
9005 +- __test_ecn_decap 02 01 0x02
9006 ++ __test_ecn_decap 02 01 0x01
9007 + __test_ecn_decap 01 03 0x03
9008 + __test_ecn_decap 02 03 0x03
9009 + test_ecn_decap_error
9010 +diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
9011 +index 7b01b7c2ec104..066efd30e2946 100644
9012 +--- a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
9013 ++++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
9014 +@@ -30,25 +30,25 @@ struct reuse_opts {
9015 + };
9016 +
9017 + struct reuse_opts unreusable_opts[12] = {
9018 +- {0, 0, 0, 0},
9019 +- {0, 0, 0, 1},
9020 +- {0, 0, 1, 0},
9021 +- {0, 0, 1, 1},
9022 +- {0, 1, 0, 0},
9023 +- {0, 1, 0, 1},
9024 +- {0, 1, 1, 0},
9025 +- {0, 1, 1, 1},
9026 +- {1, 0, 0, 0},
9027 +- {1, 0, 0, 1},
9028 +- {1, 0, 1, 0},
9029 +- {1, 0, 1, 1},
9030 ++ {{0, 0}, {0, 0}},
9031 ++ {{0, 0}, {0, 1}},
9032 ++ {{0, 0}, {1, 0}},
9033 ++ {{0, 0}, {1, 1}},
9034 ++ {{0, 1}, {0, 0}},
9035 ++ {{0, 1}, {0, 1}},
9036 ++ {{0, 1}, {1, 0}},
9037 ++ {{0, 1}, {1, 1}},
9038 ++ {{1, 0}, {0, 0}},
9039 ++ {{1, 0}, {0, 1}},
9040 ++ {{1, 0}, {1, 0}},
9041 ++ {{1, 0}, {1, 1}},
9042 + };
9043 +
9044 + struct reuse_opts reusable_opts[4] = {
9045 +- {1, 1, 0, 0},
9046 +- {1, 1, 0, 1},
9047 +- {1, 1, 1, 0},
9048 +- {1, 1, 1, 1},
9049 ++ {{1, 1}, {0, 0}},
9050 ++ {{1, 1}, {0, 1}},
9051 ++ {{1, 1}, {1, 0}},
9052 ++ {{1, 1}, {1, 1}},
9053 + };
9054 +
9055 + int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
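
The selftest change wraps each initializer in braces so it matches the struct's actual shape, two two-element arrays, rather than relying on flat positional initialization. A small illustration of the difference:

    #include <stdio.h>

    /* Same shape as the selftest's struct: two two-element arrays. */
    struct reuse_opts { int reuseaddr[2]; int reuseport[2]; };

    int main(void)
    {
        /* A flat {1, 1, 0, 1} still fills the right slots positionally,
         * but trips -Wmissing-braces; matching the nesting makes which
         * pair is which explicit: */
        struct reuse_opts opt = { {1, 1}, {0, 1} };

        printf("reuseaddr=%d,%d reuseport=%d,%d\n",
               opt.reuseaddr[0], opt.reuseaddr[1],
               opt.reuseport[0], opt.reuseport[1]);
        return 0;
    }
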