From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Thu, 10 Jun 2021 12:09:10
Message-Id: 1623326936.1d2a2e1dbbbafa2ae51bb6ee4258ae1441d706c3.mpagano@gentoo
commit: 1d2a2e1dbbbafa2ae51bb6ee4258ae1441d706c3
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 10 12:08:56 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 10 12:08:56 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d2a2e1d

Linux patch 5.10.43

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1042_linux-5.10.43.patch | 5652 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5656 insertions(+)

diff --git a/0000_README b/0000_README
index fb74799..f258b9d 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch: 1041_linux-5.10.42.patch
From: http://www.kernel.org
Desc: Linux 5.10.42

+Patch: 1042_linux-5.10.43.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.43
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1042_linux-5.10.43.patch b/1042_linux-5.10.43.patch
new file mode 100644
index 0000000..7d99626
--- /dev/null
+++ b/1042_linux-5.10.43.patch
@@ -0,0 +1,5652 @@
+diff --git a/Makefile b/Makefile
+index 290903d0e7dab..ec9ee8032a985 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 7d2c72562c735..9148a01ed6d9f 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -105,9 +105,13 @@
+ phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <20>;
+ phy-supply = <&sw2_reg>;
+- phy-handle = <&ethphy0>;
+ status = "okay";
+
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index 236fc205c3890..d0768ae429faa 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -406,6 +406,18 @@
+ vin-supply = <&sw1_reg>;
+ };
+
++&reg_pu {
++ vin-supply = <&sw1_reg>;
++};
++
++&reg_vdd1p1 {
++ vin-supply = <&sw2_reg>;
++};
++
++&reg_vdd2p5 {
++ vin-supply = <&sw2_reg>;
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+index 828cf3e39784a..c4e146f3341bb 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+@@ -126,7 +126,7 @@
+ compatible = "nxp,pca8574";
+ reg = <0x3a>;
+ gpio-controller;
+- #gpio-cells = <1>;
++ #gpio-cells = <2>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
+index 5339210b63d0f..dd8003bd1fc09 100644
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
+@@ -193,7 +193,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ keep-power-in-suspend;
+- tuning-step = <2>;
++ fsl,tuning-step = <2>;
+ vmmc-supply = <&reg_3p3v>;
+ no-1-8-v;
+ broken-cd;
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
+index e57da0d32b98d..e519897fae082 100644
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
+@@ -351,7 +351,7 @@
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+- tuning-step = <2>;
++ fsl,tuning-step = <2>;
+ vmmc-supply = <&reg_3p3v>;
+ wakeup-source;
+ no-1-8-v;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+index df212ed5bb942..e65d1c477e2ce 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+@@ -31,11 +31,10 @@
+ reg = <0x4>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+-
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-
+- vddio-supply = <&vddh>;
++ qca,keep-pll-enabled;
++ vddio-supply = <&vddio>;
+
+ vddio: vddio-regulator {
+ regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index 62f4dcb96e70d..f3b58bb9b8408 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -192,8 +192,8 @@
+ ddr: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+- big-endian;
++ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++ little-endian;
+ };
+
+ dcfg: syscon@1e00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+index fa7a041ffcfde..825c83c71a9f1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+@@ -45,8 +45,8 @@
+ reg_12p0_main: regulator-12p0-main {
+ compatible = "regulator-fixed";
+ regulator-name = "12V_MAIN";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
++ regulator-min-microvolt = <12000000>;
++ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 72d6496e88dd4..689538244392c 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -78,6 +78,8 @@
+ #size-cells = <2>;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
+ ti,sci-dev-id = <199>;
++ dma-coherent;
++ dma-ranges;
+
+ main_navss_intr: interrupt-controller1 {
+ compatible = "ti,sci-intr";
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index b246a4acba416..568f11e23830c 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -464,14 +464,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -479,7 +479,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
+ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -489,7 +489,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -499,21 +499,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -521,7 +521,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
+ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -532,7 +532,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -542,22 +542,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write,
+- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
++ trace_trap_reg(__func__, rd->CRm, p->is_write,
++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
+
+ return true;
+ }
+@@ -565,7 +565,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
+ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -575,7 +575,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -585,21 +585,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
+ }
+
+ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+ {
+- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
+
+ return true;
+ }
+@@ -607,7 +607,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
+ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -617,7 +617,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+ {
+- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+@@ -627,7 +627,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ static void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+ {
+- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
++ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
+ }
+
+ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 01ab2163659e4..e8c2a6373157d 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
+ int ret = 0;
+ struct kprobe *prev;
+ struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+
+ if ((unsigned long)p->addr & 0x03) {
+ printk("Attempt to register kprobe at an unaligned address\n");
+@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
+ printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ ret = -EINVAL;
+- } else if (ppc_inst_prefixed(prefix)) {
++ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
++ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ ret = -EINVAL;
+ }
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index ca2b40dfd24b8..24d936c147cdf 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),)
+ endif
+
+ # Build rules
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ obj-y += vdso.o vdso-syms.o
+@@ -41,7 +41,7 @@ KASAN_SANITIZE := n
+ $(obj)/vdso.o: $(obj)/vdso.so
+
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
++$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+ LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 51abd44ab8c2d..3b4412c83eec0 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+ extern void lapic_assign_system_vectors(void);
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
++extern void lapic_update_legacy_vectors(void);
+ extern void lapic_online(void);
+ extern void lapic_offline(void);
+ extern bool apic_needs_pit(void);
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 5861d34f97718..09db5b8f1444a 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -56,11 +56,8 @@
+ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
+ #endif
+
+-#ifdef CONFIG_IOMMU_SUPPORT
+-# define DISABLE_ENQCMD 0
+-#else
+-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+-#endif
++/* Force disable because it's broken beyond repair */
++#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+
+ /*
+ * Make sure to add features to the correct mask
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index 38f4936045ab6..8b9bfaad6e662 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -79,10 +79,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+ */
+ #define PASID_DISABLED 0
+
+-#ifdef CONFIG_IOMMU_SUPPORT
+-/* Update current's PASID MSR/state by mm's PASID. */
+-void update_pasid(void);
+-#else
+ static inline void update_pasid(void) { }
+-#endif
++
+ #endif /* _ASM_X86_FPU_API_H */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 8d33ad80704f2..ceeba9f631722 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ pkru_val = pk->pkru;
+ }
+ __write_pkru(pkru_val);
+-
+- /*
+- * Expensive PASID MSR write will be avoided in update_pasid() because
+- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+- * unless it's different from mm->pasid to reduce overhead.
+- */
+- update_pasid();
+ }
+
+ #endif /* _ASM_X86_FPU_INTERNAL_H */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 3381198525126..69299878b200a 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -7,8 +7,6 @@
+ #include <linux/interrupt.h>
+ #include <uapi/asm/kvm_para.h>
+
+-extern void kvmclock_init(void);
+-
+ #ifdef CONFIG_KVM_GUEST
+ bool kvm_check_and_clear_guest_paused(void);
+ #else
+@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ }
+
+ #ifdef CONFIG_KVM_GUEST
++void kvmclock_init(void);
++void kvmclock_disable(void);
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ unsigned int kvm_arch_para_hints(void);
+ void kvm_async_pf_task_wait_schedule(u32 token);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_apf_flags(void);
+-void kvm_disable_steal_time(void);
+ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
+
+ DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
+ return 0;
+ }
+
+-static inline void kvm_disable_steal_time(void)
+-{
+- return;
+-}
+-
+ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+ {
+ return false;
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 539f3e88ca7cd..24539a05c58c7 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2539,6 +2539,7 @@ static void __init apic_bsp_setup(bool upmode)
+ end_local_APIC_setup();
+ irq_remap_enable_fault_handling();
+ setup_IO_APIC();
++ lapic_update_legacy_vectors();
+ }
+
+ #ifdef CONFIG_UP_LATE_INIT
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 758bbf25ef748..bd557e9f5dd8e 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -687,6 +687,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
+ irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+ }
+
++void __init lapic_update_legacy_vectors(void)
++{
++ unsigned int i;
++
++ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
++ return;
++
++ /*
++ * If the IO/APIC is disabled via config, kernel command line or
++ * lack of enumeration then all legacy interrupts are routed
++ * through the PIC. Make sure that they are marked as legacy
++ * vectors. PIC_CASCADE_IRQ has already been marked in
++ * lapic_assign_system_vectors().
++ */
++ for (i = 0; i < nr_legacy_irqs(); i++) {
++ if (i != PIC_CASCADE_IR)
++ lapic_assign_legacy_vector(i, true);
++ }
++}
++
+ void __init lapic_assign_system_vectors(void)
+ {
+ unsigned int i, vector = 0;
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 5d8047441a0aa..67f1a03b9b235 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
+ return 0;
+ }
+ #endif /* CONFIG_PROC_PID_ARCH_STATUS */
+-
+-#ifdef CONFIG_IOMMU_SUPPORT
+-void update_pasid(void)
+-{
+- u64 pasid_state;
+- u32 pasid;
+-
+- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+- return;
+-
+- if (!current->mm)
+- return;
+-
+- pasid = READ_ONCE(current->mm->pasid);
+- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
+- pasid_state = pasid == PASID_DISABLED ?
+- pasid : pasid | MSR_IA32_PASID_VALID;
+-
+- /*
+- * No need to hold fregs_lock() since the task's fpstate won't
+- * be changed by others (e.g. ptrace) while the task is being
+- * switched to or is in IPI.
+- */
+- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+- /* The MSR is active and can be directly updated. */
+- wrmsrl(MSR_IA32_PASID, pasid_state);
+- } else {
+- struct fpu *fpu = &current->thread.fpu;
+- struct ia32_pasid_state *ppasid_state;
+- struct xregs_state *xsave;
+-
+- /*
+- * The CPU's xstate registers are not currently active. Just
+- * update the PASID state in the memory buffer here. The
+- * PASID MSR will be loaded when returning to user mode.
+- */
+- xsave = &fpu->state.xsave;
+- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+- /*
+- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+- * won't be NULL and no need to check its value.
+- *
+- * Only update the task's PASID state when it's different
+- * from the mm's pasid.
+- */
+- if (ppasid_state->pasid != pasid_state) {
+- /*
+- * Invalid fpregs so that state restoring will pick up
+- * the PASID state.
+- */
+- __fpu_invalidate_fpregs_state(fpu);
+- ppasid_state->pasid = pasid_state;
+- }
+- }
+-}
+-#endif /* CONFIG_IOMMU_SUPPORT */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 7f57ede3cb8e7..7462b79c39de6 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -26,6 +26,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/nmi.h>
+ #include <linux/swait.h>
++#include <linux/syscore_ops.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -37,6 +38,7 @@
+ #include <asm/tlb.h>
+ #include <asm/cpuidle_haltpoll.h>
+ #include <asm/ptrace.h>
++#include <asm/reboot.h>
+ #include <asm/svm.h>
+
+ DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -374,6 +376,14 @@ static void kvm_pv_disable_apf(void)
+ pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ }
+
++static void kvm_disable_steal_time(void)
++{
++ if (!has_steal_clock)
++ return;
++
++ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
++}
++
+ static void kvm_pv_guest_cpu_reboot(void *unused)
+ {
+ /*
+@@ -416,14 +426,6 @@ static u64 kvm_steal_clock(int cpu)
+ return steal;
+ }
+
+-void kvm_disable_steal_time(void)
+-{
+- if (!has_steal_clock)
+- return;
+-
+- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+-}
+-
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+ {
+ early_set_memory_decrypted((unsigned long) ptr, size);
+@@ -460,6 +462,27 @@ static bool pv_tlb_flush_supported(void)
+
+ static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
++static void kvm_guest_cpu_offline(bool shutdown)
++{
++ kvm_disable_steal_time();
++ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
++ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
++ kvm_pv_disable_apf();
++ if (!shutdown)
++ apf_task_wake_all();
++ kvmclock_disable();
++}
++
++static int kvm_cpu_online(unsigned int cpu)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kvm_guest_cpu_init();
++ local_irq_restore(flags);
++ return 0;
++}
++
+ #ifdef CONFIG_SMP
+
+ static bool pv_ipi_supported(void)
+@@ -587,29 +610,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
+ kvm_spinlock_init();
+ }
+
+-static void kvm_guest_cpu_offline(void)
++static int kvm_cpu_down_prepare(unsigned int cpu)
+ {
+- kvm_disable_steal_time();
+- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+- kvm_pv_disable_apf();
+- apf_task_wake_all();
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kvm_guest_cpu_offline(false);
++ local_irq_restore(flags);
++ return 0;
+ }
+
+-static int kvm_cpu_online(unsigned int cpu)
++#endif
++
++static int kvm_suspend(void)
+ {
+- local_irq_disable();
+- kvm_guest_cpu_init();
+- local_irq_enable();
++ kvm_guest_cpu_offline(false);
++
+ return 0;
+ }
+
+-static int kvm_cpu_down_prepare(unsigned int cpu)
++static void kvm_resume(void)
+ {
+- local_irq_disable();
+- kvm_guest_cpu_offline();
+- local_irq_enable();
+- return 0;
++ kvm_cpu_online(raw_smp_processor_id());
++}
++
++static struct syscore_ops kvm_syscore_ops = {
++ .suspend = kvm_suspend,
++ .resume = kvm_resume,
++};
++
++/*
++ * After a PV feature is registered, the host will keep writing to the
++ * registered memory location. If the guest happens to shutdown, this memory
++ * won't be valid. In cases like kexec, in which you install a new kernel, this
++ * means a random memory location will be kept being written.
++ */
++#ifdef CONFIG_KEXEC_CORE
++static void kvm_crash_shutdown(struct pt_regs *regs)
++{
++ kvm_guest_cpu_offline(true);
++ native_machine_crash_shutdown(regs);
+ }
+ #endif
+
+@@ -681,6 +721,12 @@ static void __init kvm_guest_init(void)
+ kvm_guest_cpu_init();
+ #endif
+
++#ifdef CONFIG_KEXEC_CORE
++ machine_ops.crash_shutdown = kvm_crash_shutdown;
++#endif
++
++ register_syscore_ops(&kvm_syscore_ops);
++
+ /*
+ * Hard lockup detection is enabled by default. Disable it, as guests
+ * can get false positives too easily, for example if the host is
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 5ee705b44560b..c4ac26333bc41 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -20,7 +20,6 @@
+ #include <asm/hypervisor.h>
+ #include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+-#include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+
+ static int kvmclock __initdata = 1;
+@@ -204,28 +203,9 @@ static void kvm_setup_secondary_clock(void)
+ }
+ #endif
+
+-/*
+- * After the clock is registered, the host will keep writing to the
+- * registered memory location. If the guest happens to shutdown, this memory
+- * won't be valid. In cases like kexec, in which you install a new kernel, this
+- * means a random memory location will be kept being written. So before any
+- * kind of shutdown from our side, we unregister the clock by writing anything
+- * that does not have the 'enable' bit set in the msr
+- */
+-#ifdef CONFIG_KEXEC_CORE
+-static void kvm_crash_shutdown(struct pt_regs *regs)
+-{
+- native_write_msr(msr_kvm_system_time, 0, 0);
+- kvm_disable_steal_time();
+- native_machine_crash_shutdown(regs);
+-}
+-#endif
+-
+-static void kvm_shutdown(void)
++void kvmclock_disable(void)
+ {
+ native_write_msr(msr_kvm_system_time, 0, 0);
+- kvm_disable_steal_time();
+- native_machine_shutdown();
+ }
+
+ static void __init kvmclock_init_mem(void)
+@@ -352,10 +332,6 @@ void __init kvmclock_init(void)
+ #endif
+ x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+ x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
+- machine_ops.shutdown = kvm_shutdown;
+-#ifdef CONFIG_KEXEC_CORE
+- machine_ops.crash_shutdown = kvm_crash_shutdown;
+-#endif
+ kvm_get_preset_lpj();
+
+ /*
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9d4eb114613c2..41d44fb5f753d 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2362,7 +2362,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ err = 0;
+ if (cr >= 16) { /* mov to cr */
+ cr -= 16;
+- val = kvm_register_read(&svm->vcpu, reg);
++ val = kvm_register_readl(&svm->vcpu, reg);
+ trace_kvm_cr_write(cr, val);
+ switch (cr) {
+ case 0:
+@@ -2408,7 +2408,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ return 1;
+ }
+- kvm_register_write(&svm->vcpu, reg, val);
++ kvm_register_writel(&svm->vcpu, reg, val);
+ trace_kvm_cr_read(cr, val);
+ }
+ return kvm_complete_insn_gp(&svm->vcpu, err);
+@@ -2439,13 +2439,13 @@ static int dr_interception(struct vcpu_svm *svm)
+ if (dr >= 16) { /* mov to DRn */
+ if (!kvm_require_dr(&svm->vcpu, dr - 16))
+ return 1;
+- val = kvm_register_read(&svm->vcpu, reg);
++ val = kvm_register_readl(&svm->vcpu, reg);
+ kvm_set_dr(&svm->vcpu, dr - 16, val);
+ } else {
+ if (!kvm_require_dr(&svm->vcpu, dr))
+ return 1;
+ kvm_get_dr(&svm->vcpu, dr, &val);
+- kvm_register_write(&svm->vcpu, reg, val);
++ kvm_register_writel(&svm->vcpu, reg, val);
+ }
+
+ return kvm_skip_emulated_instruction(&svm->vcpu);
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index a19374d261013..65f599e9075bc 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
+ #define AMD_SME_BIT BIT(0)
+ #define AMD_SEV_BIT BIT(1)
+
+- /* Check the SEV MSR whether SEV or SME is enabled */
+- sev_status = __rdmsr(MSR_AMD64_SEV);
+- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+-
+ /*
+ * Check for the SME/SEV feature:
+ * CPUID Fn8000_001F[EAX]
+@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+- if (!(eax & feature_mask))
++ /* Check whether SEV or SME is supported */
++ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
+ return;
+
+ me_mask = 1UL << (ebx & 0x3f);
+
++ /* Check the SEV MSR whether SEV or SME is enabled */
++ sev_status = __rdmsr(MSR_AMD64_SEV);
++ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++
+ /* Check if memory encryption is enabled */
+ if (feature_mask == AMD_SME_BIT) {
+ /*
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index 4c0d4e4341961..72d2c0b656339 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+ }
+ break;
+
++ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
++
++ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
++ "***** Address handler %p\n", object));
++
++ acpi_os_delete_mutex(object->address_space.context_mutex);
++ break;
++
+ default:
+
+ break;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9afbe4992a1dd..818dc7f54f038 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1330,6 +1330,34 @@ err_allow_idle:
+ return error;
+ }
+
++static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
++{
++ struct device *dev = ddata->dev;
++ int error;
++
++ /* Disable target module if it is enabled */
++ if (ddata->enabled) {
++ error = sysc_runtime_suspend(dev);
++ if (error)
++ dev_warn(dev, "reinit suspend failed: %i\n", error);
++ }
++
++ /* Enable target module */
++ error = sysc_runtime_resume(dev);
++ if (error)
++ dev_warn(dev, "reinit resume failed: %i\n", error);
++
++ if (leave_enabled)
++ return error;
++
++ /* Disable target module if no leave_enabled was set */
++ error = sysc_runtime_suspend(dev);
++ if (error)
++ dev_warn(dev, "reinit suspend failed: %i\n", error);
++
++ return error;
++}
++
+ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ {
+ struct sysc *ddata;
+@@ -1340,12 +1368,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+- return pm_runtime_force_suspend(dev);
++ if (!ddata->enabled)
++ return 0;
++
++ ddata->needs_resume = 1;
++
++ return sysc_runtime_suspend(dev);
+ }
+
+ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ {
+ struct sysc *ddata;
++ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+@@ -1353,7 +1387,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+- return pm_runtime_force_resume(dev);
++ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
++ error = sysc_reinit_module(ddata, ddata->needs_resume);
++ if (error)
++ dev_warn(dev, "noirq_resume failed: %i\n", error);
++ } else if (ddata->needs_resume) {
++ error = sysc_runtime_resume(dev);
++ if (error)
++ dev_warn(dev, "noirq_resume failed: %i\n", error);
++ }
++
++ ddata->needs_resume = 0;
++
++ return error;
+ }
+
+ static const struct dev_pm_ops sysc_pm_ops = {
+@@ -1404,9 +1450,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1462,7 +1508,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
++ SYSC_QUIRK_REINIT_ON_RESUME),
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index e15d484b6a5a7..ea7ca74fc1730 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ return 0;
+
+- n = 0;
+- len = CPER_REC_LEN - 1;
++ len = CPER_REC_LEN;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank && device)
+ n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ "DIMM location: not present. DMI handle: 0x%.4x ",
+ mem->mem_dev_handle);
+
+- msg[n] = '\0';
+ return n;
+ }
+
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
+index bb042ab7c2be6..e901f8564ca0c 100644
+--- a/drivers/firmware/efi/fdtparams.c
++++ b/drivers/firmware/efi/fdtparams.c
+@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+
++ if (!fdt)
++ return 0;
++
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ node = fdt_path_offset(fdt, dt_params[i].path);
+ if (node < 0)
+diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
+index 4e81c6077188e..dd95f330fe6e1 100644
+--- a/drivers/firmware/efi/libstub/file.c
++++ b/drivers/firmware/efi/libstub/file.c
+@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ return 0;
+
+ /* Skip any leading slashes */
+- while (cmdline[i] == L'/' || cmdline[i] == L'\\')
++ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ i++;
+
+ while (--result_len > 0 && i < cmdline_len) {
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 5737cb0fcd44e..0a9aba5f9ceff 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+ return false;
+ }
+
+- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+- return false;
+- }
+-
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index c80d8339f58c4..2c1c5f7f98deb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ {
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+- unsigned long ras_counter;
+
+ if (!fpriv)
+ return -EINVAL;
+@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ if (atomic_read(&ctx->guilty))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+
+- /*query ue count*/
+- ras_counter = amdgpu_ras_query_error_count(adev, false);
+- /*ras counter is monotonic increasing*/
+- if (ras_counter != ctx->ras_counter_ue) {
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+- ctx->ras_counter_ue = ras_counter;
+- }
+-
+- /*query ce count*/
+- ras_counter = amdgpu_ras_query_error_count(adev, true);
+- if (ras_counter != ctx->ras_counter_ce) {
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+- ctx->ras_counter_ce = ras_counter;
+- }
+-
+ mutex_unlock(&mgr->lock);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+index 63b3501823898..8c84e35c2719b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
+ static int jpeg_v2_5_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+ int i;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+- ring = &adev->jpeg.inst[i].ring_dec;
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index 9259e35f0f55a..e00c88abeaed1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
+ static int jpeg_v3_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+
+- ring = &adev->jpeg.inst->ring_dec;
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 666bfa4a0b8ea..53f0899eb3166 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -356,6 +356,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+
+ error:
+ dma_fence_put(fence);
++ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 700621ddc02e2..c9c888be12285 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -345,15 +345,14 @@ done:
+ static int vcn_v3_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- struct amdgpu_ring *ring;
+ int i;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+- ring = &adev->vcn.inst[i].ring_dec;
+-
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index e424a6d1a68c9..7a72faf29f272 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -1391,8 +1391,8 @@ static int live_breadcrumbs_smoketest(void *arg)
+
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+- if (!smoke[0].contexts[n]) {
+- ret = -ENOMEM;
++ if (IS_ERR(smoke[0].contexts[n])) {
++ ret = PTR_ERR(smoke[0].contexts[n]);
+ goto out_contexts;
+ }
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index e69ea810e18d9..c8217f4858a15 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -931,8 +931,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ DPU_DEBUG("REG_DMA is not defined");
+ }
+
+- if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+- dpu_kms_parse_data_bus_icc_path(dpu_kms);
++ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+index cd4078807db1b..3416e9617ee9a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+@@ -31,40 +31,8 @@ struct dpu_mdss {
+ void __iomem *mmio;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+- struct icc_path *path[2];
+- u32 num_paths;
+ };
+
+-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
+- struct dpu_mdss *dpu_mdss)
+-{
+- struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
+- struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
+-
+- if (IS_ERR_OR_NULL(path0))
+- return PTR_ERR_OR_ZERO(path0);
+-
+- dpu_mdss->path[0] = path0;
+- dpu_mdss->num_paths = 1;
+-
+- if (!IS_ERR_OR_NULL(path1)) {
+- dpu_mdss->path[1] = path1;
+- dpu_mdss->num_paths++;
+- }
+-
+- return 0;
+-}
+-
+-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
+-{
+- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+- int i;
+- u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
+-
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
+-}
+-
+ static void dpu_mdss_irq(struct irq_desc *desc)
+ {
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+- dpu_mdss_icc_request_bw(mdss);
+-
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret) {
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+@@ -213,15 +179,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
+ {
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+- int ret, i;
++ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_set_bw(dpu_mdss->path[i], 0, 0);
+-
+ return ret;
+ }
+
+@@ -232,7 +195,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int irq;
+- int i;
+
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
+@@ -242,9 +204,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+- for (i = 0; i < dpu_mdss->num_paths; i++)
+- icc_put(dpu_mdss->path[i]);
+-
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+@@ -276,12 +235,6 @@ int dpu_mdss_init(struct drm_device *dev)
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+- if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+- ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+- if (ret)
+- return ret;
+- }
+-
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+@@ -307,8 +260,6 @@ int dpu_mdss_init(struct drm_device *dev)
+
+ pm_runtime_enable(dev->dev);
+
+- dpu_mdss_icc_request_bw(priv->mdss);
+-
+ return ret;
+
+ irq_error:
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 74ebfb12c360e..66b1051620390 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1259,6 +1259,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int status;
+
+ long flags = (long) data[2];
++ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+
+ if (flags & 0x80)
+ switch (flags & 0x07) {
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index abd86903875f0..fc4c074597539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+- return 0;
++ return -ENODEV;
+
+ msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
+ if (msc == NULL) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 8429ebe7097e4..8580ace596c25 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -604,9 +604,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+- for (n = 0; n < field->report_count; n++) {
+- if (field->usage[n].hid == HID_DG_CONTACTID)
+- rdata->is_mt_collection = true;
++ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
++ for (n = 0; n < field->report_count; n++) {
++ if (field->usage[n].hid == HID_DG_CONTACTID) {
++ rdata->is_mt_collection = true;
++ break;
++ }
++ }
+ }
+ }
+
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index cb7758d59014e..1f08c848c33de 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -50,6 +50,7 @@
+ #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
+ #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
+
+
+ /* flags */
+@@ -183,6 +184,11 @@ static const struct i2c_hid_quirks {
+ I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
++ /*
++ * Sending the wakeup after reset actually break ELAN touchscreen controller
++ */
++ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+ { 0, 0 }
+ };
+
+@@ -466,7 +472,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ }
+
+ /* At least some SIS devices need this after reset */
+- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
+ out_unlock:
+ mutex_unlock(&ihid->reset_lock);
+@@ -1131,8 +1138,8 @@ static int i2c_hid_probe(struct i2c_client *client,
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+
+- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+- client->name, hid->vendor, hid->product);
++ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
++ client->name, (u16)hid->vendor, (u16)hid->product);
+ strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
1428 +diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
1429 +index fddac7c72f645..07a9fe97d2e05 100644
1430 +--- a/drivers/hid/usbhid/hid-pidff.c
1431 ++++ b/drivers/hid/usbhid/hid-pidff.c
1432 +@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
1433 +
1434 + if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
1435 + pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
1436 ++ error = -EPERM;
1437 + hid_notice(hid,
1438 + "device does not support device managed pool\n");
1439 + goto fail;
1440 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
1441 +index 73b9db9e3aab6..63b74e781c5d9 100644
1442 +--- a/drivers/hwmon/dell-smm-hwmon.c
1443 ++++ b/drivers/hwmon/dell-smm-hwmon.c
1444 +@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
1445 + static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
1446 + int index)
1447 + {
1448 +- if (disallow_fan_support && index >= 8)
1449 ++ if (disallow_fan_support && index >= 20)
1450 + return 0;
1451 + if (disallow_fan_type_call &&
1452 +- (index == 9 || index == 12 || index == 15))
1453 ++ (index == 21 || index == 25 || index == 28))
1454 + return 0;
1455 + if (index >= 0 && index <= 1 &&
1456 + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
1457 +diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
1458 +index 7cad76e07f701..3f1b826dac8a0 100644
1459 +--- a/drivers/hwmon/pmbus/isl68137.c
1460 ++++ b/drivers/hwmon/pmbus/isl68137.c
1461 +@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
1462 + info->read_word_data = raa_dmpvr2_read_word_data;
1463 + break;
1464 + case raa_dmpvr2_2rail_nontc:
1465 +- info->func[0] &= ~PMBUS_HAVE_TEMP;
1466 +- info->func[1] &= ~PMBUS_HAVE_TEMP;
1467 ++ info->func[0] &= ~PMBUS_HAVE_TEMP3;
1468 ++ info->func[1] &= ~PMBUS_HAVE_TEMP3;
1469 + fallthrough;
1470 + case raa_dmpvr2_2rail:
1471 + info->pages = 2;
1472 +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
1473 +index 4a6dd05d6dbf9..86f028febce35 100644
1474 +--- a/drivers/i2c/busses/i2c-qcom-geni.c
1475 ++++ b/drivers/i2c/busses/i2c-qcom-geni.c
1476 +@@ -654,6 +654,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
1477 + return 0;
1478 + }
1479 +
1480 ++static void geni_i2c_shutdown(struct platform_device *pdev)
1481 ++{
1482 ++ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
1483 ++
1484 ++ /* Make client i2c transfers start failing */
1485 ++ i2c_mark_adapter_suspended(&gi2c->adap);
1486 ++}
1487 ++
1488 + static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
1489 + {
1490 + int ret;
1491 +@@ -694,6 +702,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
1492 + {
1493 + struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
1494 +
1495 ++ i2c_mark_adapter_suspended(&gi2c->adap);
1496 ++
1497 + if (!gi2c->suspended) {
1498 + geni_i2c_runtime_suspend(dev);
1499 + pm_runtime_disable(dev);
1500 +@@ -703,8 +713,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
1501 + return 0;
1502 + }
1503 +
1504 ++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
1505 ++{
1506 ++ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
1507 ++
1508 ++ i2c_mark_adapter_resumed(&gi2c->adap);
1509 ++ return 0;
1510 ++}
1511 ++
1512 + static const struct dev_pm_ops geni_i2c_pm_ops = {
1513 +- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
1514 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
1515 + SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
1516 + NULL)
1517 + };
1518 +@@ -718,6 +736,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
1519 + static struct platform_driver geni_i2c_driver = {
1520 + .probe = geni_i2c_probe,
1521 + .remove = geni_i2c_remove,
1522 ++ .shutdown = geni_i2c_shutdown,
1523 + .driver = {
1524 + .name = "geni_i2c",
1525 + .pm = &geni_i2c_pm_ops,
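
The qcom-geni changes gate client traffic across power transitions: i2c_mark_adapter_suspended() makes the I2C core reject further transfers both at system shutdown and in the noirq suspend phase, and the new resume_noirq callback is what re-enables them. A hedged sketch of the callback pair; the two i2c_mark_adapter_*() helpers are real i2c-core API, while the driver structure around them is illustrative:

  #include <linux/i2c.h>
  #include <linux/platform_device.h>

  struct my_geni_i2c { struct i2c_adapter adap; };

  static int my_i2c_suspend_noirq(struct device *dev)
  {
          struct my_geni_i2c *gi2c = dev_get_drvdata(dev);

          i2c_mark_adapter_suspended(&gi2c->adap); /* transfers now fail fast */
          return 0;
  }

  static int my_i2c_resume_noirq(struct device *dev)
  {
          struct my_geni_i2c *gi2c = dev_get_drvdata(dev);

          i2c_mark_adapter_resumed(&gi2c->adap);   /* transfers allowed again */
          return 0;
  }
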
1526 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1527 +index 27308600da153..2dd4869156291 100644
1528 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1529 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1530 +@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
1531 + bool persistent, u8 *smt_idx);
1532 + int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
1533 + void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
1534 +-int cxgb_open(struct net_device *dev);
1535 +-int cxgb_close(struct net_device *dev);
1536 + void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
1537 + void cxgb4_quiesce_rx(struct sge_rspq *q);
1538 + int cxgb4_port_mirror_alloc(struct net_device *dev);
1539 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1540 +index 23c13f34a5727..04dcb5e4b3161 100644
1541 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1542 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1543 +@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
1544 + /*
1545 + * net_device operations
1546 + */
1547 +-int cxgb_open(struct net_device *dev)
1548 ++static int cxgb_open(struct net_device *dev)
1549 + {
1550 + struct port_info *pi = netdev_priv(dev);
1551 + struct adapter *adapter = pi->adapter;
1552 +@@ -2882,7 +2882,7 @@ out_unlock:
1553 + return err;
1554 + }
1555 +
1556 +-int cxgb_close(struct net_device *dev)
1557 ++static int cxgb_close(struct net_device *dev)
1558 + {
1559 + struct port_info *pi = netdev_priv(dev);
1560 + struct adapter *adapter = pi->adapter;
1561 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1562 +index 1b88bd1c2dbe4..dd9be229819a5 100644
1563 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1564 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1565 +@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
1566 + if (!ch_flower)
1567 + return -ENOENT;
1568 +
1569 ++ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
1570 ++ adap->flower_ht_params);
1571 ++
1572 + ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
1573 + &ch_flower->fs, ch_flower->filter_id);
1574 + if (ret)
1575 +- goto err;
1576 ++ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
1577 ++ ch_flower->filter_id, ret);
1578 +
1579 +- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
1580 +- adap->flower_ht_params);
1581 +- if (ret) {
1582 +- netdev_err(dev, "Flow remove from rhashtable failed");
1583 +- goto err;
1584 +- }
1585 + kfree_rcu(ch_flower, rcu);
1586 +-
1587 +-err:
1588 + return ret;
1589 + }
1590 +
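
The cxgb4 flower change is an ordering fix: the entry is now unpublished from the rhashtable before the hardware rule is torn down, so a concurrent lookup can no longer return a filter that is mid-destruction, and a destroy failure only logs instead of leaving a dangling table entry. A generic sketch of the unpublish-then-release pattern (the table and object types, and hw_release(), are hypothetical; the rhashtable and RCU calls are real API):

  /* Unpublish first, release second; RCU readers that still hold the
   * object stay safe until kfree_rcu() runs after a grace period. */
  static int obj_destroy(struct obj_table *tbl, struct obj *o)
  {
          int ret;

          rhashtable_remove_fast(&tbl->ht, &o->node, tbl->params);

          ret = hw_release(o);            /* best effort: entry already gone */
          if (ret)
                  pr_err("hw release failed: %d\n", ret);

          kfree_rcu(o, rcu);
          return ret;
  }
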
1591 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
1592 +index 6c259de96f969..338b04f339b3d 100644
1593 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
1594 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
1595 +@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
1596 + * down before configuring tc params.
1597 + */
1598 + if (netif_running(dev)) {
1599 +- cxgb_close(dev);
1600 ++ netif_tx_stop_all_queues(dev);
1601 ++ netif_carrier_off(dev);
1602 + needs_bring_up = true;
1603 + }
1604 +
1605 +@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
1606 + }
1607 +
1608 + out:
1609 +- if (needs_bring_up)
1610 +- cxgb_open(dev);
1611 ++ if (needs_bring_up) {
1612 ++ netif_tx_start_all_queues(dev);
1613 ++ netif_carrier_on(dev);
1614 ++ }
1615 +
1616 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
1617 + return ret;
1618 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
1619 +index 546301272271d..ccb6bd002b20d 100644
1620 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
1621 ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
1622 +@@ -2552,6 +2552,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
1623 + if (!eosw_txq)
1624 + return -ENOMEM;
1625 +
1626 ++ if (!(adap->flags & CXGB4_FW_OK)) {
1627 ++ /* Don't stall caller when access to FW is lost */
1628 ++ complete(&eosw_txq->completion);
1629 ++ return -EIO;
1630 ++ }
1631 ++
1632 + skb = alloc_skb(len, GFP_KERNEL);
1633 + if (!skb)
1634 + return -ENOMEM;
1635 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1636 +index 011f484606a3a..c40ac82db863e 100644
1637 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1638 ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1639 +@@ -2205,15 +2205,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
1640 + case XDP_TX:
1641 + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
1642 + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
1643 ++ if (result == I40E_XDP_CONSUMED)
1644 ++ goto out_failure;
1645 + break;
1646 + case XDP_REDIRECT:
1647 + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1648 +- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
1649 ++ if (err)
1650 ++ goto out_failure;
1651 ++ result = I40E_XDP_REDIR;
1652 + break;
1653 + default:
1654 + bpf_warn_invalid_xdp_action(act);
1655 + fallthrough;
1656 + case XDP_ABORTED:
1657 ++out_failure:
1658 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1659 + fallthrough; /* handle aborts by dropping packet */
1660 + case XDP_DROP:
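
This i40e hunk is the template for a whole family of conversions later in the patch (the i40e and ice zero-copy paths, igb, ixgbe, ixgbevf): failures of XDP_TX (ring full) and XDP_REDIRECT now jump to a shared out_failure label inside the XDP_ABORTED case, so trace_xdp_exception() fires for every frame consumed on error rather than only for explicit aborts. A distilled sketch of the shape; the ring type, helpers, and MY_XDP_* result constants stand in for each driver's own:

  static int run_xdp(struct my_ring *rx, struct xdp_buff *xdp,
                     struct bpf_prog *prog)
  {
          u32 act = bpf_prog_run_xdp(prog, xdp);
          int err;

          switch (act) {
          case XDP_PASS:
                  return MY_XDP_PASS;
          case XDP_TX:
                  if (my_xmit_xdp(rx, xdp) == MY_XDP_CONSUMED)
                          goto out_failure;       /* TX ring full */
                  return MY_XDP_TX;
          case XDP_REDIRECT:
                  err = xdp_do_redirect(rx->netdev, xdp, prog);
                  if (err)
                          goto out_failure;       /* redirect failed */
                  return MY_XDP_REDIR;
          default:
                  bpf_warn_invalid_xdp_action(act);
                  fallthrough;
          case XDP_ABORTED:
  out_failure:
                  trace_xdp_exception(rx->netdev, prog, act);
                  fallthrough;                    /* aborts drop the frame */
          case XDP_DROP:
                  return MY_XDP_CONSUMED;
          }
  }
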
1661 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1662 +index 8557807b41717..86c79f71c685a 100644
1663 +--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1664 ++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1665 +@@ -159,21 +159,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
1666 + xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1667 + act = bpf_prog_run_xdp(xdp_prog, xdp);
1668 +
1669 ++ if (likely(act == XDP_REDIRECT)) {
1670 ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1671 ++ if (err)
1672 ++ goto out_failure;
1673 ++ rcu_read_unlock();
1674 ++ return I40E_XDP_REDIR;
1675 ++ }
1676 ++
1677 + switch (act) {
1678 + case XDP_PASS:
1679 + break;
1680 + case XDP_TX:
1681 + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
1682 + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
1683 +- break;
1684 +- case XDP_REDIRECT:
1685 +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1686 +- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
1687 ++ if (result == I40E_XDP_CONSUMED)
1688 ++ goto out_failure;
1689 + break;
1690 + default:
1691 + bpf_warn_invalid_xdp_action(act);
1692 + fallthrough;
1693 + case XDP_ABORTED:
1694 ++out_failure:
1695 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1696 + fallthrough; /* handle aborts by dropping packet */
1697 + case XDP_DROP:
1698 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
1699 +index d70573f5072c6..a7975afecf70f 100644
1700 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
1701 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
1702 +@@ -1797,49 +1797,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
1703 + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
1704 + 100000baseKR4_Full);
1705 + }
1706 +-
1707 +- /* Autoneg PHY types */
1708 +- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
1709 +- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
1710 +- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
1711 +- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
1712 +- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
1713 +- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
1714 +- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
1715 +- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
1716 +- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
1717 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
1718 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
1719 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
1720 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
1721 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
1722 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
1723 +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
1724 +- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
1725 +- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
1726 +- ethtool_link_ksettings_add_link_mode(ks, supported,
1727 +- Autoneg);
1728 +- ethtool_link_ksettings_add_link_mode(ks, advertising,
1729 +- Autoneg);
1730 +- }
1731 +- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
1732 +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
1733 +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
1734 +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
1735 +- ethtool_link_ksettings_add_link_mode(ks, supported,
1736 +- Autoneg);
1737 +- ethtool_link_ksettings_add_link_mode(ks, advertising,
1738 +- Autoneg);
1739 +- }
1740 +- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
1741 +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
1742 +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
1743 +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
1744 +- ethtool_link_ksettings_add_link_mode(ks, supported,
1745 +- Autoneg);
1746 +- ethtool_link_ksettings_add_link_mode(ks, advertising,
1747 +- Autoneg);
1748 +- }
1749 + }
1750 +
1751 + #define TEST_SET_BITS_TIMEOUT 50
1752 +@@ -1996,9 +1953,7 @@ ice_get_link_ksettings(struct net_device *netdev,
1753 + ks->base.port = PORT_TP;
1754 + break;
1755 + case ICE_MEDIA_BACKPLANE:
1756 +- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
1757 + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
1758 +- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
1759 + ethtool_link_ksettings_add_link_mode(ks, advertising,
1760 + Backplane);
1761 + ks->base.port = PORT_NONE;
1762 +@@ -2073,6 +2028,12 @@ ice_get_link_ksettings(struct net_device *netdev,
1763 + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
1764 + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
1765 +
1766 ++ /* Set supported and advertised autoneg */
1767 ++ if (ice_is_phy_caps_an_enabled(caps)) {
1768 ++ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
1769 ++ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
1770 ++ }
1771 ++
1772 + done:
1773 + kfree(caps);
1774 + return err;
1775 +diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1776 +index 90abc8612a6ab..406dd6bd97a7d 100644
1777 +--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1778 ++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1779 +@@ -31,6 +31,7 @@
1780 + #define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
1781 + #define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
1782 + #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
1783 ++#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
1784 + #define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
1785 + #define PF_FW_ATQT 0x00080400
1786 + #define PF_MBX_ARQBAH 0x0022E400
1787 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
1788 +index e1384503dd4d5..fb20c6971f4c7 100644
1789 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
1790 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
1791 +@@ -192,6 +192,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
1792 + break;
1793 + case ICE_VSI_VF:
1794 + vf = &pf->vf[vsi->vf_id];
1795 ++ if (vf->num_req_qs)
1796 ++ vf->num_vf_qs = vf->num_req_qs;
1797 + vsi->alloc_txq = vf->num_vf_qs;
1798 + vsi->alloc_rxq = vf->num_vf_qs;
1799 + /* pf->num_msix_per_vf includes (VF miscellaneous vector +
1800 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
1801 +index 0f2544c420ac3..442a9bcbf60a7 100644
1802 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
1803 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
1804 +@@ -537,34 +537,35 @@ static int
1805 + ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
1806 + struct bpf_prog *xdp_prog)
1807 + {
1808 +- int err, result = ICE_XDP_PASS;
1809 + struct ice_ring *xdp_ring;
1810 ++ int err, result;
1811 + u32 act;
1812 +
1813 + act = bpf_prog_run_xdp(xdp_prog, xdp);
1814 + switch (act) {
1815 + case XDP_PASS:
1816 +- break;
1817 ++ return ICE_XDP_PASS;
1818 + case XDP_TX:
1819 + xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
1820 + result = ice_xmit_xdp_buff(xdp, xdp_ring);
1821 +- break;
1822 ++ if (result == ICE_XDP_CONSUMED)
1823 ++ goto out_failure;
1824 ++ return result;
1825 + case XDP_REDIRECT:
1826 + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1827 +- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
1828 +- break;
1829 ++ if (err)
1830 ++ goto out_failure;
1831 ++ return ICE_XDP_REDIR;
1832 + default:
1833 + bpf_warn_invalid_xdp_action(act);
1834 + fallthrough;
1835 + case XDP_ABORTED:
1836 ++out_failure:
1837 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1838 + fallthrough;
1839 + case XDP_DROP:
1840 +- result = ICE_XDP_CONSUMED;
1841 +- break;
1842 ++ return ICE_XDP_CONSUMED;
1843 + }
1844 +-
1845 +- return result;
1846 + }
1847 +
1848 + /**
1849 +@@ -2373,6 +2374,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1850 + struct ice_tx_offload_params offload = { 0 };
1851 + struct ice_vsi *vsi = tx_ring->vsi;
1852 + struct ice_tx_buf *first;
1853 ++ struct ethhdr *eth;
1854 + unsigned int count;
1855 + int tso, csum;
1856 +
1857 +@@ -2419,7 +2421,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1858 + goto out_drop;
1859 +
1860 + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
1861 +- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
1862 ++ eth = (struct ethhdr *)skb_mac_header(skb);
1863 ++ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
1864 ++ eth->h_proto == htons(ETH_P_LLDP)) &&
1865 + vsi->type == ICE_VSI_PF &&
1866 + vsi->port_info->qos_cfg.is_sw_lldp))
1867 + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
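
The ice_xmit_frame_ring() change broadens the software-LLDP escape hatch: besides skb->priority == TC_PRIO_CONTROL, a frame whose Ethernet type is ETH_P_LLDP now also gets the control-frame context descriptor when the firmware LLDP agent is disabled. A small sketch of the classification test; the skb helpers and constants are standard kernel API, the wrapper function itself is illustrative:

  #include <linux/if_ether.h>
  #include <linux/skbuff.h>
  #include <linux/pkt_sched.h>

  static bool is_sw_lldp_frame(const struct sk_buff *skb)
  {
          const struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);

          /* Compare in network byte order: htons() on the constant folds
           * at compile time, so the per-packet path does no swapping. */
          return skb->priority == TC_PRIO_CONTROL ||
                 eth->h_proto == htons(ETH_P_LLDP);
  }
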
1868 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1869 +index b3161c5def465..c9f82fd3cf48d 100644
1870 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1871 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1872 +@@ -435,13 +435,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
1873 + */
1874 + clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
1875 +
1876 +- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
1877 +- * in the case of VFR. If this is done for PFR, it can mess up VF
1878 +- * resets because the VF driver may already have started cleanup
1879 +- * by the time we get here.
1880 ++ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
1881 ++ * needs to clear them in the case of VFR/VFLR. If this is done for
1882 ++ * PFR, it can mess up VF resets because the VF driver may already
1883 ++ * have started cleanup by the time we get here.
1884 + */
1885 +- if (!is_pfr)
1886 ++ if (!is_pfr) {
1887 + wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
1888 ++ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
1889 ++ }
1890 +
1891 + /* In the case of a VFLR, the HW has already reset the VF and we
1892 + * just need to clean up, so don't hit the VFRTRIG register.
1893 +@@ -1339,7 +1341,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1894 + }
1895 +
1896 + ice_vf_pre_vsi_rebuild(vf);
1897 +- ice_vf_rebuild_vsi_with_release(vf);
1898 ++
1899 ++ if (ice_vf_rebuild_vsi_with_release(vf)) {
1900 ++ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1901 ++ return false;
1902 ++ }
1903 ++
1904 + ice_vf_post_vsi_rebuild(vf);
1905 +
1906 + return true;
1907 +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
1908 +index 98101a8e2952d..9f36f8d7a9854 100644
1909 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
1910 ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
1911 +@@ -524,21 +524,29 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
1912 + }
1913 +
1914 + act = bpf_prog_run_xdp(xdp_prog, xdp);
1915 ++
1916 ++ if (likely(act == XDP_REDIRECT)) {
1917 ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1918 ++ if (err)
1919 ++ goto out_failure;
1920 ++ rcu_read_unlock();
1921 ++ return ICE_XDP_REDIR;
1922 ++ }
1923 ++
1924 + switch (act) {
1925 + case XDP_PASS:
1926 + break;
1927 + case XDP_TX:
1928 + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
1929 + result = ice_xmit_xdp_buff(xdp, xdp_ring);
1930 +- break;
1931 +- case XDP_REDIRECT:
1932 +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1933 +- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
1934 ++ if (result == ICE_XDP_CONSUMED)
1935 ++ goto out_failure;
1936 + break;
1937 + default:
1938 + bpf_warn_invalid_xdp_action(act);
1939 + fallthrough;
1940 + case XDP_ABORTED:
1941 ++out_failure:
1942 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1943 + fallthrough;
1944 + case XDP_DROP:
1945 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1946 +index 368f0aac5e1d4..5c87c0a7ce3d7 100644
1947 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
1948 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
1949 +@@ -8419,18 +8419,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
1950 + break;
1951 + case XDP_TX:
1952 + result = igb_xdp_xmit_back(adapter, xdp);
1953 ++ if (result == IGB_XDP_CONSUMED)
1954 ++ goto out_failure;
1955 + break;
1956 + case XDP_REDIRECT:
1957 + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
1958 +- if (!err)
1959 +- result = IGB_XDP_REDIR;
1960 +- else
1961 +- result = IGB_XDP_CONSUMED;
1962 ++ if (err)
1963 ++ goto out_failure;
1964 ++ result = IGB_XDP_REDIR;
1965 + break;
1966 + default:
1967 + bpf_warn_invalid_xdp_action(act);
1968 + fallthrough;
1969 + case XDP_ABORTED:
1970 ++out_failure:
1971 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1972 + fallthrough;
1973 + case XDP_DROP:
1974 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1975 +index 0b9fddbc5db4f..1bfba87f1ff60 100644
1976 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1977 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1978 +@@ -2218,23 +2218,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
1979 + break;
1980 + case XDP_TX:
1981 + xdpf = xdp_convert_buff_to_frame(xdp);
1982 +- if (unlikely(!xdpf)) {
1983 +- result = IXGBE_XDP_CONSUMED;
1984 +- break;
1985 +- }
1986 ++ if (unlikely(!xdpf))
1987 ++ goto out_failure;
1988 + result = ixgbe_xmit_xdp_ring(adapter, xdpf);
1989 ++ if (result == IXGBE_XDP_CONSUMED)
1990 ++ goto out_failure;
1991 + break;
1992 + case XDP_REDIRECT:
1993 + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
1994 +- if (!err)
1995 +- result = IXGBE_XDP_REDIR;
1996 +- else
1997 +- result = IXGBE_XDP_CONSUMED;
1998 ++ if (err)
1999 ++ goto out_failure;
2000 ++ result = IXGBE_XDP_REDIR;
2001 + break;
2002 + default:
2003 + bpf_warn_invalid_xdp_action(act);
2004 + fallthrough;
2005 + case XDP_ABORTED:
2006 ++out_failure:
2007 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2008 + fallthrough; /* handle aborts by dropping packet */
2009 + case XDP_DROP:
2010 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2011 +index 3771857cf887c..f72d2978263b9 100644
2012 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2013 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
2014 +@@ -104,25 +104,30 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
2015 + xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2016 + act = bpf_prog_run_xdp(xdp_prog, xdp);
2017 +
2018 ++ if (likely(act == XDP_REDIRECT)) {
2019 ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2020 ++ if (err)
2021 ++ goto out_failure;
2022 ++ rcu_read_unlock();
2023 ++ return IXGBE_XDP_REDIR;
2024 ++ }
2025 ++
2026 + switch (act) {
2027 + case XDP_PASS:
2028 + break;
2029 + case XDP_TX:
2030 + xdpf = xdp_convert_buff_to_frame(xdp);
2031 +- if (unlikely(!xdpf)) {
2032 +- result = IXGBE_XDP_CONSUMED;
2033 +- break;
2034 +- }
2035 ++ if (unlikely(!xdpf))
2036 ++ goto out_failure;
2037 + result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2038 +- break;
2039 +- case XDP_REDIRECT:
2040 +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2041 +- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
2042 ++ if (result == IXGBE_XDP_CONSUMED)
2043 ++ goto out_failure;
2044 + break;
2045 + default:
2046 + bpf_warn_invalid_xdp_action(act);
2047 + fallthrough;
2048 + case XDP_ABORTED:
2049 ++out_failure:
2050 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2051 + fallthrough; /* handle aborts by dropping packet */
2052 + case XDP_DROP:
2053 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2054 +index 82fce27f682bb..a7d0a459969a2 100644
2055 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2056 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2057 +@@ -1072,11 +1072,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
2058 + case XDP_TX:
2059 + xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
2060 + result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
2061 ++ if (result == IXGBEVF_XDP_CONSUMED)
2062 ++ goto out_failure;
2063 + break;
2064 + default:
2065 + bpf_warn_invalid_xdp_action(act);
2066 + fallthrough;
2067 + case XDP_ABORTED:
2068 ++out_failure:
2069 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2070 + fallthrough; /* handle aborts by dropping packet */
2071 + case XDP_DROP:
2072 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2073 +index 986f0d86e94dc..bc7c1962f9e66 100644
2074 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2075 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2076 +@@ -1618,12 +1618,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
2077 + {
2078 + struct mlx5e_priv *priv = netdev_priv(netdev);
2079 + struct mlx5_core_dev *mdev = priv->mdev;
2080 ++ unsigned long fec_bitmap;
2081 + u16 fec_policy = 0;
2082 + int mode;
2083 + int err;
2084 +
2085 +- if (bitmap_weight((unsigned long *)&fecparam->fec,
2086 +- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
2087 ++ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
2088 ++ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
2089 + return -EOPNOTSUPP;
2090 +
2091 + for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
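
The mlx5 FEC fix addresses a type pun: casting the address of a u32 to unsigned long * and calling bitmap_weight() reads eight bytes on 64-bit targets (four of them past the field) and scrambles bit order on big-endian machines. bitmap_from_arr32() is the endian- and width-safe conversion. A sketch of the corrected test, with fec_bits standing in for fecparam->fec:

  #include <linux/bitmap.h>
  #include <linux/bits.h>

  static bool more_than_one_bit_set(u32 fec_bits)
  {
          unsigned long bitmap;   /* one word holds all 32 source bits */

          bitmap_from_arr32(&bitmap, &fec_bits,
                            sizeof(fec_bits) * BITS_PER_BYTE);
          return bitmap_weight(&bitmap, sizeof(fec_bits) * BITS_PER_BYTE) > 1;
  }
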
2092 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2093 +index 1bdeb948f56d7..80abdb0b47d7e 100644
2094 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2095 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2096 +@@ -2253,11 +2253,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
2097 + misc_parameters);
2098 + struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2099 + struct flow_dissector *dissector = rule->match.dissector;
2100 ++ enum fs_flow_table_type fs_type;
2101 + u16 addr_type = 0;
2102 + u8 ip_proto = 0;
2103 + u8 *match_level;
2104 + int err;
2105 +
2106 ++ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2107 + match_level = outer_match_level;
2108 +
2109 + if (dissector->used_keys &
2110 +@@ -2382,6 +2384,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
2111 + if (match.mask->vlan_id ||
2112 + match.mask->vlan_priority ||
2113 + match.mask->vlan_tpid) {
2114 ++ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2115 ++ fs_type)) {
2116 ++ NL_SET_ERR_MSG_MOD(extack,
2117 ++ "Matching on CVLAN is not supported");
2118 ++ return -EOPNOTSUPP;
2119 ++ }
2120 ++
2121 + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2122 + MLX5_SET(fte_match_set_misc, misc_c,
2123 + outer_second_svlan_tag, 1);
2124 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2125 +index f9042e147c7f6..ee710ce007950 100644
2126 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2127 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2128 +@@ -354,6 +354,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
2129 + reset_abort_work);
2130 + struct mlx5_core_dev *dev = fw_reset->dev;
2131 +
2132 ++ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
2133 ++ return;
2134 ++
2135 + mlx5_sync_reset_clear_reset_requested(dev, true);
2136 + mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
2137 + }
2138 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
2139 +index 1fbcd012bb855..7ccfd40586cee 100644
2140 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
2141 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
2142 +@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
2143 + int ret;
2144 +
2145 + ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
2146 +- ft_attr.level = dmn->info.caps.max_ft_level - 2;
2147 ++ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
2148 ++ MLX5_FT_MAX_MULTIPATH_LEVEL);
2149 + ft_attr.reformat_en = reformat_req;
2150 + ft_attr.decap_en = reformat_req;
2151 +
2152 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2153 +index 854c6624e6859..1d3bf810f2ca1 100644
2154 +--- a/drivers/net/usb/cdc_ncm.c
2155 ++++ b/drivers/net/usb/cdc_ncm.c
2156 +@@ -1827,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
2157 + uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
2158 + uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
2159 +
2160 ++ /* if the speed hasn't changed, don't report it.
2161 ++ * RTL8156 shipped before 2021 sends notification about every 32ms.
2162 ++ */
2163 ++ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
2164 ++ return;
2165 ++
2166 ++ dev->rx_speed = rx_speed;
2167 ++ dev->tx_speed = tx_speed;
2168 ++
2169 + /*
2170 + * Currently the USB-NET API does not support reporting the actual
2171 + * device speed. Do print it instead.
2172 +@@ -1867,7 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
2173 + * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
2174 + * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
2175 + */
2176 +- usbnet_link_change(dev, !!event->wValue, 0);
2177 ++ if (netif_carrier_ok(dev->net) != !!event->wValue)
2178 ++ usbnet_link_change(dev, !!event->wValue, 0);
2179 + break;
2180 +
2181 + case USB_CDC_NOTIFY_SPEED_CHANGE:
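
Both cdc_ncm changes suppress duplicate work from chatty hardware: pre-2021 RTL8156 units resend the speed-change notification roughly every 32 ms, so the handler caches the last rates (backed, presumably, by fields this patch adds to struct usbnet elsewhere) and returns early when nothing changed, and the connect notification only calls usbnet_link_change() when the carrier state actually flips. A sketch of the cache-and-compare shape (the type and sink helper are hypothetical):

  #include <linux/types.h>

  struct my_dev { u32 rx_speed, tx_speed; };

  static void report_link_speed(u32 rx, u32 tx) { (void)rx; (void)tx; }

  static void speed_change(struct my_dev *dev, u32 rx_speed, u32 tx_speed)
  {
          if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
                  return;         /* identical notification: drop it */

          dev->rx_speed = rx_speed;
          dev->tx_speed = tx_speed;
          report_link_speed(rx_speed, tx_speed);
  }
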
2182 +diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
2183 +index fc52b2cb500b3..dbe1f8514efc3 100644
2184 +--- a/drivers/net/wireguard/Makefile
2185 ++++ b/drivers/net/wireguard/Makefile
2186 +@@ -1,5 +1,4 @@
2187 +-ccflags-y := -O3
2188 +-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
2189 ++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
2190 + ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
2191 + wireguard-y := main.o
2192 + wireguard-y += noise.o
2193 +diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
2194 +index 3725e9cd85f4f..b7197e80f2264 100644
2195 +--- a/drivers/net/wireguard/allowedips.c
2196 ++++ b/drivers/net/wireguard/allowedips.c
2197 +@@ -6,6 +6,8 @@
2198 + #include "allowedips.h"
2199 + #include "peer.h"
2200 +
2201 ++static struct kmem_cache *node_cache;
2202 ++
2203 + static void swap_endian(u8 *dst, const u8 *src, u8 bits)
2204 + {
2205 + if (bits == 32) {
2206 +@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
2207 + node->bitlen = bits;
2208 + memcpy(node->bits, src, bits / 8U);
2209 + }
2210 +-#define CHOOSE_NODE(parent, key) \
2211 +- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
2212 ++
2213 ++static inline u8 choose(struct allowedips_node *node, const u8 *key)
2214 ++{
2215 ++ return (key[node->bit_at_a] >> node->bit_at_b) & 1;
2216 ++}
2217 +
2218 + static void push_rcu(struct allowedips_node **stack,
2219 + struct allowedips_node __rcu *p, unsigned int *len)
2220 +@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
2221 + }
2222 + }
2223 +
2224 ++static void node_free_rcu(struct rcu_head *rcu)
2225 ++{
2226 ++ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
2227 ++}
2228 ++
2229 + static void root_free_rcu(struct rcu_head *rcu)
2230 + {
2231 + struct allowedips_node *node, *stack[128] = {
2232 +@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
2233 + while (len > 0 && (node = stack[--len])) {
2234 + push_rcu(stack, node->bit[0], &len);
2235 + push_rcu(stack, node->bit[1], &len);
2236 +- kfree(node);
2237 ++ kmem_cache_free(node_cache, node);
2238 + }
2239 + }
2240 +
2241 +@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
2242 + }
2243 + }
2244 +
2245 +-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
2246 +- struct wg_peer *peer, struct mutex *lock)
2247 +-{
2248 +-#define REF(p) rcu_access_pointer(p)
2249 +-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
2250 +-#define PUSH(p) ({ \
2251 +- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
2252 +- stack[len++] = p; \
2253 +- })
2254 +-
2255 +- struct allowedips_node __rcu **stack[128], **nptr;
2256 +- struct allowedips_node *node, *prev;
2257 +- unsigned int len;
2258 +-
2259 +- if (unlikely(!peer || !REF(*top)))
2260 +- return;
2261 +-
2262 +- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
2263 +- nptr = stack[len - 1];
2264 +- node = DEREF(nptr);
2265 +- if (!node) {
2266 +- --len;
2267 +- continue;
2268 +- }
2269 +- if (!prev || REF(prev->bit[0]) == node ||
2270 +- REF(prev->bit[1]) == node) {
2271 +- if (REF(node->bit[0]))
2272 +- PUSH(&node->bit[0]);
2273 +- else if (REF(node->bit[1]))
2274 +- PUSH(&node->bit[1]);
2275 +- } else if (REF(node->bit[0]) == prev) {
2276 +- if (REF(node->bit[1]))
2277 +- PUSH(&node->bit[1]);
2278 +- } else {
2279 +- if (rcu_dereference_protected(node->peer,
2280 +- lockdep_is_held(lock)) == peer) {
2281 +- RCU_INIT_POINTER(node->peer, NULL);
2282 +- list_del_init(&node->peer_list);
2283 +- if (!node->bit[0] || !node->bit[1]) {
2284 +- rcu_assign_pointer(*nptr, DEREF(
2285 +- &node->bit[!REF(node->bit[0])]));
2286 +- kfree_rcu(node, rcu);
2287 +- node = DEREF(nptr);
2288 +- }
2289 +- }
2290 +- --len;
2291 +- }
2292 +- }
2293 +-
2294 +-#undef REF
2295 +-#undef DEREF
2296 +-#undef PUSH
2297 +-}
2298 +-
2299 + static unsigned int fls128(u64 a, u64 b)
2300 + {
2301 + return a ? fls64(a) + 64U : fls64(b);
2302 +@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
2303 + found = node;
2304 + if (node->cidr == bits)
2305 + break;
2306 +- node = rcu_dereference_bh(CHOOSE_NODE(node, key));
2307 ++ node = rcu_dereference_bh(node->bit[choose(node, key)]);
2308 + }
2309 + return found;
2310 + }
2311 +@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
2312 + u8 cidr, u8 bits, struct allowedips_node **rnode,
2313 + struct mutex *lock)
2314 + {
2315 +- struct allowedips_node *node = rcu_dereference_protected(trie,
2316 +- lockdep_is_held(lock));
2317 ++ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
2318 + struct allowedips_node *parent = NULL;
2319 + bool exact = false;
2320 +
2321 +@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
2322 + exact = true;
2323 + break;
2324 + }
2325 +- node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
2326 +- lockdep_is_held(lock));
2327 ++ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
2328 + }
2329 + *rnode = parent;
2330 + return exact;
2331 + }
2332 +
2333 ++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
2334 ++{
2335 ++ node->parent_bit_packed = (unsigned long)parent | bit;
2336 ++ rcu_assign_pointer(*parent, node);
2337 ++}
2338 ++
2339 ++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
2340 ++{
2341 ++ u8 bit = choose(parent, node->bits);
2342 ++ connect_node(&parent->bit[bit], bit, node);
2343 ++}
2344 ++
2345 + static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
2346 + u8 cidr, struct wg_peer *peer, struct mutex *lock)
2347 + {
2348 +@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
2349 + return -EINVAL;
2350 +
2351 + if (!rcu_access_pointer(*trie)) {
2352 +- node = kzalloc(sizeof(*node), GFP_KERNEL);
2353 ++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
2354 + if (unlikely(!node))
2355 + return -ENOMEM;
2356 + RCU_INIT_POINTER(node->peer, peer);
2357 + list_add_tail(&node->peer_list, &peer->allowedips_list);
2358 + copy_and_assign_cidr(node, key, cidr, bits);
2359 +- rcu_assign_pointer(*trie, node);
2360 ++ connect_node(trie, 2, node);
2361 + return 0;
2362 + }
2363 + if (node_placement(*trie, key, cidr, bits, &node, lock)) {
2364 +@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
2365 + return 0;
2366 + }
2367 +
2368 +- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
2369 ++ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
2370 + if (unlikely(!newnode))
2371 + return -ENOMEM;
2372 + RCU_INIT_POINTER(newnode->peer, peer);
2373 +@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
2374 + if (!node) {
2375 + down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
2376 + } else {
2377 +- down = rcu_dereference_protected(CHOOSE_NODE(node, key),
2378 +- lockdep_is_held(lock));
2379 ++ const u8 bit = choose(node, key);
2380 ++ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
2381 + if (!down) {
2382 +- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
2383 ++ connect_node(&node->bit[bit], bit, newnode);
2384 + return 0;
2385 + }
2386 + }
2387 +@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
2388 + parent = node;
2389 +
2390 + if (newnode->cidr == cidr) {
2391 +- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
2392 ++ choose_and_connect_node(newnode, down);
2393 + if (!parent)
2394 +- rcu_assign_pointer(*trie, newnode);
2395 ++ connect_node(trie, 2, newnode);
2396 + else
2397 +- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
2398 +- newnode);
2399 +- } else {
2400 +- node = kzalloc(sizeof(*node), GFP_KERNEL);
2401 +- if (unlikely(!node)) {
2402 +- list_del(&newnode->peer_list);
2403 +- kfree(newnode);
2404 +- return -ENOMEM;
2405 +- }
2406 +- INIT_LIST_HEAD(&node->peer_list);
2407 +- copy_and_assign_cidr(node, newnode->bits, cidr, bits);
2408 ++ choose_and_connect_node(parent, newnode);
2409 ++ return 0;
2410 ++ }
2411 +
2412 +- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
2413 +- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
2414 +- if (!parent)
2415 +- rcu_assign_pointer(*trie, node);
2416 +- else
2417 +- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
2418 +- node);
2419 ++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
2420 ++ if (unlikely(!node)) {
2421 ++ list_del(&newnode->peer_list);
2422 ++ kmem_cache_free(node_cache, newnode);
2423 ++ return -ENOMEM;
2424 + }
2425 ++ INIT_LIST_HEAD(&node->peer_list);
2426 ++ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
2427 ++
2428 ++ choose_and_connect_node(node, down);
2429 ++ choose_and_connect_node(node, newnode);
2430 ++ if (!parent)
2431 ++ connect_node(trie, 2, node);
2432 ++ else
2433 ++ choose_and_connect_node(parent, node);
2434 + return 0;
2435 + }
2436 +
2437 +@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
2438 + void wg_allowedips_remove_by_peer(struct allowedips *table,
2439 + struct wg_peer *peer, struct mutex *lock)
2440 + {
2441 ++ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
2442 ++ bool free_parent;
2443 ++
2444 ++ if (list_empty(&peer->allowedips_list))
2445 ++ return;
2446 + ++table->seq;
2447 +- walk_remove_by_peer(&table->root4, peer, lock);
2448 +- walk_remove_by_peer(&table->root6, peer, lock);
2449 ++ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
2450 ++ list_del_init(&node->peer_list);
2451 ++ RCU_INIT_POINTER(node->peer, NULL);
2452 ++ if (node->bit[0] && node->bit[1])
2453 ++ continue;
2454 ++ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
2455 ++ lockdep_is_held(lock));
2456 ++ if (child)
2457 ++ child->parent_bit_packed = node->parent_bit_packed;
2458 ++ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
2459 ++ *parent_bit = child;
2460 ++ parent = (void *)parent_bit -
2461 ++ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
2462 ++ free_parent = !rcu_access_pointer(node->bit[0]) &&
2463 ++ !rcu_access_pointer(node->bit[1]) &&
2464 ++ (node->parent_bit_packed & 3) <= 1 &&
2465 ++ !rcu_access_pointer(parent->peer);
2466 ++ if (free_parent)
2467 ++ child = rcu_dereference_protected(
2468 ++ parent->bit[!(node->parent_bit_packed & 1)],
2469 ++ lockdep_is_held(lock));
2470 ++ call_rcu(&node->rcu, node_free_rcu);
2471 ++ if (!free_parent)
2472 ++ continue;
2473 ++ if (child)
2474 ++ child->parent_bit_packed = parent->parent_bit_packed;
2475 ++ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
2476 ++ call_rcu(&parent->rcu, node_free_rcu);
2477 ++ }
2478 + }
2479 +
2480 + int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
2481 +@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
2482 + return NULL;
2483 + }
2484 +
2485 ++int __init wg_allowedips_slab_init(void)
2486 ++{
2487 ++ node_cache = KMEM_CACHE(allowedips_node, 0);
2488 ++ return node_cache ? 0 : -ENOMEM;
2489 ++}
2490 ++
2491 ++void wg_allowedips_slab_uninit(void)
2492 ++{
2493 ++ rcu_barrier();
2494 ++ kmem_cache_destroy(node_cache);
2495 ++}
2496 ++
2497 + #include "selftest/allowedips.c"
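
The allowedips rewrite replaces the recursive walk_remove_by_peer() with O(1) unlinking: each node caches the address of the parent slot that points at it, with a 2-bit tag packed into the pointer's low bits (0/1 for which bit[] child, 2 for a tree root pointer). This works because nodes now come from a kmem_cache with at least 4-byte alignment, hence also the new __aligned(4) on struct allowedips in the header below. A stand-alone userspace sketch of the packing trick (all names illustrative):

  #include <assert.h>
  #include <stdint.h>

  struct node;

  /* Store a slot pointer plus a 2-bit tag in one word; the low two
   * bits are free because the slot is at least 4-byte aligned. */
  static uintptr_t pack_parent(struct node **slot, unsigned int tag)
  {
          assert(((uintptr_t)slot & 3) == 0 && tag <= 3);
          return (uintptr_t)slot | tag;
  }

  static struct node **parent_slot(uintptr_t packed)
  {
          return (struct node **)(packed & ~(uintptr_t)3);
  }

  static unsigned int parent_tag(uintptr_t packed)
  {
          return (unsigned int)(packed & 3);
  }
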
2498 +diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
2499 +index e5c83cafcef4c..2346c797eb4d8 100644
2500 +--- a/drivers/net/wireguard/allowedips.h
2501 ++++ b/drivers/net/wireguard/allowedips.h
2502 +@@ -15,14 +15,11 @@ struct wg_peer;
2503 + struct allowedips_node {
2504 + struct wg_peer __rcu *peer;
2505 + struct allowedips_node __rcu *bit[2];
2506 +- /* While it may seem scandalous that we waste space for v4,
2507 +- * we're alloc'ing to the nearest power of 2 anyway, so this
2508 +- * doesn't actually make a difference.
2509 +- */
2510 +- u8 bits[16] __aligned(__alignof(u64));
2511 + u8 cidr, bit_at_a, bit_at_b, bitlen;
2512 ++ u8 bits[16] __aligned(__alignof(u64));
2513 +
2514 +- /* Keep rarely used list at bottom to be beyond cache line. */
2515 ++ /* Keep rarely used members at bottom to be beyond cache line. */
2516 ++ unsigned long parent_bit_packed;
2517 + union {
2518 + struct list_head peer_list;
2519 + struct rcu_head rcu;
2520 +@@ -33,7 +30,7 @@ struct allowedips {
2521 + struct allowedips_node __rcu *root4;
2522 + struct allowedips_node __rcu *root6;
2523 + u64 seq;
2524 +-};
2525 ++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
2526 +
2527 + void wg_allowedips_init(struct allowedips *table);
2528 + void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
2529 +@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
2530 + bool wg_allowedips_selftest(void);
2531 + #endif
2532 +
2533 ++int wg_allowedips_slab_init(void);
2534 ++void wg_allowedips_slab_uninit(void);
2535 ++
2536 + #endif /* _WG_ALLOWEDIPS_H */
2537 +diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
2538 +index 7a7d5f1a80fc7..75dbe77b0b4b4 100644
2539 +--- a/drivers/net/wireguard/main.c
2540 ++++ b/drivers/net/wireguard/main.c
2541 +@@ -21,13 +21,22 @@ static int __init mod_init(void)
2542 + {
2543 + int ret;
2544 +
2545 ++ ret = wg_allowedips_slab_init();
2546 ++ if (ret < 0)
2547 ++ goto err_allowedips;
2548 ++
2549 + #ifdef DEBUG
2550 ++ ret = -ENOTRECOVERABLE;
2551 + if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
2552 + !wg_ratelimiter_selftest())
2553 +- return -ENOTRECOVERABLE;
2554 ++ goto err_peer;
2555 + #endif
2556 + wg_noise_init();
2557 +
2558 ++ ret = wg_peer_init();
2559 ++ if (ret < 0)
2560 ++ goto err_peer;
2561 ++
2562 + ret = wg_device_init();
2563 + if (ret < 0)
2564 + goto err_device;
2565 +@@ -44,6 +53,10 @@ static int __init mod_init(void)
2566 + err_netlink:
2567 + wg_device_uninit();
2568 + err_device:
2569 ++ wg_peer_uninit();
2570 ++err_peer:
2571 ++ wg_allowedips_slab_uninit();
2572 ++err_allowedips:
2573 + return ret;
2574 + }
2575 +
2576 +@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
2577 + {
2578 + wg_genetlink_uninit();
2579 + wg_device_uninit();
2580 ++ wg_peer_uninit();
2581 ++ wg_allowedips_slab_uninit();
2582 + }
2583 +
2584 + module_init(mod_init);
2585 +diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
2586 +index cd5cb0292cb67..1acd00ab2fbcb 100644
2587 +--- a/drivers/net/wireguard/peer.c
2588 ++++ b/drivers/net/wireguard/peer.c
2589 +@@ -15,6 +15,7 @@
2590 + #include <linux/rcupdate.h>
2591 + #include <linux/list.h>
2592 +
2593 ++static struct kmem_cache *peer_cache;
2594 + static atomic64_t peer_counter = ATOMIC64_INIT(0);
2595 +
2596 + struct wg_peer *wg_peer_create(struct wg_device *wg,
2597 +@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
2598 + if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
2599 + return ERR_PTR(ret);
2600 +
2601 +- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
2602 ++ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
2603 + if (unlikely(!peer))
2604 + return ERR_PTR(ret);
2605 +- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
2606 ++ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
2607 + goto err;
2608 +
2609 + peer->device = wg;
2610 +@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
2611 + return peer;
2612 +
2613 + err:
2614 +- kfree(peer);
2615 ++ kmem_cache_free(peer_cache, peer);
2616 + return ERR_PTR(ret);
2617 + }
2618 +
2619 +@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
2620 + /* Mark as dead, so that we don't allow jumping contexts after. */
2621 + WRITE_ONCE(peer->is_dead, true);
2622 +
2623 +- /* The caller must now synchronize_rcu() for this to take effect. */
2624 ++ /* The caller must now synchronize_net() for this to take effect. */
2625 + }
2626 +
2627 + static void peer_remove_after_dead(struct wg_peer *peer)
2628 +@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
2629 + lockdep_assert_held(&peer->device->device_update_lock);
2630 +
2631 + peer_make_dead(peer);
2632 +- synchronize_rcu();
2633 ++ synchronize_net();
2634 + peer_remove_after_dead(peer);
2635 + }
2636 +
2637 +@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
2638 + peer_make_dead(peer);
2639 + list_add_tail(&peer->peer_list, &dead_peers);
2640 + }
2641 +- synchronize_rcu();
2642 ++ synchronize_net();
2643 + list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
2644 + peer_remove_after_dead(peer);
2645 + }
2646 +@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
2647 + /* The final zeroing takes care of clearing any remaining handshake key
2648 + * material and other potentially sensitive information.
2649 + */
2650 +- kfree_sensitive(peer);
2651 ++ memzero_explicit(peer, sizeof(*peer));
2652 ++ kmem_cache_free(peer_cache, peer);
2653 + }
2654 +
2655 + static void kref_release(struct kref *refcount)
2656 +@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
2657 + return;
2658 + kref_put(&peer->refcount, kref_release);
2659 + }
2660 ++
2661 ++int __init wg_peer_init(void)
2662 ++{
2663 ++ peer_cache = KMEM_CACHE(wg_peer, 0);
2664 ++ return peer_cache ? 0 : -ENOMEM;
2665 ++}
2666 ++
2667 ++void wg_peer_uninit(void)
2668 ++{
2669 ++ kmem_cache_destroy(peer_cache);
2670 ++}
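
Peer allocation moves from kzalloc() to a dedicated kmem_cache, which has a security consequence: kfree_sensitive() only works for kmalloc memory, so the RCU release path now wipes the object with memzero_explicit(), which the compiler cannot optimize away, before kmem_cache_free(). The synchronize_rcu() to synchronize_net() swaps in the same file use the networking-aware wrapper, which can take the cheaper expedited path when the caller holds RTNL. A hedged sketch of the cache-plus-wipe shape; the slab and string helpers are real kernel API, the type is illustrative:

  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/string.h>

  struct secret_obj { u8 key[32]; };

  static struct kmem_cache *secret_cache;

  static int __init secret_cache_init(void)
  {
          secret_cache = KMEM_CACHE(secret_obj, 0);
          return secret_cache ? 0 : -ENOMEM;
  }

  static void secret_obj_free(struct secret_obj *obj)
  {
          memzero_explicit(obj, sizeof(*obj)); /* wipe key material */
          kmem_cache_free(secret_cache, obj);
  }
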
2671 +diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
2672 +index 0809cda08bfa4..74227aa2d5b5a 100644
2673 +--- a/drivers/net/wireguard/peer.h
2674 ++++ b/drivers/net/wireguard/peer.h
2675 +@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
2676 + void wg_peer_remove(struct wg_peer *peer);
2677 + void wg_peer_remove_all(struct wg_device *wg);
2678 +
2679 ++int wg_peer_init(void);
2680 ++void wg_peer_uninit(void);
2681 ++
2682 + #endif /* _WG_PEER_H */
2683 +diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
2684 +index 846db14cb046b..e173204ae7d78 100644
2685 +--- a/drivers/net/wireguard/selftest/allowedips.c
2686 ++++ b/drivers/net/wireguard/selftest/allowedips.c
2687 +@@ -19,32 +19,22 @@
2688 +
2689 + #include <linux/siphash.h>
2690 +
2691 +-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
2692 +- u8 cidr)
2693 +-{
2694 +- swap_endian(dst, src, bits);
2695 +- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
2696 +- if (cidr)
2697 +- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
2698 +-}
2699 +-
2700 + static __init void print_node(struct allowedips_node *node, u8 bits)
2701 + {
2702 + char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
2703 +- char *fmt_declaration = KERN_DEBUG
2704 +- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
2705 ++ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
2706 ++ u8 ip1[16], ip2[16], cidr1, cidr2;
2707 + char *style = "dotted";
2708 +- u8 ip1[16], ip2[16];
2709 + u32 color = 0;
2710 +
2711 ++ if (node == NULL)
2712 ++ return;
2713 + if (bits == 32) {
2714 + fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
2715 +- fmt_declaration = KERN_DEBUG
2716 +- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
2717 ++ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
2718 + } else if (bits == 128) {
2719 + fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
2720 +- fmt_declaration = KERN_DEBUG
2721 +- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
2722 ++ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
2723 + }
2724 + if (node->peer) {
2725 + hsiphash_key_t key = { { 0 } };
2726 +@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
2727 + hsiphash_1u32(0xabad1dea, &key) % 200;
2728 + style = "bold";
2729 + }
2730 +- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
2731 +- printk(fmt_declaration, ip1, node->cidr, style, color);
2732 ++ wg_allowedips_read_node(node, ip1, &cidr1);
2733 ++ printk(fmt_declaration, ip1, cidr1, style, color);
2734 + if (node->bit[0]) {
2735 +- swap_endian_and_apply_cidr(ip2,
2736 +- rcu_dereference_raw(node->bit[0])->bits, bits,
2737 +- node->cidr);
2738 +- printk(fmt_connection, ip1, node->cidr, ip2,
2739 +- rcu_dereference_raw(node->bit[0])->cidr);
2740 +- print_node(rcu_dereference_raw(node->bit[0]), bits);
2741 ++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
2742 ++ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
2743 + }
2744 + if (node->bit[1]) {
2745 +- swap_endian_and_apply_cidr(ip2,
2746 +- rcu_dereference_raw(node->bit[1])->bits,
2747 +- bits, node->cidr);
2748 +- printk(fmt_connection, ip1, node->cidr, ip2,
2749 +- rcu_dereference_raw(node->bit[1])->cidr);
2750 +- print_node(rcu_dereference_raw(node->bit[1]), bits);
2751 ++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
2752 ++ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
2753 + }
2754 ++ if (node->bit[0])
2755 ++ print_node(rcu_dereference_raw(node->bit[0]), bits);
2756 ++ if (node->bit[1])
2757 ++ print_node(rcu_dereference_raw(node->bit[1]), bits);
2758 + }
2759 +
2760 + static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
2761 +@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
2762 + {
2763 + union nf_inet_addr mask;
2764 +
2765 +- memset(&mask, 0x00, 128 / 8);
2766 +- memset(&mask, 0xff, cidr / 8);
2767 ++ memset(&mask, 0, sizeof(mask));
2768 ++ memset(&mask.all, 0xff, cidr / 8);
2769 + if (cidr % 32)
2770 + mask.all[cidr / 32] = (__force u32)htonl(
2771 + (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
2772 +@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
2773 + }
2774 +
2775 + static __init inline bool
2776 +-horrible_match_v4(const struct horrible_allowedips_node *node,
2777 +- struct in_addr *ip)
2778 ++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
2779 + {
2780 + return (ip->s_addr & node->mask.ip) == node->ip.ip;
2781 + }
2782 +
2783 + static __init inline bool
2784 +-horrible_match_v6(const struct horrible_allowedips_node *node,
2785 +- struct in6_addr *ip)
2786 ++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
2787 + {
2788 +- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
2789 +- node->ip.ip6[0] &&
2790 +- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
2791 +- node->ip.ip6[1] &&
2792 +- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
2793 +- node->ip.ip6[2] &&
2794 ++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
2795 ++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
2796 ++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
2797 + (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
2798 + }
2799 +
2800 + static __init void
2801 +-horrible_insert_ordered(struct horrible_allowedips *table,
2802 +- struct horrible_allowedips_node *node)
2803 ++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
2804 + {
2805 + struct horrible_allowedips_node *other = NULL, *where = NULL;
2806 + u8 my_cidr = horrible_mask_to_cidr(node->mask);
2807 +
2808 + hlist_for_each_entry(other, &table->head, table) {
2809 +- if (!memcmp(&other->mask, &node->mask,
2810 +- sizeof(union nf_inet_addr)) &&
2811 +- !memcmp(&other->ip, &node->ip,
2812 +- sizeof(union nf_inet_addr)) &&
2813 +- other->ip_version == node->ip_version) {
2814 ++ if (other->ip_version == node->ip_version &&
2815 ++ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
2816 ++ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
2817 + other->value = node->value;
2818 + kfree(node);
2819 + return;
2820 + }
2821 ++ }
2822 ++ hlist_for_each_entry(other, &table->head, table) {
2823 + where = other;
2824 + if (horrible_mask_to_cidr(other->mask) <= my_cidr)
2825 + break;
2826 +@@ -201,8 +181,7 @@ static __init int
2827 + horrible_allowedips_insert_v4(struct horrible_allowedips *table,
2828 + struct in_addr *ip, u8 cidr, void *value)
2829 + {
2830 +- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
2831 +- GFP_KERNEL);
2832 ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
2833 +
2834 + if (unlikely(!node))
2835 + return -ENOMEM;
2836 +@@ -219,8 +198,7 @@ static __init int
2837 + horrible_allowedips_insert_v6(struct horrible_allowedips *table,
2838 + struct in6_addr *ip, u8 cidr, void *value)
2839 + {
2840 +- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
2841 +- GFP_KERNEL);
2842 ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
2843 +
2844 + if (unlikely(!node))
2845 + return -ENOMEM;
2846 +@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
2847 + }
2848 +
2849 + static __init void *
2850 +-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
2851 +- struct in_addr *ip)
2852 ++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
2853 + {
2854 + struct horrible_allowedips_node *node;
2855 +- void *ret = NULL;
2856 +
2857 + hlist_for_each_entry(node, &table->head, table) {
2858 +- if (node->ip_version != 4)
2859 +- continue;
2860 +- if (horrible_match_v4(node, ip)) {
2861 +- ret = node->value;
2862 +- break;
2863 +- }
2864 ++ if (node->ip_version == 4 && horrible_match_v4(node, ip))
2865 ++ return node->value;
2866 + }
2867 +- return ret;
2868 ++ return NULL;
2869 + }
2870 +
2871 + static __init void *
2872 +-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
2873 +- struct in6_addr *ip)
2874 ++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
2875 + {
2876 + struct horrible_allowedips_node *node;
2877 +- void *ret = NULL;
2878 +
2879 + hlist_for_each_entry(node, &table->head, table) {
2880 +- if (node->ip_version != 6)
2881 ++ if (node->ip_version == 6 && horrible_match_v6(node, ip))
2882 ++ return node->value;
2883 ++ }
2884 ++ return NULL;
2885 ++}
2886 ++
2887 ++
2888 ++static __init void
2889 ++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
2890 ++{
2891 ++ struct horrible_allowedips_node *node;
2892 ++ struct hlist_node *h;
2893 ++
2894 ++ hlist_for_each_entry_safe(node, h, &table->head, table) {
2895 ++ if (node->value != value)
2896 + continue;
2897 +- if (horrible_match_v6(node, ip)) {
2898 +- ret = node->value;
2899 +- break;
2900 +- }
2901 ++ hlist_del(&node->table);
2902 ++ kfree(node);
2903 + }
2904 +- return ret;
2905 ++
2906 + }
2907 +
2908 + static __init bool randomized_test(void)
2909 +@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
2910 + goto free;
2911 + }
2912 + kref_init(&peers[i]->refcount);
2913 ++ INIT_LIST_HEAD(&peers[i]->allowedips_list);
2914 + }
2915 +
2916 + mutex_lock(&mutex);
2917 +@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
2918 + if (wg_allowedips_insert_v4(&t,
2919 + (struct in_addr *)mutated,
2920 + cidr, peer, &mutex) < 0) {
2921 +- pr_err("allowedips random malloc: FAIL\n");
2922 ++ pr_err("allowedips random self-test malloc: FAIL\n");
2923 + goto free_locked;
2924 + }
2925 + if (horrible_allowedips_insert_v4(&h,
2926 +@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
2927 + print_tree(t.root6, 128);
2928 + }
2929 +
2930 +- for (i = 0; i < NUM_QUERIES; ++i) {
2931 +- prandom_bytes(ip, 4);
2932 +- if (lookup(t.root4, 32, ip) !=
2933 +- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
2934 +- pr_err("allowedips random self-test: FAIL\n");
2935 +- goto free;
2936 ++ for (j = 0;; ++j) {
2937 ++ for (i = 0; i < NUM_QUERIES; ++i) {
2938 ++ prandom_bytes(ip, 4);
2939 ++ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
2940 ++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
2941 ++ pr_err("allowedips random v4 self-test: FAIL\n");
2942 ++ goto free;
2943 ++ }
2944 ++ prandom_bytes(ip, 16);
2945 ++ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
2946 ++ pr_err("allowedips random v6 self-test: FAIL\n");
2947 ++ goto free;
2948 ++ }
2949 + }
2950 ++ if (j >= NUM_PEERS)
2951 ++ break;
2952 ++ mutex_lock(&mutex);
2953 ++ wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
2954 ++ mutex_unlock(&mutex);
2955 ++ horrible_allowedips_remove_by_value(&h, peers[j]);
2956 + }
2957 +
2958 +- for (i = 0; i < NUM_QUERIES; ++i) {
2959 +- prandom_bytes(ip, 16);
2960 +- if (lookup(t.root6, 128, ip) !=
2961 +- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
2962 +- pr_err("allowedips random self-test: FAIL\n");
2963 +- goto free;
2964 +- }
2965 ++ if (t.root4 || t.root6) {
2966 ++ pr_err("allowedips random self-test removal: FAIL\n");
2967 ++ goto free;
2968 + }
2969 ++
2970 + ret = true;
2971 +
2972 + free:
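
The new horrible_allowedips_remove_by_value() above walks the table with hlist_for_each_entry_safe() so entries can be unlinked and freed while the walk is in progress. A minimal userspace sketch of that safe-removal pattern, assuming a plain singly-linked list and toy types in place of the kernel's hlist:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        void *value;
    };

    /* Unlink and free every node whose value matches. Iterating via
     * the address of the link being rewritten means deletion never
     * invalidates the iterator, which is the guarantee the _safe
     * variant of the kernel macro provides. */
    static void remove_by_value(struct node **head, const void *value)
    {
        struct node **link = head, *n;

        while ((n = *link) != NULL) {
            if (n->value == value) {
                *link = n->next; /* unlink before freeing */
                free(n);
            } else {
                link = &n->next;
            }
        }
    }

    int main(void)
    {
        struct node *head = NULL;
        int a, b;

        for (int i = 0; i < 4; ++i) {
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->value = (i & 1) ? &a : &b;
            n->next = head;
            head = n;
        }
        remove_by_value(&head, &a);
        remove_by_value(&head, &b);
        printf("list empty: %s\n", head ? "no" : "yes");
        return 0;
    }
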
2973 +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
2974 +index c33e2c81635fa..c8cd385d233b6 100644
2975 +--- a/drivers/net/wireguard/socket.c
2976 ++++ b/drivers/net/wireguard/socket.c
2977 +@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
2978 + if (new4)
2979 + wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
2980 + mutex_unlock(&wg->socket_update_lock);
2981 +- synchronize_rcu();
2982 ++ synchronize_net();
2983 + sock_free(old4);
2984 + sock_free(old6);
2985 + }
2986 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
2987 +index e02a4fbb74de5..7ce9807fc24c5 100644
2988 +--- a/drivers/net/xen-netback/interface.c
2989 ++++ b/drivers/net/xen-netback/interface.c
2990 +@@ -685,6 +685,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
2991 + {
2992 + if (queue->task) {
2993 + kthread_stop(queue->task);
2994 ++ put_task_struct(queue->task);
2995 + queue->task = NULL;
2996 + }
2997 +
2998 +@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
2999 + if (IS_ERR(task))
3000 + goto kthread_err;
3001 + queue->task = task;
3002 ++ /*
3003 ++ * Take a reference to the task in order to prevent it from being freed
3004 ++ * if the thread function returns before kthread_stop is called.
3005 ++ */
3006 ++ get_task_struct(task);
3007 +
3008 + task = kthread_run(xenvif_dealloc_kthread, queue,
3009 + "%s-dealloc", queue->name);
3010 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3011 +index 8b326508a480e..e6d58402b829d 100644
3012 +--- a/drivers/nvme/host/rdma.c
3013 ++++ b/drivers/nvme/host/rdma.c
3014 +@@ -1327,16 +1327,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
3015 + int count)
3016 + {
3017 + struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
3018 +- struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
3019 + struct ib_sge *sge = &req->sge[1];
3020 ++ struct scatterlist *sgl;
3021 + u32 len = 0;
3022 + int i;
3023 +
3024 +- for (i = 0; i < count; i++, sgl++, sge++) {
3025 ++ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
3026 + sge->addr = sg_dma_address(sgl);
3027 + sge->length = sg_dma_len(sgl);
3028 + sge->lkey = queue->device->pd->local_dma_lkey;
3029 + len += sge->length;
3030 ++ sge++;
3031 + }
3032 +
3033 + sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
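
The nvme-rdma hunk swaps bare sgl++ pointer arithmetic for for_each_sg(), since a scatterlist may be chained: past the end of one array segment the next entry lives in a separate allocation, reached through a chain marker rather than the adjacent slot. A userspace analog, assuming a toy two-segment list and an entry_next() helper standing in for sg_next():

    #include <stdio.h>

    struct entry {
        int len;
        struct entry *chain; /* set on a marker: jump to next segment */
    };

    /* stand-in for sg_next(): step forward, and follow a chain
     * marker instead of treating it as payload */
    static struct entry *entry_next(struct entry *e)
    {
        e++;
        return e->chain ? e->chain : e;
    }

    int main(void)
    {
        struct entry seg2[2] = { { 30, NULL }, { 40, NULL } };
        struct entry seg1[3] = { { 10, NULL }, { 20, NULL }, { 0, seg2 } };
        struct entry *e = seg1;
        int i, total = 0;

        for (i = 0; i < 4; ++i) {  /* 4 payload entries in all */
            total += e->len;       /* plain e++ would hit the marker */
            if (i < 3)
                e = entry_next(e);
        }
        printf("total length: %d\n", total); /* 100 */
        return 0;
    }
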
3034 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
3035 +index 46e4f7ea34c8b..8b939e9db470c 100644
3036 +--- a/drivers/nvme/target/core.c
3037 ++++ b/drivers/nvme/target/core.c
3038 +@@ -988,19 +988,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
3039 + return req->transfer_len - req->metadata_len;
3040 + }
3041 +
3042 +-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
3043 ++static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
3044 ++ struct nvmet_req *req)
3045 + {
3046 +- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
3047 ++ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
3048 + nvmet_data_transfer_len(req));
3049 + if (!req->sg)
3050 + goto out_err;
3051 +
3052 + if (req->metadata_len) {
3053 +- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
3054 ++ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
3055 + &req->metadata_sg_cnt, req->metadata_len);
3056 + if (!req->metadata_sg)
3057 + goto out_free_sg;
3058 + }
3059 ++
3060 ++ req->p2p_dev = p2p_dev;
3061 ++
3062 + return 0;
3063 + out_free_sg:
3064 + pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
3065 +@@ -1008,25 +1012,19 @@ out_err:
3066 + return -ENOMEM;
3067 + }
3068 +
3069 +-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
3070 ++static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
3071 + {
3072 +- if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
3073 +- return false;
3074 +-
3075 +- if (req->sq->ctrl && req->sq->qid && req->ns) {
3076 +- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
3077 +- req->ns->nsid);
3078 +- if (req->p2p_dev)
3079 +- return true;
3080 +- }
3081 +-
3082 +- req->p2p_dev = NULL;
3083 +- return false;
3084 ++ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
3085 ++ !req->sq->ctrl || !req->sq->qid || !req->ns)
3086 ++ return NULL;
3087 ++ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
3088 + }
3089 +
3090 + int nvmet_req_alloc_sgls(struct nvmet_req *req)
3091 + {
3092 +- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
3093 ++ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
3094 ++
3095 ++ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
3096 + return 0;
3097 +
3098 + req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
3099 +@@ -1055,6 +1053,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
3100 + pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
3101 + if (req->metadata_sg)
3102 + pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
3103 ++ req->p2p_dev = NULL;
3104 + } else {
3105 + sgl_free(req->sg);
3106 + if (req->metadata_sg)
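
In the nvmet change above, req->p2p_dev is now assigned only after both P2P allocations succeed, so nvmet_req_free_sgls() can treat a non-NULL p2p_dev as proof the P2P path was fully set up (and clear it once freed). A sketch of that allocate-then-commit shape, assuming plain malloc() standing in for pci_p2pmem_alloc_sgl():

    #include <stdlib.h>

    struct req {
        void *sg;
        void *metadata_sg;
        void *p2p_dev;  /* non-NULL only once setup fully succeeded */
    };

    static int alloc_sgls(struct req *r, void *p2p_dev, size_t len,
                          size_t mlen)
    {
        r->sg = malloc(len);
        if (!r->sg)
            return -1;
        if (mlen) {
            r->metadata_sg = malloc(mlen);
            if (!r->metadata_sg) {
                free(r->sg);
                r->sg = NULL;
                return -1;
            }
        }
        r->p2p_dev = p2p_dev;  /* commit last, after every step worked */
        return 0;
    }

    static void free_sgls(struct req *r)
    {
        if (r->p2p_dev) {      /* reliable flag: both buffers exist */
            free(r->sg);
            free(r->metadata_sg);
            r->p2p_dev = NULL; /* mirror the hunk's cleanup */
        }
    }

    int main(void)
    {
        struct req r = { 0 };
        int fake_dev;

        if (alloc_sgls(&r, &fake_dev, 64, 16) == 0)
            free_sgls(&r);
        return 0;
    }
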
3107 +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
3108 +index 780d7c4fd7565..0790de29f0ca2 100644
3109 +--- a/drivers/tee/optee/call.c
3110 ++++ b/drivers/tee/optee/call.c
3111 +@@ -217,6 +217,7 @@ int optee_open_session(struct tee_context *ctx,
3112 + struct optee_msg_arg *msg_arg;
3113 + phys_addr_t msg_parg;
3114 + struct optee_session *sess = NULL;
3115 ++ uuid_t client_uuid;
3116 +
3117 + /* +2 for the meta parameters added below */
3118 + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
3119 +@@ -237,10 +238,11 @@ int optee_open_session(struct tee_context *ctx,
3120 + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
3121 + msg_arg->params[1].u.value.c = arg->clnt_login;
3122 +
3123 +- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
3124 +- arg->clnt_login, arg->clnt_uuid);
3125 ++ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
3126 ++ arg->clnt_uuid);
3127 + if (rc)
3128 + goto out;
3129 ++ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
3130 +
3131 + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
3132 + if (rc)
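
The optee fix stops writing the computed client UUID through a cast of the value struct and instead serializes it with export_uuid() into the new octets member, so the bytes crossing the ABI boundary no longer depend on host struct layout or endianness. A userspace sketch of the idea, assuming a toy 16-byte UUID type and the 24-byte parameter slot from the hunk below:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    typedef struct { uint8_t b[16]; } my_uuid;  /* toy uuid_t */

    struct msg_param {
        union {
            struct { uint64_t a, b, c; } value; /* host-endian fields */
            uint8_t octets[24];                 /* raw ABI byte string */
        } u;
    };

    /* like the kernel's export_uuid(): emit the UUID bytes verbatim,
     * independent of host endianness and struct layout */
    static void export_uuid_octets(uint8_t dst[16], const my_uuid *u)
    {
        memcpy(dst, u->b, sizeof(u->b));
    }

    int main(void)
    {
        my_uuid id = { { 0xde, 0xad, 0xbe, 0xef } }; /* rest zero */
        struct msg_param p = { 0 };

        export_uuid_octets(p.u.octets, &id);
        printf("%02x %02x %02x %02x ...\n",
               p.u.octets[0], p.u.octets[1], p.u.octets[2], p.u.octets[3]);
        return 0;
    }
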
3133 +diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
3134 +index 7b2d919da2ace..c7ac7d02d6cc9 100644
3135 +--- a/drivers/tee/optee/optee_msg.h
3136 ++++ b/drivers/tee/optee/optee_msg.h
3137 +@@ -9,7 +9,7 @@
3138 + #include <linux/types.h>
3139 +
3140 + /*
3141 +- * This file defines the OP-TEE message protocol used to communicate
3142 ++ * This file defines the OP-TEE message protocol (ABI) used to communicate
3143 + * with an instance of OP-TEE running in secure world.
3144 + *
3145 + * This file is divided into three sections.
3146 +@@ -146,9 +146,10 @@ struct optee_msg_param_value {
3147 + * @tmem: parameter by temporary memory reference
3148 + * @rmem: parameter by registered memory reference
3149 + * @value: parameter by opaque value
3150 ++ * @octets: parameter by octet string
3151 + *
3152 + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
3153 +- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
3154 ++ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
3155 + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
3156 + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
3157 + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
3158 +@@ -159,6 +160,7 @@ struct optee_msg_param {
3159 + struct optee_msg_param_tmem tmem;
3160 + struct optee_msg_param_rmem rmem;
3161 + struct optee_msg_param_value value;
3162 ++ u8 octets[24];
3163 + } u;
3164 + };
3165 +
3166 +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
3167 +index 2cf9fc915510c..844059861f9e1 100644
3168 +--- a/drivers/tty/serial/stm32-usart.c
3169 ++++ b/drivers/tty/serial/stm32-usart.c
3170 +@@ -213,14 +213,11 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
3171 + struct tty_port *tport = &port->state->port;
3172 + struct stm32_port *stm32_port = to_stm32_port(port);
3173 + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
3174 +- unsigned long c, flags;
3175 ++ unsigned long c;
3176 + u32 sr;
3177 + char flag;
3178 +
3179 +- if (threaded)
3180 +- spin_lock_irqsave(&port->lock, flags);
3181 +- else
3182 +- spin_lock(&port->lock);
3183 ++ spin_lock(&port->lock);
3184 +
3185 + while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
3186 + threaded)) {
3187 +@@ -277,10 +274,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
3188 + uart_insert_char(port, sr, USART_SR_ORE, c, flag);
3189 + }
3190 +
3191 +- if (threaded)
3192 +- spin_unlock_irqrestore(&port->lock, flags);
3193 +- else
3194 +- spin_unlock(&port->lock);
3195 ++ spin_unlock(&port->lock);
3196 +
3197 + tty_flip_buffer_push(tport);
3198 + }
3199 +@@ -653,7 +647,8 @@ static int stm32_usart_startup(struct uart_port *port)
3200 +
3201 + ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
3202 + stm32_usart_threaded_interrupt,
3203 +- IRQF_NO_SUSPEND, name, port);
3204 ++ IRQF_ONESHOT | IRQF_NO_SUSPEND,
3205 ++ name, port);
3206 + if (ret)
3207 + return ret;
3208 +
3209 +@@ -1126,6 +1121,13 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
3210 + struct dma_async_tx_descriptor *desc = NULL;
3211 + int ret;
3212 +
3213 ++ /*
3214 ++ * Using DMA and threaded handler for the console could lead to
3215 ++ * deadlocks.
3216 ++ */
3217 ++ if (uart_console(port))
3218 ++ return -ENODEV;
3219 ++
3220 + /* Request DMA RX channel */
3221 + stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
3222 + if (!stm32port->rx_ch) {
3223 +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
3224 +index 510fd0572feb1..e3f429f1575e9 100644
3225 +--- a/drivers/usb/dwc2/core_intr.c
3226 ++++ b/drivers/usb/dwc2/core_intr.c
3227 +@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
3228 + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
3229 +
3230 + hsotg->hibernated = 0;
3231 ++
3232 ++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
3233 ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
3234 + hsotg->bus_suspended = 0;
3235 ++#endif
3236 +
3237 + if (gpwrdn & GPWRDN_IDSTS) {
3238 + hsotg->op_state = OTG_STATE_B_PERIPHERAL;
3239 +diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
3240 +index 0f28bf99efebc..4e1107767e29b 100644
3241 +--- a/drivers/vfio/pci/Kconfig
3242 ++++ b/drivers/vfio/pci/Kconfig
3243 +@@ -2,6 +2,7 @@
3244 + config VFIO_PCI
3245 + tristate "VFIO support for PCI devices"
3246 + depends on VFIO && PCI && EVENTFD
3247 ++ depends on MMU
3248 + select VFIO_VIRQFD
3249 + select IRQ_BYPASS_MANAGER
3250 + help
3251 +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
3252 +index a402adee8a215..47f21a6ca7fe9 100644
3253 +--- a/drivers/vfio/pci/vfio_pci_config.c
3254 ++++ b/drivers/vfio/pci/vfio_pci_config.c
3255 +@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
3256 + if (len == 0xFF) {
3257 + len = vfio_ext_cap_len(vdev, ecap, epos);
3258 + if (len < 0)
3259 +- return ret;
3260 ++ return len;
3261 + }
3262 + }
3263 +
3264 +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
3265 +index fb4b385191f28..e83a7cd15c956 100644
3266 +--- a/drivers/vfio/platform/vfio_platform_common.c
3267 ++++ b/drivers/vfio/platform/vfio_platform_common.c
3268 +@@ -289,7 +289,7 @@ err_irq:
3269 + vfio_platform_regions_cleanup(vdev);
3270 + err_reg:
3271 + mutex_unlock(&driver_lock);
3272 +- module_put(THIS_MODULE);
3273 ++ module_put(vdev->parent_module);
3274 + return ret;
3275 + }
3276 +
3277 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3278 +index 51c18da4792ec..73ebe0c5fdbc9 100644
3279 +--- a/fs/btrfs/extent-tree.c
3280 ++++ b/fs/btrfs/extent-tree.c
3281 +@@ -1297,16 +1297,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
3282 + for (i = 0; i < bbio->num_stripes; i++, stripe++) {
3283 + u64 bytes;
3284 + struct request_queue *req_q;
3285 ++ struct btrfs_device *device = stripe->dev;
3286 +
3287 +- if (!stripe->dev->bdev) {
3288 ++ if (!device->bdev) {
3289 + ASSERT(btrfs_test_opt(fs_info, DEGRADED));
3290 + continue;
3291 + }
3292 +- req_q = bdev_get_queue(stripe->dev->bdev);
3293 ++ req_q = bdev_get_queue(device->bdev);
3294 + if (!blk_queue_discard(req_q))
3295 + continue;
3296 +
3297 +- ret = btrfs_issue_discard(stripe->dev->bdev,
3298 ++ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
3299 ++ continue;
3300 ++
3301 ++ ret = btrfs_issue_discard(device->bdev,
3302 + stripe->physical,
3303 + stripe->length,
3304 + &bytes);
3305 +@@ -1830,7 +1834,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
3306 + trace_run_delayed_ref_head(fs_info, head, 0);
3307 + btrfs_delayed_ref_unlock(head);
3308 + btrfs_put_delayed_ref_head(head);
3309 +- return 0;
3310 ++ return ret;
3311 + }
3312 +
3313 + static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
3314 +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
3315 +index 8f4f2bd6d9b95..48a2ea6d70921 100644
3316 +--- a/fs/btrfs/file-item.c
3317 ++++ b/fs/btrfs/file-item.c
3318 +@@ -690,7 +690,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3319 + u64 end_byte = bytenr + len;
3320 + u64 csum_end;
3321 + struct extent_buffer *leaf;
3322 +- int ret;
3323 ++ int ret = 0;
3324 + u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
3325 + int blocksize_bits = fs_info->sb->s_blocksize_bits;
3326 +
3327 +@@ -709,6 +709,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3328 + path->leave_spinning = 1;
3329 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3330 + if (ret > 0) {
3331 ++ ret = 0;
3332 + if (path->slots[0] == 0)
3333 + break;
3334 + path->slots[0]--;
3335 +@@ -765,7 +766,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3336 + ret = btrfs_del_items(trans, root, path,
3337 + path->slots[0], del_nr);
3338 + if (ret)
3339 +- goto out;
3340 ++ break;
3341 + if (key.offset == bytenr)
3342 + break;
3343 + } else if (key.offset < bytenr && csum_end > end_byte) {
3344 +@@ -809,8 +810,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3345 + ret = btrfs_split_item(trans, root, path, &key, offset);
3346 + if (ret && ret != -EAGAIN) {
3347 + btrfs_abort_transaction(trans, ret);
3348 +- goto out;
3349 ++ break;
3350 + }
3351 ++ ret = 0;
3352 +
3353 + key.offset = end_byte - 1;
3354 + } else {
3355 +@@ -820,8 +822,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3356 + }
3357 + btrfs_release_path(path);
3358 + }
3359 +- ret = 0;
3360 +-out:
3361 + btrfs_free_path(path);
3362 + return ret;
3363 + }
3364 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3365 +index 94c24b2a211bf..4f26dae63b64a 100644
3366 +--- a/fs/btrfs/inode.c
3367 ++++ b/fs/btrfs/inode.c
3368 +@@ -2760,6 +2760,18 @@ out:
3369 + if (ret || truncated) {
3370 + u64 unwritten_start = start;
3371 +
3372 ++ /*
3373 ++ * If we failed to finish this ordered extent for any reason we
3374 ++ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3375 ++ * extent, and mark the inode with the error if it wasn't
3376 ++ * already set. Any error during writeback would have already
3377 ++ * set the mapping error, so we need to set it if we're the ones
3378 ++ * marking this ordered extent as failed.
3379 ++ */
3380 ++ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3381 ++ &ordered_extent->flags))
3382 ++ mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3383 ++
3384 + if (truncated)
3385 + unwritten_start += logical_len;
3386 + clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3387 +@@ -8878,6 +8890,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
3388 + int ret2;
3389 + bool root_log_pinned = false;
3390 + bool dest_log_pinned = false;
3391 ++ bool need_abort = false;
3392 +
3393 + /* we only allow rename subvolume link between subvolumes */
3394 + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
3395 +@@ -8934,6 +8947,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
3396 + old_idx);
3397 + if (ret)
3398 + goto out_fail;
3399 ++ need_abort = true;
3400 + }
3401 +
3402 + /* And now for the dest. */
3403 +@@ -8949,8 +8963,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
3404 + new_ino,
3405 + btrfs_ino(BTRFS_I(old_dir)),
3406 + new_idx);
3407 +- if (ret)
3408 ++ if (ret) {
3409 ++ if (need_abort)
3410 ++ btrfs_abort_transaction(trans, ret);
3411 + goto out_fail;
3412 ++ }
3413 + }
3414 +
3415 + /* Update inode version and ctime/mtime. */
3416 +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
3417 +index eeb66e797e0bf..96ef9fed9a656 100644
3418 +--- a/fs/btrfs/reflink.c
3419 ++++ b/fs/btrfs/reflink.c
3420 +@@ -207,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst,
3421 + * inline extent's data to the page.
3422 + */
3423 + ASSERT(key.offset > 0);
3424 +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
3425 +- inline_data, size, datal,
3426 +- comp_type);
3427 +- goto out;
3428 ++ goto copy_to_page;
3429 + }
3430 + } else if (i_size_read(dst) <= datal) {
3431 + struct btrfs_file_extent_item *ei;
3432 +@@ -226,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst,
3433 + BTRFS_FILE_EXTENT_INLINE)
3434 + goto copy_inline_extent;
3435 +
3436 +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
3437 +- inline_data, size, datal, comp_type);
3438 +- goto out;
3439 ++ goto copy_to_page;
3440 + }
3441 +
3442 + copy_inline_extent:
3443 +- ret = 0;
3444 + /*
3445 + * We have no extent items, or we have an extent at offset 0 which may
3446 + * or may not be inlined. All these cases are dealt the same way.
3447 +@@ -244,11 +238,13 @@ copy_inline_extent:
3448 + * clone. Deal with all these cases by copying the inline extent
3449 + * data into the respective page at the destination inode.
3450 + */
3451 +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
3452 +- inline_data, size, datal, comp_type);
3453 +- goto out;
3454 ++ goto copy_to_page;
3455 + }
3456 +
3457 ++ /*
3458 ++ * Release path before starting a new transaction so we don't hold locks
3459 ++ * that would confuse lockdep.
3460 ++ */
3461 + btrfs_release_path(path);
3462 + /*
3463 + * If we end up here it means we're copying the inline extent into a leaf
3464 +@@ -281,11 +277,6 @@ copy_inline_extent:
3465 + ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
3466 + out:
3467 + if (!ret && !trans) {
3468 +- /*
3469 +- * Release path before starting a new transaction so we don't
3470 +- * hold locks that would confuse lockdep.
3471 +- */
3472 +- btrfs_release_path(path);
3473 + /*
3474 + * No transaction here means we copied the inline extent into a
3475 + * page of the destination inode.
3476 +@@ -306,6 +297,21 @@ out:
3477 + *trans_out = trans;
3478 +
3479 + return ret;
3480 ++
3481 ++copy_to_page:
3482 ++ /*
3483 ++ * Release our path because we don't need it anymore and also because
3484 ++ * copy_inline_to_page() needs to reserve data and metadata, which may
3485 ++ * need to flush delalloc when we are low on available space and
3486 ++ * therefore cause a deadlock if writeback of an inline extent needs to
3487 ++ * write to the same leaf or an ordered extent completion needs to write
3488 ++ * to the same leaf.
3489 ++ */
3490 ++ btrfs_release_path(path);
3491 ++
3492 ++ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
3493 ++ inline_data, size, datal, comp_type);
3494 ++ goto out;
3495 + }
3496 +
3497 + /**
3498 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
3499 +index 40845428b739c..d4a3a56726aa8 100644
3500 +--- a/fs/btrfs/tree-checker.c
3501 ++++ b/fs/btrfs/tree-checker.c
3502 +@@ -1440,22 +1440,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
3503 + return -EUCLEAN;
3504 + }
3505 + for (; ptr < end; ptr += sizeof(*dref)) {
3506 +- u64 root_objectid;
3507 +- u64 owner;
3508 + u64 offset;
3509 +- u64 hash;
3510 +
3511 ++ /*
3512 ++ * We cannot check the extent_data_ref hash due to possible
3513 ++ * overflow from the leaf due to hash collisions.
3514 ++ */
3515 + dref = (struct btrfs_extent_data_ref *)ptr;
3516 +- root_objectid = btrfs_extent_data_ref_root(leaf, dref);
3517 +- owner = btrfs_extent_data_ref_objectid(leaf, dref);
3518 + offset = btrfs_extent_data_ref_offset(leaf, dref);
3519 +- hash = hash_extent_data_ref(root_objectid, owner, offset);
3520 +- if (hash != key->offset) {
3521 +- extent_err(leaf, slot,
3522 +- "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
3523 +- hash, key->offset);
3524 +- return -EUCLEAN;
3525 +- }
3526 + if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
3527 + extent_err(leaf, slot,
3528 + "invalid extent data backref offset, have %llu expect aligned to %u",
3529 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3530 +index 9a0cfa0e124da..300951088a11c 100644
3531 +--- a/fs/btrfs/tree-log.c
3532 ++++ b/fs/btrfs/tree-log.c
3533 +@@ -1752,6 +1752,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
3534 + break;
3535 +
3536 + if (ret == 1) {
3537 ++ ret = 0;
3538 + if (path->slots[0] == 0)
3539 + break;
3540 + path->slots[0]--;
3541 +@@ -1764,17 +1765,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
3542 +
3543 + ret = btrfs_del_item(trans, root, path);
3544 + if (ret)
3545 +- goto out;
3546 ++ break;
3547 +
3548 + btrfs_release_path(path);
3549 + inode = read_one_inode(root, key.offset);
3550 +- if (!inode)
3551 +- return -EIO;
3552 ++ if (!inode) {
3553 ++ ret = -EIO;
3554 ++ break;
3555 ++ }
3556 +
3557 + ret = fixup_inode_link_count(trans, root, inode);
3558 + iput(inode);
3559 + if (ret)
3560 +- goto out;
3561 ++ break;
3562 +
3563 + /*
3564 + * fixup on a directory may create new entries,
3565 +@@ -1783,8 +1786,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
3566 + */
3567 + key.offset = (u64)-1;
3568 + }
3569 +- ret = 0;
3570 +-out:
3571 + btrfs_release_path(path);
3572 + return ret;
3573 + }
3574 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3575 +index 12eac88373032..e6542ba264330 100644
3576 +--- a/fs/ext4/extents.c
3577 ++++ b/fs/ext4/extents.c
3578 +@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
3579 + ext4_ext_mark_unwritten(ex2);
3580 +
3581 + err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3582 +- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3583 ++ if (err != -ENOSPC && err != -EDQUOT)
3584 ++ goto out;
3585 ++
3586 ++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3587 + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3588 + if (split_flag & EXT4_EXT_DATA_VALID1) {
3589 + err = ext4_ext_zeroout(inode, ex2);
3590 +@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
3591 + ext4_ext_pblock(&orig_ex));
3592 + }
3593 +
3594 +- if (err)
3595 +- goto fix_extent_len;
3596 +- /* update the extent length and mark as initialized */
3597 +- ex->ee_len = cpu_to_le16(ee_len);
3598 +- ext4_ext_try_to_merge(handle, inode, path, ex);
3599 +- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3600 +- if (err)
3601 +- goto fix_extent_len;
3602 +-
3603 +- /* update extent status tree */
3604 +- err = ext4_zeroout_es(inode, &zero_ex);
3605 +-
3606 +- goto out;
3607 +- } else if (err)
3608 +- goto fix_extent_len;
3609 +-
3610 +-out:
3611 +- ext4_ext_show_leaf(inode, path);
3612 +- return err;
3613 ++ if (!err) {
3614 ++ /* update the extent length and mark as initialized */
3615 ++ ex->ee_len = cpu_to_le16(ee_len);
3616 ++ ext4_ext_try_to_merge(handle, inode, path, ex);
3617 ++ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3618 ++ if (!err)
3619 ++ /* update extent status tree */
3620 ++ err = ext4_zeroout_es(inode, &zero_ex);
3621 ++ /* If we failed at this point, we don't know what state
3622 ++ * the extent tree is in, so don't try to fix the length
3623 ++ * of the original extent as that may do even more
3624 ++ * damage.
3625 ++ */
3626 ++ goto out;
3627 ++ }
3628 ++ }
3629 +
3630 + fix_extent_len:
3631 + ex->ee_len = orig_ex.ee_len;
3632 +@@ -3260,6 +3260,9 @@ fix_extent_len:
3633 + */
3634 + ext4_ext_dirty(handle, inode, path + path->p_depth);
3635 + return err;
3636 ++out:
3637 ++ ext4_ext_show_leaf(inode, path);
3638 ++ return err;
3639 + }
3640 +
3641 + /*
3642 +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
3643 +index 896e1176e0449..53647fa038773 100644
3644 +--- a/fs/ext4/fast_commit.c
3645 ++++ b/fs/ext4/fast_commit.c
3646 +@@ -1227,18 +1227,6 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
3647 +
3648 + /* Ext4 Replay Path Routines */
3649 +
3650 +-/* Get length of a particular tlv */
3651 +-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
3652 +-{
3653 +- return le16_to_cpu(tl->fc_len);
3654 +-}
3655 +-
3656 +-/* Get a pointer to "value" of a tlv */
3657 +-static inline u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
3658 +-{
3659 +- return (u8 *)tl + sizeof(*tl);
3660 +-}
3661 +-
3662 + /* Helper struct for dentry replay routines */
3663 + struct dentry_info_args {
3664 + int parent_ino, dname_len, ino, inode_len;
3665 +@@ -1246,28 +1234,29 @@ struct dentry_info_args {
3666 + };
3667 +
3668 + static inline void tl_to_darg(struct dentry_info_args *darg,
3669 +- struct ext4_fc_tl *tl)
3670 ++ struct ext4_fc_tl *tl, u8 *val)
3671 + {
3672 +- struct ext4_fc_dentry_info *fcd;
3673 ++ struct ext4_fc_dentry_info fcd;
3674 +
3675 +- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
3676 ++ memcpy(&fcd, val, sizeof(fcd));
3677 +
3678 +- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
3679 +- darg->ino = le32_to_cpu(fcd->fc_ino);
3680 +- darg->dname = fcd->fc_dname;
3681 +- darg->dname_len = ext4_fc_tag_len(tl) -
3682 +- sizeof(struct ext4_fc_dentry_info);
3683 ++ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
3684 ++ darg->ino = le32_to_cpu(fcd.fc_ino);
3685 ++ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
3686 ++ darg->dname_len = le16_to_cpu(tl->fc_len) -
3687 ++ sizeof(struct ext4_fc_dentry_info);
3688 + }
3689 +
3690 + /* Unlink replay function */
3691 +-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
3692 ++static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
3693 ++ u8 *val)
3694 + {
3695 + struct inode *inode, *old_parent;
3696 + struct qstr entry;
3697 + struct dentry_info_args darg;
3698 + int ret = 0;
3699 +
3700 +- tl_to_darg(&darg, tl);
3701 ++ tl_to_darg(&darg, tl, val);
3702 +
3703 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
3704 + darg.parent_ino, darg.dname_len);
3705 +@@ -1357,13 +1346,14 @@ out:
3706 + }
3707 +
3708 + /* Link replay function */
3709 +-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
3710 ++static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
3711 ++ u8 *val)
3712 + {
3713 + struct inode *inode;
3714 + struct dentry_info_args darg;
3715 + int ret = 0;
3716 +
3717 +- tl_to_darg(&darg, tl);
3718 ++ tl_to_darg(&darg, tl, val);
3719 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
3720 + darg.parent_ino, darg.dname_len);
3721 +
3722 +@@ -1408,9 +1398,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
3723 + /*
3724 + * Inode replay function
3725 + */
3726 +-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
3727 ++static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
3728 ++ u8 *val)
3729 + {
3730 +- struct ext4_fc_inode *fc_inode;
3731 ++ struct ext4_fc_inode fc_inode;
3732 + struct ext4_inode *raw_inode;
3733 + struct ext4_inode *raw_fc_inode;
3734 + struct inode *inode = NULL;
3735 +@@ -1418,9 +1409,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
3736 + int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
3737 + struct ext4_extent_header *eh;
3738 +
3739 +- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
3740 ++ memcpy(&fc_inode, val, sizeof(fc_inode));
3741 +
3742 +- ino = le32_to_cpu(fc_inode->fc_ino);
3743 ++ ino = le32_to_cpu(fc_inode.fc_ino);
3744 + trace_ext4_fc_replay(sb, tag, ino, 0, 0);
3745 +
3746 + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
3747 +@@ -1432,12 +1423,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
3748 +
3749 + ext4_fc_record_modified_inode(sb, ino);
3750 +
3751 +- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
3752 ++ raw_fc_inode = (struct ext4_inode *)
3753 ++ (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
3754 + ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
3755 + if (ret)
3756 + goto out;
3757 +
3758 +- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
3759 ++ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
3760 + raw_inode = ext4_raw_inode(&iloc);
3761 +
3762 + memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
3763 +@@ -1505,14 +1497,15 @@ out:
3764 + * inode for which we are trying to create a dentry here, should already have
3765 + * been replayed before we start here.
3766 + */
3767 +-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
3768 ++static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
3769 ++ u8 *val)
3770 + {
3771 + int ret = 0;
3772 + struct inode *inode = NULL;
3773 + struct inode *dir = NULL;
3774 + struct dentry_info_args darg;
3775 +
3776 +- tl_to_darg(&darg, tl);
3777 ++ tl_to_darg(&darg, tl, val);
3778 +
3779 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
3780 + darg.parent_ino, darg.dname_len);
3781 +@@ -1591,9 +1584,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
3782 +
3783 + /* Replay add range tag */
3784 + static int ext4_fc_replay_add_range(struct super_block *sb,
3785 +- struct ext4_fc_tl *tl)
3786 ++ struct ext4_fc_tl *tl, u8 *val)
3787 + {
3788 +- struct ext4_fc_add_range *fc_add_ex;
3789 ++ struct ext4_fc_add_range fc_add_ex;
3790 + struct ext4_extent newex, *ex;
3791 + struct inode *inode;
3792 + ext4_lblk_t start, cur;
3793 +@@ -1603,15 +1596,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
3794 + struct ext4_ext_path *path = NULL;
3795 + int ret;
3796 +
3797 +- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
3798 +- ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
3799 ++ memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
3800 ++ ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
3801 +
3802 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
3803 +- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
3804 ++ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
3805 + ext4_ext_get_actual_len(ex));
3806 +
3807 +- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
3808 +- EXT4_IGET_NORMAL);
3809 ++ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
3810 + if (IS_ERR(inode)) {
3811 + jbd_debug(1, "Inode not found.");
3812 + return 0;
3813 +@@ -1720,32 +1712,33 @@ next:
3814 +
3815 + /* Replay DEL_RANGE tag */
3816 + static int
3817 +-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
3818 ++ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
3819 ++ u8 *val)
3820 + {
3821 + struct inode *inode;
3822 +- struct ext4_fc_del_range *lrange;
3823 ++ struct ext4_fc_del_range lrange;
3824 + struct ext4_map_blocks map;
3825 + ext4_lblk_t cur, remaining;
3826 + int ret;
3827 +
3828 +- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
3829 +- cur = le32_to_cpu(lrange->fc_lblk);
3830 +- remaining = le32_to_cpu(lrange->fc_len);
3831 ++ memcpy(&lrange, val, sizeof(lrange));
3832 ++ cur = le32_to_cpu(lrange.fc_lblk);
3833 ++ remaining = le32_to_cpu(lrange.fc_len);
3834 +
3835 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
3836 +- le32_to_cpu(lrange->fc_ino), cur, remaining);
3837 ++ le32_to_cpu(lrange.fc_ino), cur, remaining);
3838 +
3839 +- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
3840 ++ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
3841 + if (IS_ERR(inode)) {
3842 +- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
3843 ++ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
3844 + return 0;
3845 + }
3846 +
3847 + ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
3848 +
3849 + jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
3850 +- inode->i_ino, le32_to_cpu(lrange->fc_lblk),
3851 +- le32_to_cpu(lrange->fc_len));
3852 ++ inode->i_ino, le32_to_cpu(lrange.fc_lblk),
3853 ++ le32_to_cpu(lrange.fc_len));
3854 + while (remaining > 0) {
3855 + map.m_lblk = cur;
3856 + map.m_len = remaining;
3857 +@@ -1766,8 +1759,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
3858 + }
3859 +
3860 + ret = ext4_punch_hole(inode,
3861 +- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
3862 +- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits);
3863 ++ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
3864 ++ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits);
3865 + if (ret)
3866 + jbd_debug(1, "ext4_punch_hole returned %d", ret);
3867 + ext4_ext_replay_shrink_inode(inode,
3868 +@@ -1909,11 +1902,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
3869 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3870 + struct ext4_fc_replay_state *state;
3871 + int ret = JBD2_FC_REPLAY_CONTINUE;
3872 +- struct ext4_fc_add_range *ext;
3873 +- struct ext4_fc_tl *tl;
3874 +- struct ext4_fc_tail *tail;
3875 +- __u8 *start, *end;
3876 +- struct ext4_fc_head *head;
3877 ++ struct ext4_fc_add_range ext;
3878 ++ struct ext4_fc_tl tl;
3879 ++ struct ext4_fc_tail tail;
3880 ++ __u8 *start, *end, *cur, *val;
3881 ++ struct ext4_fc_head head;
3882 + struct ext4_extent *ex;
3883 +
3884 + state = &sbi->s_fc_replay_state;
3885 +@@ -1940,15 +1933,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
3886 + }
3887 +
3888 + state->fc_replay_expected_off++;
3889 +- fc_for_each_tl(start, end, tl) {
3890 ++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
3891 ++ memcpy(&tl, cur, sizeof(tl));
3892 ++ val = cur + sizeof(tl);
3893 + jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
3894 +- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
3895 +- switch (le16_to_cpu(tl->fc_tag)) {
3896 ++ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
3897 ++ switch (le16_to_cpu(tl.fc_tag)) {
3898 + case EXT4_FC_TAG_ADD_RANGE:
3899 +- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
3900 +- ex = (struct ext4_extent *)&ext->fc_ex;
3901 ++ memcpy(&ext, val, sizeof(ext));
3902 ++ ex = (struct ext4_extent *)&ext.fc_ex;
3903 + ret = ext4_fc_record_regions(sb,
3904 +- le32_to_cpu(ext->fc_ino),
3905 ++ le32_to_cpu(ext.fc_ino),
3906 + le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
3907 + ext4_ext_get_actual_len(ex));
3908 + if (ret < 0)
3909 +@@ -1962,18 +1957,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
3910 + case EXT4_FC_TAG_INODE:
3911 + case EXT4_FC_TAG_PAD:
3912 + state->fc_cur_tag++;
3913 +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
3914 +- sizeof(*tl) + ext4_fc_tag_len(tl));
3915 ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
3916 ++ sizeof(tl) + le16_to_cpu(tl.fc_len));
3917 + break;
3918 + case EXT4_FC_TAG_TAIL:
3919 + state->fc_cur_tag++;
3920 +- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
3921 +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
3922 +- sizeof(*tl) +
3923 ++ memcpy(&tail, val, sizeof(tail));
3924 ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
3925 ++ sizeof(tl) +
3926 + offsetof(struct ext4_fc_tail,
3927 + fc_crc));
3928 +- if (le32_to_cpu(tail->fc_tid) == expected_tid &&
3929 +- le32_to_cpu(tail->fc_crc) == state->fc_crc) {
3930 ++ if (le32_to_cpu(tail.fc_tid) == expected_tid &&
3931 ++ le32_to_cpu(tail.fc_crc) == state->fc_crc) {
3932 + state->fc_replay_num_tags = state->fc_cur_tag;
3933 + state->fc_regions_valid =
3934 + state->fc_regions_used;
3935 +@@ -1984,19 +1979,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
3936 + state->fc_crc = 0;
3937 + break;
3938 + case EXT4_FC_TAG_HEAD:
3939 +- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
3940 +- if (le32_to_cpu(head->fc_features) &
3941 ++ memcpy(&head, val, sizeof(head));
3942 ++ if (le32_to_cpu(head.fc_features) &
3943 + ~EXT4_FC_SUPPORTED_FEATURES) {
3944 + ret = -EOPNOTSUPP;
3945 + break;
3946 + }
3947 +- if (le32_to_cpu(head->fc_tid) != expected_tid) {
3948 ++ if (le32_to_cpu(head.fc_tid) != expected_tid) {
3949 + ret = JBD2_FC_REPLAY_STOP;
3950 + break;
3951 + }
3952 + state->fc_cur_tag++;
3953 +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
3954 +- sizeof(*tl) + ext4_fc_tag_len(tl));
3955 ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
3956 ++ sizeof(tl) + le16_to_cpu(tl.fc_len));
3957 + break;
3958 + default:
3959 + ret = state->fc_replay_num_tags ?
3960 +@@ -2020,11 +2015,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
3961 + {
3962 + struct super_block *sb = journal->j_private;
3963 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3964 +- struct ext4_fc_tl *tl;
3965 +- __u8 *start, *end;
3966 ++ struct ext4_fc_tl tl;
3967 ++ __u8 *start, *end, *cur, *val;
3968 + int ret = JBD2_FC_REPLAY_CONTINUE;
3969 + struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
3970 +- struct ext4_fc_tail *tail;
3971 ++ struct ext4_fc_tail tail;
3972 +
3973 + if (pass == PASS_SCAN) {
3974 + state->fc_current_pass = PASS_SCAN;
3975 +@@ -2051,49 +2046,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
3976 + start = (u8 *)bh->b_data;
3977 + end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
3978 +
3979 +- fc_for_each_tl(start, end, tl) {
3980 ++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
3981 ++ memcpy(&tl, cur, sizeof(tl));
3982 ++ val = cur + sizeof(tl);
3983 ++
3984 + if (state->fc_replay_num_tags == 0) {
3985 + ret = JBD2_FC_REPLAY_STOP;
3986 + ext4_fc_set_bitmaps_and_counters(sb);
3987 + break;
3988 + }
3989 + jbd_debug(3, "Replay phase, tag:%s\n",
3990 +- tag2str(le16_to_cpu(tl->fc_tag)));
3991 ++ tag2str(le16_to_cpu(tl.fc_tag)));
3992 + state->fc_replay_num_tags--;
3993 +- switch (le16_to_cpu(tl->fc_tag)) {
3994 ++ switch (le16_to_cpu(tl.fc_tag)) {
3995 + case EXT4_FC_TAG_LINK:
3996 +- ret = ext4_fc_replay_link(sb, tl);
3997 ++ ret = ext4_fc_replay_link(sb, &tl, val);
3998 + break;
3999 + case EXT4_FC_TAG_UNLINK:
4000 +- ret = ext4_fc_replay_unlink(sb, tl);
4001 ++ ret = ext4_fc_replay_unlink(sb, &tl, val);
4002 + break;
4003 + case EXT4_FC_TAG_ADD_RANGE:
4004 +- ret = ext4_fc_replay_add_range(sb, tl);
4005 ++ ret = ext4_fc_replay_add_range(sb, &tl, val);
4006 + break;
4007 + case EXT4_FC_TAG_CREAT:
4008 +- ret = ext4_fc_replay_create(sb, tl);
4009 ++ ret = ext4_fc_replay_create(sb, &tl, val);
4010 + break;
4011 + case EXT4_FC_TAG_DEL_RANGE:
4012 +- ret = ext4_fc_replay_del_range(sb, tl);
4013 ++ ret = ext4_fc_replay_del_range(sb, &tl, val);
4014 + break;
4015 + case EXT4_FC_TAG_INODE:
4016 +- ret = ext4_fc_replay_inode(sb, tl);
4017 ++ ret = ext4_fc_replay_inode(sb, &tl, val);
4018 + break;
4019 + case EXT4_FC_TAG_PAD:
4020 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
4021 +- ext4_fc_tag_len(tl), 0);
4022 ++ le16_to_cpu(tl.fc_len), 0);
4023 + break;
4024 + case EXT4_FC_TAG_TAIL:
4025 + trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
4026 +- ext4_fc_tag_len(tl), 0);
4027 +- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
4028 +- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
4029 ++ le16_to_cpu(tl.fc_len), 0);
4030 ++ memcpy(&tail, val, sizeof(tail));
4031 ++ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
4032 + break;
4033 + case EXT4_FC_TAG_HEAD:
4034 + break;
4035 + default:
4036 +- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
4037 +- ext4_fc_tag_len(tl), 0);
4038 ++ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
4039 ++ le16_to_cpu(tl.fc_len), 0);
4040 + ret = -ECANCELED;
4041 + break;
4042 + }
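
The fast-commit rework above retires fc_for_each_tl() and every pointer cast into the journal block; each tag-length header and value is memcpy()'d into a properly typed local instead, which avoids unaligned accesses and reads past the end of the buffer on a corrupted fc_len. A compact userspace sketch of the same copy-then-parse loop, assuming a host little-endian u16 tag/len header where the kernel would use le16_to_cpu():

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct tl {
        uint16_t tag;
        uint16_t len; /* length of the value that follows */
    };

    static void parse(const uint8_t *buf, size_t size)
    {
        const uint8_t *cur = buf, *end = buf + size;
        struct tl tl;

        while ((size_t)(end - cur) >= sizeof(tl)) {
            memcpy(&tl, cur, sizeof(tl)); /* no alignment assumption */
            if (tl.len > end - cur - sizeof(tl))
                break; /* corrupted length: stop, don't overread */
            printf("tag %u, %u byte(s) of value\n", tl.tag, tl.len);
            cur += sizeof(tl) + tl.len;
        }
    }

    int main(void)
    {
        uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb,  /* tag 1, len 2 */
                          7, 0, 0, 0 };            /* tag 7, len 0 */
        parse(buf, sizeof(buf));
        return 0;
    }
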
4043 +diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
4044 +index 3a6e5a1fa1b80..d8d0998a5c163 100644
4045 +--- a/fs/ext4/fast_commit.h
4046 ++++ b/fs/ext4/fast_commit.h
4047 +@@ -146,12 +146,5 @@ struct ext4_fc_replay_state {
4048 +
4049 + #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
4050 +
4051 +-#define fc_for_each_tl(__start, __end, __tl) \
4052 +- for (tl = (struct ext4_fc_tl *)start; \
4053 +- (u8 *)tl < (u8 *)end; \
4054 +- tl = (struct ext4_fc_tl *)((u8 *)tl + \
4055 +- sizeof(struct ext4_fc_tl) + \
4056 +- + le16_to_cpu(tl->fc_len)))
4057 +-
4058 +
4059 + #endif /* __FAST_COMMIT_H__ */
4060 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
4061 +index c92558ede623e..b294ebcb4db4b 100644
4062 +--- a/fs/ext4/ialloc.c
4063 ++++ b/fs/ext4/ialloc.c
4064 +@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
4065 + if (is_directory) {
4066 + count = ext4_used_dirs_count(sb, gdp) - 1;
4067 + ext4_used_dirs_set(sb, gdp, count);
4068 +- percpu_counter_dec(&sbi->s_dirs_counter);
4069 ++ if (percpu_counter_initialized(&sbi->s_dirs_counter))
4070 ++ percpu_counter_dec(&sbi->s_dirs_counter);
4071 + }
4072 + ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
4073 + EXT4_INODES_PER_GROUP(sb) / 8);
4074 + ext4_group_desc_csum_set(sb, block_group, gdp);
4075 + ext4_unlock_group(sb, block_group);
4076 +
4077 +- percpu_counter_inc(&sbi->s_freeinodes_counter);
4078 ++ if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
4079 ++ percpu_counter_inc(&sbi->s_freeinodes_counter);
4080 + if (sbi->s_log_groups_per_flex) {
4081 + struct flex_groups *fg;
4082 +
4083 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
4084 +index b6229fe1aa233..9c390c3d7fb15 100644
4085 +--- a/fs/ext4/mballoc.c
4086 ++++ b/fs/ext4/mballoc.c
4087 +@@ -2738,7 +2738,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
4088 + */
4089 + if (sbi->s_es->s_log_groups_per_flex >= 32) {
4090 + ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
4091 +- goto err_freesgi;
4092 ++ goto err_freebuddy;
4093 + }
4094 + sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
4095 + BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
4096 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4097 +index c7f5b665834fc..21c4ba2513ce5 100644
4098 +--- a/fs/ext4/super.c
4099 ++++ b/fs/ext4/super.c
4100 +@@ -4451,14 +4451,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4101 + }
4102 +
4103 + if (sb->s_blocksize != blocksize) {
4104 ++ /*
4105 ++ * bh must be released before kill_bdev(); otherwise
4106 ++ * neither it nor its page will be freed. kill_bdev()
4107 ++ * is called by sb_set_blocksize().
4108 ++ */
4109 ++ brelse(bh);
4110 + /* Validate the filesystem blocksize */
4111 + if (!sb_set_blocksize(sb, blocksize)) {
4112 + ext4_msg(sb, KERN_ERR, "bad block size %d",
4113 + blocksize);
4114 ++ bh = NULL;
4115 + goto failed_mount;
4116 + }
4117 +
4118 +- brelse(bh);
4119 + logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
4120 + offset = do_div(logical_sb_block, blocksize);
4121 + bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
4122 +@@ -5181,8 +5187,9 @@ failed_mount:
4123 + kfree(get_qf_name(sb, sbi, i));
4124 + #endif
4125 + fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
4126 +- ext4_blkdev_remove(sbi);
4127 ++ /* ext4_blkdev_remove() calls kill_bdev(); release bh before it. */
4128 + brelse(bh);
4129 ++ ext4_blkdev_remove(sbi);
4130 + out_fail:
4131 + sb->s_fs_info = NULL;
4132 + kfree(sbi->s_blockgroup_lock);
4133 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
4134 +index 35a6fd103761b..ea2f2de448063 100644
4135 +--- a/fs/gfs2/glock.c
4136 ++++ b/fs/gfs2/glock.c
4137 +@@ -1457,9 +1457,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
4138 + glock_blocked_by_withdraw(gl) &&
4139 + gh->gh_gl != sdp->sd_jinode_gl) {
4140 + sdp->sd_glock_dqs_held++;
4141 ++ spin_unlock(&gl->gl_lockref.lock);
4142 + might_sleep();
4143 + wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
4144 + TASK_UNINTERRUPTIBLE);
4145 ++ spin_lock(&gl->gl_lockref.lock);
4146 + }
4147 + if (gh->gh_flags & GL_NOCACHE)
4148 + handle_callback(gl, LM_ST_UNLOCKED, 0, false);
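
The gfs2 fix drops gl_lockref.lock around wait_on_bit() because sleeping with a spinlock held can deadlock the task that must clear the bit (the might_sleep() right there would also warn). A pthreads sketch of the drop-sleep-reacquire shape, assuming a mutex in place of the spinlock and a condition variable in place of the bit-wait:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hot_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int bit_set = 1;

    /* the other side must get to run (and might itself need
     * hot_lock) in order to clear the bit we are waiting on */
    static void *clearer(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&wait_lock);
        bit_set = 0;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&wait_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&hot_lock);
        pthread_mutex_unlock(&hot_lock); /* drop before blocking */

        pthread_create(&t, NULL, clearer, NULL);
        pthread_mutex_lock(&wait_lock);
        while (bit_set)
            pthread_cond_wait(&cond, &wait_lock);
        pthread_mutex_unlock(&wait_lock);

        pthread_mutex_lock(&hot_lock);   /* reacquire after the wait */
        pthread_mutex_unlock(&hot_lock);

        pthread_join(t, NULL);
        printf("done\n");
        return 0;
    }
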
4149 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4150 +index 369ec81033d67..fdbaaf579cc60 100644
4151 +--- a/fs/io_uring.c
4152 ++++ b/fs/io_uring.c
4153 +@@ -545,7 +545,7 @@ struct io_statx {
4154 + struct io_completion {
4155 + struct file *file;
4156 + struct list_head list;
4157 +- int cflags;
4158 ++ u32 cflags;
4159 + };
4160 +
4161 + struct io_async_connect {
4162 +@@ -1711,7 +1711,8 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
4163 + }
4164 + }
4165 +
4166 +-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
4167 ++static void __io_cqring_fill_event(struct io_kiocb *req, long res,
4168 ++ unsigned int cflags)
4169 + {
4170 + struct io_ring_ctx *ctx = req->ctx;
4171 + struct io_uring_cqe *cqe;
4172 +@@ -6266,6 +6267,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
4173 + if (prev) {
4174 + io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
4175 + io_put_req_deferred(prev, 1);
4176 ++ io_put_req_deferred(req, 1);
4177 + } else {
4178 + io_cqring_add_event(req, -ETIME, 0);
4179 + io_put_req_deferred(req, 1);
4180 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
4181 +index 8880071ee4ee0..2b296d720c9fa 100644
4182 +--- a/fs/ocfs2/file.c
4183 ++++ b/fs/ocfs2/file.c
4184 +@@ -1855,6 +1855,45 @@ out:
4185 + return ret;
4186 + }
4187 +
4188 ++/*
4189 ++ * zero out partial blocks of one cluster.
4190 ++ *
4191 ++ * start: file offset where zeroing starts; rounded up to a block boundary.
4192 ++ * len: trimmed to the end of the current cluster if "start + len"
4193 ++ * extends past it.
4194 ++ */
4195 ++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
4196 ++ u64 start, u64 len)
4197 ++{
4198 ++ int ret;
4199 ++ u64 start_block, end_block, nr_blocks;
4200 ++ u64 p_block, offset;
4201 ++ u32 cluster, p_cluster, nr_clusters;
4202 ++ struct super_block *sb = inode->i_sb;
4203 ++ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
4204 ++
4205 ++ if (start + len < end)
4206 ++ end = start + len;
4207 ++
4208 ++ start_block = ocfs2_blocks_for_bytes(sb, start);
4209 ++ end_block = ocfs2_blocks_for_bytes(sb, end);
4210 ++ nr_blocks = end_block - start_block;
4211 ++ if (!nr_blocks)
4212 ++ return 0;
4213 ++
4214 ++ cluster = ocfs2_bytes_to_clusters(sb, start);
4215 ++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
4216 ++ &nr_clusters, NULL);
4217 ++ if (ret)
4218 ++ return ret;
4219 ++ if (!p_cluster)
4220 ++ return 0;
4221 ++
4222 ++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
4223 ++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
4224 ++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
4225 ++}
4226 ++
4227 + /*
4228 + * Parts of this function taken from xfs_change_file_space()
4229 + */
4230 +@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
4231 + {
4232 + int ret;
4233 + s64 llen;
4234 +- loff_t size;
4235 ++ loff_t size, orig_isize;
4236 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4237 + struct buffer_head *di_bh = NULL;
4238 + handle_t *handle;
4239 +@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
4240 + goto out_inode_unlock;
4241 + }
4242 +
4243 ++ orig_isize = i_size_read(inode);
4244 + switch (sr->l_whence) {
4245 + case 0: /*SEEK_SET*/
4246 + break;
4247 +@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
4248 + sr->l_start += f_pos;
4249 + break;
4250 + case 2: /*SEEK_END*/
4251 +- sr->l_start += i_size_read(inode);
4252 ++ sr->l_start += orig_isize;
4253 + break;
4254 + default:
4255 + ret = -EINVAL;
4256 +@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
4257 + default:
4258 + ret = -EINVAL;
4259 + }
4260 ++
4261 ++ /* zeroout eof blocks in the cluster. */
4262 ++ if (!ret && change_size && orig_isize < size) {
4263 ++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
4264 ++ size - orig_isize);
4265 ++ if (!ret)
4266 ++ i_size_write(inode, size);
4267 ++ }
4268 + up_write(&OCFS2_I(inode)->ip_alloc_sem);
4269 + if (ret) {
4270 + mlog_errno(ret);
4271 +@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
4272 + goto out_inode_unlock;
4273 + }
4274 +
4275 +- if (change_size && i_size_read(inode) < size)
4276 +- i_size_write(inode, size);
4277 +-
4278 + inode->i_ctime = inode->i_mtime = current_time(inode);
4279 + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
4280 + if (ret < 0)
4281 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
4282 +index cc9ee07769745..af8f4e2cf21d1 100644
4283 +--- a/include/linux/mlx5/mlx5_ifc.h
4284 ++++ b/include/linux/mlx5/mlx5_ifc.h
4285 +@@ -1223,6 +1223,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
4286 +
4287 + #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
4288 +
4289 ++#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
4290 ++
4291 + enum {
4292 + MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
4293 + MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
4294 +diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
4295 +index fafc1beea504a..9837fb011f2fb 100644
4296 +--- a/include/linux/platform_data/ti-sysc.h
4297 ++++ b/include/linux/platform_data/ti-sysc.h
4298 +@@ -50,6 +50,7 @@ struct sysc_regbits {
4299 + s8 emufree_shift;
4300 + };
4301 +
4302 ++#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
4303 + #define SYSC_QUIRK_GPMC_DEBUG BIT(26)
4304 + #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
4305 + #define SYSC_MODULE_QUIRK_PRUSS BIT(24)
4306 +diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
4307 +index 2e4f7721fc4e7..8110c29fab42d 100644
4308 +--- a/include/linux/usb/usbnet.h
4309 ++++ b/include/linux/usb/usbnet.h
4310 +@@ -83,6 +83,8 @@ struct usbnet {
4311 + # define EVENT_LINK_CHANGE 11
4312 + # define EVENT_SET_RX_MODE 12
4313 + # define EVENT_NO_IP_ALIGN 13
4314 ++ u32 rx_speed; /* in bps - NOT Mbps */
4315 ++ u32 tx_speed; /* in bps - NOT Mbps */
4316 + };
4317 +
4318 + static inline struct usb_driver *driver_of(struct usb_interface *intf)
4319 +diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
4320 +index 48ecca8530ffa..b655d8666f555 100644
4321 +--- a/include/net/caif/caif_dev.h
4322 ++++ b/include/net/caif/caif_dev.h
4323 +@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
4324 + * The link_support layer is used to add any Link Layer specific
4325 + * framing.
4326 + */
4327 +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4328 ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4329 + struct cflayer *link_support, int head_room,
4330 + struct cflayer **layer, int (**rcv_func)(
4331 + struct sk_buff *, struct net_device *,
4332 +diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
4333 +index 2aa5e91d84576..8819ff4db35a6 100644
4334 +--- a/include/net/caif/cfcnfg.h
4335 ++++ b/include/net/caif/cfcnfg.h
4336 +@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
4337 + * @fcs: Specify if checksum is used in CAIF Framing Layer.
4338 + * @head_room: Head space needed by link specific protocol.
4339 + */
4340 +-void
4341 ++int
4342 + cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
4343 + struct net_device *dev, struct cflayer *phy_layer,
4344 + enum cfcnfg_phy_preference pref,
4345 +diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
4346 +index 14a55e03bb3ce..67cce8757175a 100644
4347 +--- a/include/net/caif/cfserl.h
4348 ++++ b/include/net/caif/cfserl.h
4349 +@@ -9,4 +9,5 @@
4350 + #include <net/caif/caif_layer.h>
4351 +
4352 + struct cflayer *cfserl_create(int instance, bool use_stx);
4353 ++void cfserl_release(struct cflayer *layer);
4354 + #endif
4355 +diff --git a/include/net/tls.h b/include/net/tls.h
4356 +index 2bdd802212fe0..43891b28fc482 100644
4357 +--- a/include/net/tls.h
4358 ++++ b/include/net/tls.h
4359 +@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
4360 + (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
4361 +
4362 + enum tls_context_flags {
4363 +- TLS_RX_SYNC_RUNNING = 0,
4364 ++ /* tls_device_down was called after the netdev went down, device state
4365 ++ * was released, and kTLS works in software, even though rx_conf is
4366 ++ * still TLS_HW (needed for transition).
4367 ++ */
4368 ++ TLS_RX_DEV_DEGRADED = 0,
4369 + /* Unlike RX where resync is driven entirely by the core in TX only
4370 + * the driver knows when things went out of sync, so we need the flag
4371 + * to be atomic.
4372 +@@ -265,6 +269,7 @@ struct tls_context {
4373 +
4374 + /* cache cold stuff */
4375 + struct proto *sk_proto;
4376 ++ struct sock *sk;
4377 +
4378 + void (*sk_destruct)(struct sock *sk);
4379 +
4380 +@@ -447,6 +452,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
4381 + struct sk_buff *
4382 + tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
4383 + struct sk_buff *skb);
4384 ++struct sk_buff *
4385 ++tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
4386 ++ struct sk_buff *skb);
4387 +
4388 + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
4389 + {
4390 +diff --git a/init/main.c b/init/main.c
4391 +index d9d9141112511..b4449544390ca 100644
4392 +--- a/init/main.c
4393 ++++ b/init/main.c
4394 +@@ -1505,7 +1505,7 @@ static noinline void __init kernel_init_freeable(void)
4395 + */
4396 + set_mems_allowed(node_states[N_MEMORY]);
4397 +
4398 +- cad_pid = task_pid(current);
4399 ++ cad_pid = get_pid(task_pid(current));
4400 +
4401 + smp_prepare_cpus(setup_max_cpus);
4402 +
4403 +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
4404 +index c489430cac78c..f7e99bb8c3b6c 100644
4405 +--- a/kernel/bpf/helpers.c
4406 ++++ b/kernel/bpf/helpers.c
4407 +@@ -14,6 +14,7 @@
4408 + #include <linux/jiffies.h>
4409 + #include <linux/pid_namespace.h>
4410 + #include <linux/proc_ns.h>
4411 ++#include <linux/security.h>
4412 +
4413 + #include "../../lib/kstrtox.h"
4414 +
4415 +@@ -707,14 +708,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
4416 + return &bpf_spin_lock_proto;
4417 + case BPF_FUNC_spin_unlock:
4418 + return &bpf_spin_unlock_proto;
4419 +- case BPF_FUNC_trace_printk:
4420 +- if (!perfmon_capable())
4421 +- return NULL;
4422 +- return bpf_get_trace_printk_proto();
4423 +- case BPF_FUNC_snprintf_btf:
4424 +- if (!perfmon_capable())
4425 +- return NULL;
4426 +- return &bpf_snprintf_btf_proto;
4427 + case BPF_FUNC_jiffies64:
4428 + return &bpf_jiffies64_proto;
4429 + case BPF_FUNC_per_cpu_ptr:
4430 +@@ -729,16 +722,22 @@ bpf_base_func_proto(enum bpf_func_id func_id)
4431 + return NULL;
4432 +
4433 + switch (func_id) {
4434 ++ case BPF_FUNC_trace_printk:
4435 ++ return bpf_get_trace_printk_proto();
4436 + case BPF_FUNC_get_current_task:
4437 + return &bpf_get_current_task_proto;
4438 + case BPF_FUNC_probe_read_user:
4439 + return &bpf_probe_read_user_proto;
4440 + case BPF_FUNC_probe_read_kernel:
4441 +- return &bpf_probe_read_kernel_proto;
4442 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4443 ++ NULL : &bpf_probe_read_kernel_proto;
4444 + case BPF_FUNC_probe_read_user_str:
4445 + return &bpf_probe_read_user_str_proto;
4446 + case BPF_FUNC_probe_read_kernel_str:
4447 +- return &bpf_probe_read_kernel_str_proto;
4448 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4449 ++ NULL : &bpf_probe_read_kernel_str_proto;
4450 ++ case BPF_FUNC_snprintf_btf:
4451 ++ return &bpf_snprintf_btf_proto;
4452 + default:
4453 + return NULL;
4454 + }
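
The bpf change above moves the security_locked_down() check from each helper invocation to the point where the helper prototype is resolved: under lockdown the lookup returns NULL and the program is refused up front, instead of every call paying for the check and silently zeroing its output. A sketch of gating a function table at resolution time, assuming a toy lockdown flag in place of security_locked_down():

    #include <stdio.h>

    typedef long (*helper_fn)(void);

    static long read_kernel(void) { return 42; }

    static int locked_down; /* stand-in for security_locked_down() */

    /* Resolve helpers once, at "load" time: a gated helper resolves
     * to NULL under lockdown, so the caller rejects the program up
     * front rather than checking on every call. */
    static helper_fn get_helper(int func_id)
    {
        switch (func_id) {
        case 1: /* a read-kernel-memory style helper */
            return locked_down ? NULL : read_kernel;
        default:
            return NULL;
        }
    }

    int main(void)
    {
        locked_down = 1;
        printf("under lockdown: %s\n",
               get_helper(1) ? "allowed" : "refused");
        locked_down = 0;
        printf("value: %ld\n", get_helper(1)());
        return 0;
    }
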
4455 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
4456 +index fcbfc95649967..01710831fd02f 100644
4457 +--- a/kernel/trace/bpf_trace.c
4458 ++++ b/kernel/trace/bpf_trace.c
4459 +@@ -212,16 +212,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
4460 + static __always_inline int
4461 + bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
4462 + {
4463 +- int ret = security_locked_down(LOCKDOWN_BPF_READ);
4464 ++ int ret;
4465 +
4466 +- if (unlikely(ret < 0))
4467 +- goto fail;
4468 + ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
4469 + if (unlikely(ret < 0))
4470 +- goto fail;
4471 +- return ret;
4472 +-fail:
4473 +- memset(dst, 0, size);
4474 ++ memset(dst, 0, size);
4475 + return ret;
4476 + }
4477 +
4478 +@@ -243,10 +238,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
4479 + static __always_inline int
4480 + bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
4481 + {
4482 +- int ret = security_locked_down(LOCKDOWN_BPF_READ);
4483 +-
4484 +- if (unlikely(ret < 0))
4485 +- goto fail;
4486 ++ int ret;
4487 +
4488 + /*
4489 + * The strncpy_from_kernel_nofault() call will likely not fill the
4490 +@@ -259,11 +251,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
4491 + */
4492 + ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
4493 + if (unlikely(ret < 0))
4494 +- goto fail;
4495 +-
4496 +- return ret;
4497 +-fail:
4498 +- memset(dst, 0, size);
4499 ++ memset(dst, 0, size);
4500 + return ret;
4501 + }
4502 +
4503 +@@ -1293,16 +1281,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4504 + case BPF_FUNC_probe_read_user:
4505 + return &bpf_probe_read_user_proto;
4506 + case BPF_FUNC_probe_read_kernel:
4507 +- return &bpf_probe_read_kernel_proto;
4508 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4509 ++ NULL : &bpf_probe_read_kernel_proto;
4510 + case BPF_FUNC_probe_read_user_str:
4511 + return &bpf_probe_read_user_str_proto;
4512 + case BPF_FUNC_probe_read_kernel_str:
4513 +- return &bpf_probe_read_kernel_str_proto;
4514 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4515 ++ NULL : &bpf_probe_read_kernel_str_proto;
4516 + #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
4517 + case BPF_FUNC_probe_read:
4518 +- return &bpf_probe_read_compat_proto;
4519 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4520 ++ NULL : &bpf_probe_read_compat_proto;
4521 + case BPF_FUNC_probe_read_str:
4522 +- return &bpf_probe_read_compat_str_proto;
4523 ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
4524 ++ NULL : &bpf_probe_read_compat_str_proto;
4525 + #endif
4526 + #ifdef CONFIG_CGROUPS
4527 + case BPF_FUNC_get_current_cgroup_id:
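
The bpf_trace.c hunks drop the per-call security_locked_down() test from bpf_probe_read_kernel_common() and bpf_probe_read_kernel_str_common() (the check now runs at proto-resolution time, as above) while preserving the contract that the destination buffer is zeroed whenever the copy fails. A small sketch of that zero-on-failure contract, with copy_nofault() as a hypothetical stand-in for copy_from_kernel_nofault():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int copy_nofault(void *dst, const void *src, size_t size)
    {
        if (!src)
            return -EFAULT; /* simulate a faulting source */
        memcpy(dst, src, size);
        return 0;
    }

    static int probe_read(void *dst, size_t size, const void *unsafe_ptr)
    {
        int ret = copy_nofault(dst, unsafe_ptr, size);

        if (ret < 0)    /* never leak stale bytes to the caller */
            memset(dst, 0, size);
        return ret;
    }

    int main(void)
    {
        char buf[8] = "XXXXXXX";

        probe_read(buf, sizeof(buf), NULL);
        printf("ret<0 => buf zeroed: %d\n", buf[0] == 0);
        return 0;
    }
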
4528 +diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
4529 +index 00cb0d0b73e16..8a7724a6ce2fb 100644
4530 +--- a/lib/lz4/lz4_decompress.c
4531 ++++ b/lib/lz4/lz4_decompress.c
4532 +@@ -263,7 +263,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
4533 + }
4534 + }
4535 +
4536 +- LZ4_memcpy(op, ip, length);
4537 ++	/*
4538 ++	 * LZ4_memmove() supports overlapping memory regions, which
4539 ++	 * only matters for in-place decompression scenarios
4540 ++	 */
4541 ++ LZ4_memmove(op, ip, length);
4542 + ip += length;
4543 + op += length;
4544 +
4545 +diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
4546 +index c91dd96ef6291..673bd206aa98b 100644
4547 +--- a/lib/lz4/lz4defs.h
4548 ++++ b/lib/lz4/lz4defs.h
4549 +@@ -146,6 +146,7 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
4550 + * environments. This is needed when decompressing the Linux Kernel, for example.
4551 + */
4552 + #define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
4553 ++#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
4554 +
4555 + static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
4556 + {
4557 +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
4558 +index c05d9dcf78911..750bfef26be37 100644
4559 +--- a/mm/debug_vm_pgtable.c
4560 ++++ b/mm/debug_vm_pgtable.c
4561 +@@ -163,7 +163,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
4562 +
4563 + pr_debug("Validating PMD advanced\n");
4564 + /* Align the address wrt HPAGE_PMD_SIZE */
4565 +- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
4566 ++ vaddr &= HPAGE_PMD_MASK;
4567 +
4568 + pgtable_trans_huge_deposit(mm, pmdp, pgtable);
4569 +
4570 +@@ -285,7 +285,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
4571 +
4572 + pr_debug("Validating PUD advanced\n");
4573 + /* Align the address wrt HPAGE_PUD_SIZE */
4574 +- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
4575 ++ vaddr &= HPAGE_PUD_MASK;
4576 +
4577 + set_pud_at(mm, vaddr, pudp, pud);
4578 + pudp_set_wrprotect(mm, vaddr, pudp);
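
Both debug_vm_pgtable hunks switch from "(vaddr & MASK) + SIZE" to a plain "vaddr & MASK": rounding down keeps the test address inside the mapped region, whereas adding a full huge-page size can step past it. A small arithmetic sketch, where HPAGE_SIZE is an illustrative constant, not the kernel's:

    #include <stdio.h>

    #define HPAGE_SIZE 0x200000UL          /* 2 MiB, typical PMD size */
    #define HPAGE_MASK (~(HPAGE_SIZE - 1))

    int main(void)
    {
        unsigned long vaddr = 0x1234567UL;

        printf("align down: %#lx\n", vaddr & HPAGE_MASK);
        printf("old form:   %#lx\n", (vaddr & HPAGE_MASK) + HPAGE_SIZE);
        return 0;
    }
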
4579 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4580 +index 900851a4f9146..bc1006a327338 100644
4581 +--- a/mm/hugetlb.c
4582 ++++ b/mm/hugetlb.c
4583 +@@ -4708,10 +4708,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4584 + struct page *page;
4585 +
4586 + if (!*pagep) {
4587 +- ret = -ENOMEM;
4588 ++ /* If a page already exists, then this is a UFFDIO_COPY for
4589 ++ * a non-missing case. Return -EEXIST.
4590 ++ */
4591 ++ if (vm_shared &&
4592 ++ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4593 ++ ret = -EEXIST;
4594 ++ goto out;
4595 ++ }
4596 ++
4597 + page = alloc_huge_page(dst_vma, dst_addr, 0);
4598 +- if (IS_ERR(page))
4599 ++ if (IS_ERR(page)) {
4600 ++ ret = -ENOMEM;
4601 + goto out;
4602 ++ }
4603 +
4604 + ret = copy_huge_page_from_user(page,
4605 + (const void __user *) src_addr,
4606 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4607 +index 7ffa706e5c305..81cc7fdc9c8fd 100644
4608 +--- a/mm/page_alloc.c
4609 ++++ b/mm/page_alloc.c
4610 +@@ -8870,6 +8870,8 @@ bool take_page_off_buddy(struct page *page)
4611 + del_page_from_free_list(page_head, zone, page_order);
4612 + break_down_buddy_pages(zone, page_head, page, 0,
4613 + page_order, migratetype);
4614 ++ if (!is_migrate_isolate(migratetype))
4615 ++ __mod_zone_freepage_state(zone, -1, migratetype);
4616 + ret = true;
4617 + break;
4618 + }
4619 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
4620 +index 0152bc6b67967..86ebfc6ae6986 100644
4621 +--- a/net/bluetooth/hci_core.c
4622 ++++ b/net/bluetooth/hci_core.c
4623 +@@ -1602,8 +1602,13 @@ setup_failed:
4624 + } else {
4625 + /* Init failed, cleanup */
4626 + flush_work(&hdev->tx_work);
4627 +- flush_work(&hdev->cmd_work);
4628 ++
4629 ++ /* Since hci_rx_work() can queue new cmd_work, it should be
4630 ++ * flushed first to avoid an unexpected call of
4631 ++ * hci_cmd_work()
4632 ++ */
4633 + flush_work(&hdev->rx_work);
4634 ++ flush_work(&hdev->cmd_work);
4635 +
4636 + skb_queue_purge(&hdev->cmd_q);
4637 + skb_queue_purge(&hdev->rx_q);
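
The hci_core fix reorders the flushes because hci_rx_work() can queue new cmd_work: flushing cmd_work first would let commands queued during the rx flush survive the shutdown. A toy single-threaded model of why the order matters; rx/cmd here are just labels, not the Bluetooth workqueues:

    #include <stdbool.h>
    #include <stdio.h>

    static bool rx_pending = true;  /* rx work is queued */
    static bool cmd_pending;        /* cmd work not queued yet */

    static void flush_cmd(void) { cmd_pending = false; }
    static void flush_rx(void)
    {
        if (rx_pending) {
            rx_pending = false;
            cmd_pending = true;     /* rx work queues new cmd work */
        }
    }

    int main(void)
    {
        /* Wrong order: cmd first, then rx requeues cmd. */
        flush_cmd();
        flush_rx();
        printf("cmd still pending after wrong order: %d\n", cmd_pending);

        /* Fixed order: rx first, then cmd. */
        rx_pending = true;
        flush_rx();
        flush_cmd();
        printf("cmd still pending after fixed order: %d\n", cmd_pending);
        return 0;
    }
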
4638 +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
4639 +index 251b9128f530a..eed0dd066e12c 100644
4640 +--- a/net/bluetooth/hci_sock.c
4641 ++++ b/net/bluetooth/hci_sock.c
4642 +@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
4643 + /* Detach sockets from device */
4644 + read_lock(&hci_sk_list.lock);
4645 + sk_for_each(sk, &hci_sk_list.head) {
4646 +- bh_lock_sock_nested(sk);
4647 ++ lock_sock(sk);
4648 + if (hci_pi(sk)->hdev == hdev) {
4649 + hci_pi(sk)->hdev = NULL;
4650 + sk->sk_err = EPIPE;
4651 +@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
4652 +
4653 + hci_dev_put(hdev);
4654 + }
4655 +- bh_unlock_sock(sk);
4656 ++ release_sock(sk);
4657 + }
4658 + read_unlock(&hci_sk_list.lock);
4659 + }
4660 +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
4661 +index c10e5a55758d2..440139706130a 100644
4662 +--- a/net/caif/caif_dev.c
4663 ++++ b/net/caif/caif_dev.c
4664 +@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
4665 + caifd_put(caifd);
4666 + }
4667 +
4668 +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4669 ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4670 + struct cflayer *link_support, int head_room,
4671 + struct cflayer **layer,
4672 + int (**rcv_func)(struct sk_buff *, struct net_device *,
4673 +@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4674 + enum cfcnfg_phy_preference pref;
4675 + struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
4676 + struct caif_device_entry_list *caifdevs;
4677 ++ int res;
4678 +
4679 + caifdevs = caif_device_list(dev_net(dev));
4680 + caifd = caif_device_alloc(dev);
4681 + if (!caifd)
4682 +- return;
4683 ++ return -ENOMEM;
4684 + *layer = &caifd->layer;
4685 + spin_lock_init(&caifd->flow_lock);
4686 +
4687 +@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4688 + strlcpy(caifd->layer.name, dev->name,
4689 + sizeof(caifd->layer.name));
4690 + caifd->layer.transmit = transmit;
4691 +- cfcnfg_add_phy_layer(cfg,
4692 ++ res = cfcnfg_add_phy_layer(cfg,
4693 + dev,
4694 + &caifd->layer,
4695 + pref,
4696 +@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
4697 + mutex_unlock(&caifdevs->lock);
4698 + if (rcv_func)
4699 + *rcv_func = receive;
4700 ++ return res;
4701 + }
4702 + EXPORT_SYMBOL(caif_enroll_dev);
4703 +
4704 +@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
4705 + struct cflayer *layer, *link_support;
4706 + int head_room = 0;
4707 + struct caif_device_entry_list *caifdevs;
4708 ++ int res;
4709 +
4710 + cfg = get_cfcnfg(dev_net(dev));
4711 + caifdevs = caif_device_list(dev_net(dev));
4712 +@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
4713 + break;
4714 + }
4715 + }
4716 +- caif_enroll_dev(dev, caifdev, link_support, head_room,
4717 ++ res = caif_enroll_dev(dev, caifdev, link_support, head_room,
4718 + &layer, NULL);
4719 ++ if (res)
4720 ++ cfserl_release(link_support);
4721 + caifdev->flowctrl = dev_flowctrl;
4722 + break;
4723 +
4724 +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
4725 +index a0116b9503d9d..b02e1292f7f19 100644
4726 +--- a/net/caif/caif_usb.c
4727 ++++ b/net/caif/caif_usb.c
4728 +@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
4729 + return (struct cflayer *) this;
4730 + }
4731 +
4732 ++static void cfusbl_release(struct cflayer *layer)
4733 ++{
4734 ++ kfree(layer);
4735 ++}
4736 ++
4737 + static struct packet_type caif_usb_type __read_mostly = {
4738 + .type = cpu_to_be16(ETH_P_802_EX1),
4739 + };
4740 +@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
4741 + struct cflayer *layer, *link_support;
4742 + struct usbnet *usbnet;
4743 + struct usb_device *usbdev;
4744 ++ int res;
4745 +
4746 + /* Check whether we have a NCM device, and find its VID/PID. */
4747 + if (!(dev->dev.parent && dev->dev.parent->driver &&
4748 +@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
4749 + if (dev->num_tx_queues > 1)
4750 + pr_warn("USB device uses more than one tx queue\n");
4751 +
4752 +- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
4753 ++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
4754 + &layer, &caif_usb_type.func);
4755 ++ if (res)
4756 ++ goto err;
4757 ++
4758 + if (!pack_added)
4759 + dev_add_pack(&caif_usb_type);
4760 + pack_added = true;
4761 +@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
4762 + strlcpy(layer->name, dev->name, sizeof(layer->name));
4763 +
4764 + return 0;
4765 ++err:
4766 ++ cfusbl_release(link_support);
4767 ++ return res;
4768 + }
4769 +
4770 + static struct notifier_block caif_device_notifier = {
4771 +diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
4772 +index 399239a14420f..cac30e676ac94 100644
4773 +--- a/net/caif/cfcnfg.c
4774 ++++ b/net/caif/cfcnfg.c
4775 +@@ -450,7 +450,7 @@ unlock:
4776 + rcu_read_unlock();
4777 + }
4778 +
4779 +-void
4780 ++int
4781 + cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
4782 + struct net_device *dev, struct cflayer *phy_layer,
4783 + enum cfcnfg_phy_preference pref,
4784 +@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
4785 + {
4786 + struct cflayer *frml;
4787 + struct cfcnfg_phyinfo *phyinfo = NULL;
4788 +- int i;
4789 ++ int i, res = 0;
4790 + u8 phyid;
4791 +
4792 + mutex_lock(&cnfg->lock);
4793 +@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
4794 + goto got_phyid;
4795 + }
4796 + pr_warn("Too many CAIF Link Layers (max 6)\n");
4797 ++ res = -EEXIST;
4798 + goto out;
4799 +
4800 + got_phyid:
4801 + phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
4802 +- if (!phyinfo)
4803 ++ if (!phyinfo) {
4804 ++ res = -ENOMEM;
4805 + goto out_err;
4806 ++ }
4807 +
4808 + phy_layer->id = phyid;
4809 + phyinfo->pref = pref;
4810 +@@ -492,8 +495,10 @@ got_phyid:
4811 +
4812 + frml = cffrml_create(phyid, fcs);
4813 +
4814 +- if (!frml)
4815 ++ if (!frml) {
4816 ++ res = -ENOMEM;
4817 + goto out_err;
4818 ++ }
4819 + phyinfo->frm_layer = frml;
4820 + layer_set_up(frml, cnfg->mux);
4821 +
4822 +@@ -511,11 +516,12 @@ got_phyid:
4823 + list_add_rcu(&phyinfo->node, &cnfg->phys);
4824 + out:
4825 + mutex_unlock(&cnfg->lock);
4826 +- return;
4827 ++ return res;
4828 +
4829 + out_err:
4830 + kfree(phyinfo);
4831 + mutex_unlock(&cnfg->lock);
4832 ++ return res;
4833 + }
4834 + EXPORT_SYMBOL(cfcnfg_add_phy_layer);
4835 +
4836 +diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
4837 +index e11725a4bb0ed..40cd57ad0a0f4 100644
4838 +--- a/net/caif/cfserl.c
4839 ++++ b/net/caif/cfserl.c
4840 +@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
4841 + static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
4842 + int phyid);
4843 +
4844 ++void cfserl_release(struct cflayer *layer)
4845 ++{
4846 ++ kfree(layer);
4847 ++}
4848 ++
4849 + struct cflayer *cfserl_create(int instance, bool use_stx)
4850 + {
4851 + struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
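
The caif series above converts caif_enroll_dev() and cfcnfg_add_phy_layer() from void to int and adds cfserl_release()/cfusbl_release(), so callers can detect a failed registration and free the link_support layer instead of leaking it. A sketch of the caller-side pattern, with enroll() and release() as illustrative stand-ins for the caif API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct layer { int id; };

    static int enroll(struct layer *l, int fail)
    {
        (void)l;
        return fail ? -ENOMEM : 0;
    }

    static void release(struct layer *l) { free(l); }

    int main(void)
    {
        struct layer *link_support = malloc(sizeof(*link_support));
        int res;

        if (!link_support)
            return 1;

        res = enroll(link_support, 1);
        if (res) {
            release(link_support);  /* no longer leaked on failure */
            printf("enroll failed: %d\n", res);
            return 1;
        }
        return 0;
    }
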
4852 +diff --git a/net/core/devlink.c b/net/core/devlink.c
4853 +index 5d397838bceb6..90badb6f72271 100644
4854 +--- a/net/core/devlink.c
4855 ++++ b/net/core/devlink.c
4856 +@@ -693,7 +693,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
4857 + case DEVLINK_PORT_FLAVOUR_PHYSICAL:
4858 + case DEVLINK_PORT_FLAVOUR_CPU:
4859 + case DEVLINK_PORT_FLAVOUR_DSA:
4860 +- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
4861 + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
4862 + attrs->phys.port_number))
4863 + return -EMSGSIZE;
4864 +@@ -8376,7 +8375,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
4865 +
4866 + switch (attrs->flavour) {
4867 + case DEVLINK_PORT_FLAVOUR_PHYSICAL:
4868 +- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
4869 + if (!attrs->split)
4870 + n = snprintf(name, len, "p%u", attrs->phys.port_number);
4871 + else
4872 +@@ -8413,6 +8411,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
4873 + n = snprintf(name, len, "pf%uvf%u",
4874 + attrs->pci_vf.pf, attrs->pci_vf.vf);
4875 + break;
4876 ++ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
4877 ++ return -EOPNOTSUPP;
4878 + }
4879 +
4880 + if (n >= len)
4881 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
4882 +index a18c2973b8c6d..c452ebf209394 100644
4883 +--- a/net/core/neighbour.c
4884 ++++ b/net/core/neighbour.c
4885 +@@ -239,6 +239,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
4886 +
4887 + write_lock(&n->lock);
4888 + if ((n->nud_state == NUD_FAILED) ||
4889 ++ (n->nud_state == NUD_NOARP) ||
4890 + (tbl->is_multicast &&
4891 + tbl->is_multicast(n->primary_key)) ||
4892 + time_after(tref, n->updated))
4893 +diff --git a/net/core/sock.c b/net/core/sock.c
4894 +index dee29f41beaf8..7de51ea15cdfc 100644
4895 +--- a/net/core/sock.c
4896 ++++ b/net/core/sock.c
4897 +@@ -807,10 +807,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
4898 + }
4899 + EXPORT_SYMBOL(sock_set_rcvbuf);
4900 +
4901 ++static void __sock_set_mark(struct sock *sk, u32 val)
4902 ++{
4903 ++ if (val != sk->sk_mark) {
4904 ++ sk->sk_mark = val;
4905 ++ sk_dst_reset(sk);
4906 ++ }
4907 ++}
4908 ++
4909 + void sock_set_mark(struct sock *sk, u32 val)
4910 + {
4911 + lock_sock(sk);
4912 +- sk->sk_mark = val;
4913 ++ __sock_set_mark(sk, val);
4914 + release_sock(sk);
4915 + }
4916 + EXPORT_SYMBOL(sock_set_mark);
4917 +@@ -1118,10 +1126,10 @@ set_sndbuf:
4918 + case SO_MARK:
4919 + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
4920 + ret = -EPERM;
4921 +- } else if (val != sk->sk_mark) {
4922 +- sk->sk_mark = val;
4923 +- sk_dst_reset(sk);
4924 ++ break;
4925 + }
4926 ++
4927 ++ __sock_set_mark(sk, val);
4928 + break;
4929 +
4930 + case SO_RXQ_OVFL:
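
The net/core/sock.c hunk factors the mark update into __sock_set_mark() so that both the SO_MARK setsockopt path and sock_set_mark() reset the cached route only when the value actually changes. The same change-detecting setter in miniature, with toy types standing in for struct sock and its dst cache:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock {
        unsigned int mark;
        bool dst_valid;
    };

    static void set_mark(struct sock *sk, unsigned int val)
    {
        if (val != sk->mark) {
            sk->mark = val;
            sk->dst_valid = false;  /* sk_dst_reset() equivalent */
        }
    }

    int main(void)
    {
        struct sock sk = { .mark = 5, .dst_valid = true };

        set_mark(&sk, 5);   /* no-op, cache kept */
        printf("dst after same value: %d\n", sk.dst_valid);
        set_mark(&sk, 7);   /* cache dropped */
        printf("dst after new value:  %d\n", sk.dst_valid);
        return 0;
    }
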
4931 +diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
4932 +index 8e3e8a5b85593..a00b513c22a1d 100644
4933 +--- a/net/dsa/tag_8021q.c
4934 ++++ b/net/dsa/tag_8021q.c
4935 +@@ -64,7 +64,7 @@
4936 + #define DSA_8021Q_SUBVLAN_HI_SHIFT 9
4937 + #define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
4938 + #define DSA_8021Q_SUBVLAN_LO_SHIFT 4
4939 +-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3)
4940 ++#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
4941 + #define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
4942 + #define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
4943 + #define DSA_8021Q_SUBVLAN(x) \
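
The tag_8021q fix makes DSA_8021Q_SUBVLAN_LO_MASK agree with DSA_8021Q_SUBVLAN_LO_SHIFT: the low two subvlan bits live in VID bits 5:4, so the mask must be GENMASK(5, 4), not GENMASK(4, 3). A round-trip check of the corrected encoding; the GENMASK macro below assumes a 64-bit unsigned long:

    #include <stdio.h>

    #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

    #define SUBVLAN_LO_SHIFT 4
    #define SUBVLAN_LO_MASK  GENMASK(5, 4)

    static unsigned long encode_lo(unsigned long subvlan)
    {
        return (subvlan << SUBVLAN_LO_SHIFT) & SUBVLAN_LO_MASK;
    }

    static unsigned long decode_lo(unsigned long vid)
    {
        return (vid & SUBVLAN_LO_MASK) >> SUBVLAN_LO_SHIFT;
    }

    int main(void)
    {
        unsigned long s;

        for (s = 0; s < 4; s++)     /* two low bits: 0..3 */
            printf("subvlan %lu -> vid bits %#lx -> %lu\n",
                   s, encode_lo(s), decode_lo(encode_lo(s)));
        return 0;
    }
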
4944 +diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
4945 +index d19c40c684e80..71be751123210 100644
4946 +--- a/net/ieee802154/nl-mac.c
4947 ++++ b/net/ieee802154/nl-mac.c
4948 +@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
4949 + nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
4950 + nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
4951 + be32_to_cpu(params.frame_counter)) ||
4952 +- ieee802154_llsec_fill_key_id(msg, &params.out_key))
4953 ++ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
4954 ++ rc = -ENOBUFS;
4955 + goto out_free;
4956 ++ }
4957 +
4958 + dev_put(dev);
4959 +
4960 +diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
4961 +index 2cdc7e63fe172..88215b5c93aa4 100644
4962 +--- a/net/ieee802154/nl-phy.c
4963 ++++ b/net/ieee802154/nl-phy.c
4964 +@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
4965 + }
4966 +
4967 + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
4968 +- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
4969 ++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
4970 ++ rc = -EMSGSIZE;
4971 + goto nla_put_failure;
4972 ++ }
4973 + dev_put(dev);
4974 +
4975 + wpan_phy_put(phy);
4976 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4977 +index 71e578ed8699f..ccff4738313c1 100644
4978 +--- a/net/ipv6/route.c
4979 ++++ b/net/ipv6/route.c
4980 +@@ -3671,11 +3671,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
4981 + if (nh) {
4982 + if (rt->fib6_src.plen) {
4983 + NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
4984 +- goto out;
4985 ++ goto out_free;
4986 + }
4987 + if (!nexthop_get(nh)) {
4988 + NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
4989 +- goto out;
4990 ++ goto out_free;
4991 + }
4992 + rt->nh = nh;
4993 + fib6_nh = nexthop_fib6_nh(rt->nh);
4994 +@@ -3712,6 +3712,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
4995 + out:
4996 + fib6_info_release(rt);
4997 + return ERR_PTR(err);
4998 ++out_free:
4999 ++ ip_fib_metrics_put(rt->fib6_metrics);
5000 ++ kfree(rt);
5001 ++ return ERR_PTR(err);
5002 + }
5003 +
5004 + int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
5005 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
5006 +index bdd6af38a9ae3..96b6aca9d0ae7 100644
5007 +--- a/net/mptcp/subflow.c
5008 ++++ b/net/mptcp/subflow.c
5009 +@@ -527,21 +527,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
5010 +
5011 + /* if the sk is MP_CAPABLE, we try to fetch the client key */
5012 + if (subflow_req->mp_capable) {
5013 +- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
5014 +- /* here we can receive and accept an in-window,
5015 +- * out-of-order pkt, which will not carry the MP_CAPABLE
5016 +- * opt even on mptcp enabled paths
5017 +- */
5018 +- goto create_msk;
5019 +- }
5020 +-
5021 ++ /* we can receive and accept an in-window, out-of-order pkt,
5022 ++ * which may not carry the MP_CAPABLE opt even on MPTCP-enabled
5023 ++ * paths: always try to extract the peer key, and fall back
5024 ++ * for packets missing it.
5025 ++ * Even OoO DSS packets arriving legitimately after a dropped or
5026 ++ * reordered MPC will cause a fallback, but we have no other
5027 ++ * option.
5028 ++ */
5029 + mptcp_get_options(skb, &mp_opt);
5030 + if (!mp_opt.mp_capable) {
5031 + fallback = true;
5032 + goto create_child;
5033 + }
5034 +
5035 +-create_msk:
5036 + new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
5037 + if (!new_msk)
5038 + fallback = true;
5039 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
5040 +index d45dbcba8b49c..c25097092a060 100644
5041 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
5042 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
5043 +@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
5044 + ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
5045 + svc->port = u->port;
5046 + svc->fwmark = u->fwmark;
5047 +- svc->flags = u->flags;
5048 ++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
5049 + svc->timeout = u->timeout * HZ;
5050 + svc->netmask = u->netmask;
5051 + svc->ipvs = ipvs;
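
The ip_vs_ctl fix strips IP_VS_SVC_F_HASHED from the flags a user supplies, since that bit is kernel-internal bookkeeping; accepting it from userspace would make a new service appear already hashed. The sanitization pattern in miniature, with illustrative flag names and values:

    #include <stdio.h>

    #define SVC_F_PERSISTENT 0x0001 /* user-settable */
    #define SVC_F_HASHED     0x0002 /* kernel-internal */

    int main(void)
    {
        unsigned int user_flags = SVC_F_PERSISTENT | SVC_F_HASHED;
        unsigned int flags = user_flags & ~SVC_F_HASHED;

        printf("stored flags: %#x\n", flags);   /* 0x1: HASHED stripped */
        return 0;
    }
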
5052 +diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
5053 +index 47e9319d2cf31..71892822bbf5d 100644
5054 +--- a/net/netfilter/nf_conntrack_proto.c
5055 ++++ b/net/netfilter/nf_conntrack_proto.c
5056 +@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
5057 +
5058 + #if IS_ENABLED(CONFIG_IPV6)
5059 + cleanup_sockopt:
5060 +- nf_unregister_sockopt(&so_getorigdst6);
5061 ++ nf_unregister_sockopt(&so_getorigdst);
5062 + #endif
5063 + return ret;
5064 + }
5065 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5066 +index 7bf7bfa0c7d9c..e34d05cc57549 100644
5067 +--- a/net/netfilter/nf_tables_api.c
5068 ++++ b/net/netfilter/nf_tables_api.c
5069 +@@ -3263,8 +3263,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
5070 + if (n == NFT_RULE_MAXEXPRS)
5071 + goto err1;
5072 + err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
5073 +- if (err < 0)
5074 ++ if (err < 0) {
5075 ++ NL_SET_BAD_ATTR(extack, tmp);
5076 + goto err1;
5077 ++ }
5078 + size += info[n].ops->size;
5079 + n++;
5080 + }
5081 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
5082 +index 5b0d0a77379c6..91afbf8ac8cf0 100644
5083 +--- a/net/netfilter/nfnetlink_cthelper.c
5084 ++++ b/net/netfilter/nfnetlink_cthelper.c
5085 +@@ -380,10 +380,14 @@ static int
5086 + nfnl_cthelper_update(const struct nlattr * const tb[],
5087 + struct nf_conntrack_helper *helper)
5088 + {
5089 ++ u32 size;
5090 + int ret;
5091 +
5092 +- if (tb[NFCTH_PRIV_DATA_LEN])
5093 +- return -EBUSY;
5094 ++ if (tb[NFCTH_PRIV_DATA_LEN]) {
5095 ++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
5096 ++ if (size != helper->data_len)
5097 ++ return -EBUSY;
5098 ++ }
5099 +
5100 + if (tb[NFCTH_POLICY]) {
5101 + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
5102 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
5103 +index a1b0aac46e9e0..70d46e0bbf064 100644
5104 +--- a/net/netfilter/nft_ct.c
5105 ++++ b/net/netfilter/nft_ct.c
5106 +@@ -1218,7 +1218,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
5107 + struct nf_conn *ct;
5108 +
5109 + ct = nf_ct_get(pkt->skb, &ctinfo);
5110 +- if (!ct || ctinfo == IP_CT_UNTRACKED) {
5111 ++ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
5112 + regs->verdict.code = NFT_BREAK;
5113 + return;
5114 + }
5115 +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
5116 +index 53dbe733f9981..6cfd30fc07985 100644
5117 +--- a/net/nfc/llcp_sock.c
5118 ++++ b/net/nfc/llcp_sock.c
5119 +@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
5120 + if (!llcp_sock->service_name) {
5121 + nfc_llcp_local_put(llcp_sock->local);
5122 + llcp_sock->local = NULL;
5123 ++ llcp_sock->dev = NULL;
5124 + ret = -ENOMEM;
5125 + goto put_dev;
5126 + }
5127 +@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
5128 + llcp_sock->local = NULL;
5129 + kfree(llcp_sock->service_name);
5130 + llcp_sock->service_name = NULL;
5131 ++ llcp_sock->dev = NULL;
5132 + ret = -EADDRINUSE;
5133 + goto put_dev;
5134 + }
5135 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5136 +index aba3cd85f284f..315a5b2f3add8 100644
5137 +--- a/net/sched/act_ct.c
5138 ++++ b/net/sched/act_ct.c
5139 +@@ -979,7 +979,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
5140 + */
5141 + cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
5142 + if (!cached) {
5143 +- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
5144 ++ if (tcf_ct_flow_table_lookup(p, skb, family)) {
5145 + skip_add = true;
5146 + goto do_nat;
5147 + }
5148 +@@ -1019,10 +1019,11 @@ do_nat:
5149 + * even if the connection is already confirmed.
5150 + */
5151 + nf_conntrack_confirm(skb);
5152 +- } else if (!skip_add) {
5153 +- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
5154 + }
5155 +
5156 ++ if (!skip_add)
5157 ++ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
5158 ++
5159 + out_push:
5160 + skb_push_rcsum(skb, nh_ofs);
5161 +
5162 +@@ -1198,9 +1199,6 @@ static int tcf_ct_fill_params(struct net *net,
5163 + sizeof(p->zone));
5164 + }
5165 +
5166 +- if (p->zone == NF_CT_DEFAULT_ZONE_ID)
5167 +- return 0;
5168 +-
5169 + nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
5170 + tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
5171 + if (!tmpl) {
5172 +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
5173 +index 6504141104521..12e535b43d887 100644
5174 +--- a/net/tipc/bearer.c
5175 ++++ b/net/tipc/bearer.c
5176 +@@ -234,7 +234,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
5177 + */
5178 + static int tipc_enable_bearer(struct net *net, const char *name,
5179 + u32 disc_domain, u32 prio,
5180 +- struct nlattr *attr[])
5181 ++ struct nlattr *attr[],
5182 ++ struct netlink_ext_ack *extack)
5183 + {
5184 + struct tipc_net *tn = tipc_net(net);
5185 + struct tipc_bearer_names b_names;
5186 +@@ -245,20 +246,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5187 + int bearer_id = 0;
5188 + int res = -EINVAL;
5189 + char *errstr = "";
5190 ++ u32 i;
5191 +
5192 + if (!bearer_name_validate(name, &b_names)) {
5193 + errstr = "illegal name";
5194 ++ NL_SET_ERR_MSG(extack, "Illegal name");
5195 + goto rejected;
5196 + }
5197 +
5198 + if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
5199 + errstr = "illegal priority";
5200 ++ NL_SET_ERR_MSG(extack, "Illegal priority");
5201 + goto rejected;
5202 + }
5203 +
5204 + m = tipc_media_find(b_names.media_name);
5205 + if (!m) {
5206 + errstr = "media not registered";
5207 ++ NL_SET_ERR_MSG(extack, "Media not registered");
5208 + goto rejected;
5209 + }
5210 +
5211 +@@ -266,33 +271,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5212 + prio = m->priority;
5213 +
5214 + /* Check new bearer vs existing ones and find free bearer id if any */
5215 +- while (bearer_id < MAX_BEARERS) {
5216 +- b = rtnl_dereference(tn->bearer_list[bearer_id]);
5217 +- if (!b)
5218 +- break;
5219 ++ bearer_id = MAX_BEARERS;
5220 ++ i = MAX_BEARERS;
5221 ++ while (i-- != 0) {
5222 ++ b = rtnl_dereference(tn->bearer_list[i]);
5223 ++ if (!b) {
5224 ++ bearer_id = i;
5225 ++ continue;
5226 ++ }
5227 + if (!strcmp(name, b->name)) {
5228 + errstr = "already enabled";
5229 ++ NL_SET_ERR_MSG(extack, "Already enabled");
5230 + goto rejected;
5231 + }
5232 +- bearer_id++;
5233 +- if (b->priority != prio)
5234 +- continue;
5235 +- if (++with_this_prio <= 2)
5236 +- continue;
5237 +- pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
5238 +- name, prio);
5239 +- if (prio == TIPC_MIN_LINK_PRI) {
5240 +- errstr = "cannot adjust to lower";
5241 +- goto rejected;
5242 ++
5243 ++ if (b->priority == prio &&
5244 ++ (++with_this_prio > 2)) {
5245 ++ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
5246 ++ name, prio);
5247 ++
5248 ++ if (prio == TIPC_MIN_LINK_PRI) {
5249 ++ errstr = "cannot adjust to lower";
5250 ++ NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
5251 ++ goto rejected;
5252 ++ }
5253 ++
5254 ++ pr_warn("Bearer <%s>: trying with adjusted priority\n",
5255 ++ name);
5256 ++ prio--;
5257 ++ bearer_id = MAX_BEARERS;
5258 ++ i = MAX_BEARERS;
5259 ++ with_this_prio = 1;
5260 + }
5261 +- pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
5262 +- prio--;
5263 +- bearer_id = 0;
5264 +- with_this_prio = 1;
5265 + }
5266 +
5267 + if (bearer_id >= MAX_BEARERS) {
5268 + errstr = "max 3 bearers permitted";
5269 ++ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
5270 + goto rejected;
5271 + }
5272 +
5273 +@@ -306,6 +321,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5274 + if (res) {
5275 + kfree(b);
5276 + errstr = "failed to enable media";
5277 ++ NL_SET_ERR_MSG(extack, "Failed to enable media");
5278 + goto rejected;
5279 + }
5280 +
5281 +@@ -322,6 +338,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5282 + if (res) {
5283 + bearer_disable(net, b);
5284 + errstr = "failed to create discoverer";
5285 ++ NL_SET_ERR_MSG(extack, "Failed to create discoverer");
5286 + goto rejected;
5287 + }
5288 +
5289 +@@ -894,6 +911,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
5290 + bearer = tipc_bearer_find(net, name);
5291 + if (!bearer) {
5292 + err = -EINVAL;
5293 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
5294 + goto err_out;
5295 + }
5296 +
5297 +@@ -933,8 +951,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
5298 + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
5299 +
5300 + bearer = tipc_bearer_find(net, name);
5301 +- if (!bearer)
5302 ++ if (!bearer) {
5303 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
5304 + return -EINVAL;
5305 ++ }
5306 +
5307 + bearer_disable(net, bearer);
5308 +
5309 +@@ -992,7 +1012,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
5310 + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
5311 + }
5312 +
5313 +- return tipc_enable_bearer(net, bearer, domain, prio, attrs);
5314 ++ return tipc_enable_bearer(net, bearer, domain, prio, attrs,
5315 ++ info->extack);
5316 + }
5317 +
5318 + int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
5319 +@@ -1031,6 +1052,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
5320 + b = tipc_bearer_find(net, name);
5321 + if (!b) {
5322 + rtnl_unlock();
5323 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
5324 + return -EINVAL;
5325 + }
5326 +
5327 +@@ -1071,8 +1093,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
5328 + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
5329 +
5330 + b = tipc_bearer_find(net, name);
5331 +- if (!b)
5332 ++ if (!b) {
5333 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
5334 + return -EINVAL;
5335 ++ }
5336 +
5337 + if (attrs[TIPC_NLA_BEARER_PROP]) {
5338 + struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
5339 +@@ -1091,12 +1115,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
5340 + if (props[TIPC_NLA_PROP_WIN])
5341 + b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
5342 + if (props[TIPC_NLA_PROP_MTU]) {
5343 +- if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
5344 ++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
5345 ++ NL_SET_ERR_MSG(info->extack,
5346 ++ "MTU property is unsupported");
5347 + return -EINVAL;
5348 ++ }
5349 + #ifdef CONFIG_TIPC_MEDIA_UDP
5350 + if (tipc_udp_mtu_bad(nla_get_u32
5351 +- (props[TIPC_NLA_PROP_MTU])))
5352 ++ (props[TIPC_NLA_PROP_MTU]))) {
5353 ++ NL_SET_ERR_MSG(info->extack,
5354 ++ "MTU value is out-of-range");
5355 + return -EINVAL;
5356 ++ }
5357 + b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
5358 + tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
5359 + #endif
5360 +@@ -1224,6 +1254,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
5361 + rtnl_lock();
5362 + media = tipc_media_find(name);
5363 + if (!media) {
5364 ++ NL_SET_ERR_MSG(info->extack, "Media not found");
5365 + err = -EINVAL;
5366 + goto err_out;
5367 + }
5368 +@@ -1260,9 +1291,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
5369 + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
5370 +
5371 + m = tipc_media_find(name);
5372 +- if (!m)
5373 ++ if (!m) {
5374 ++ NL_SET_ERR_MSG(info->extack, "Media not found");
5375 + return -EINVAL;
5376 +-
5377 ++ }
5378 + if (attrs[TIPC_NLA_MEDIA_PROP]) {
5379 + struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
5380 +
5381 +@@ -1278,12 +1310,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
5382 + if (props[TIPC_NLA_PROP_WIN])
5383 + m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
5384 + if (props[TIPC_NLA_PROP_MTU]) {
5385 +- if (m->type_id != TIPC_MEDIA_TYPE_UDP)
5386 ++ if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
5387 ++ NL_SET_ERR_MSG(info->extack,
5388 ++ "MTU property is unsupported");
5389 + return -EINVAL;
5390 ++ }
5391 + #ifdef CONFIG_TIPC_MEDIA_UDP
5392 + if (tipc_udp_mtu_bad(nla_get_u32
5393 +- (props[TIPC_NLA_PROP_MTU])))
5394 ++ (props[TIPC_NLA_PROP_MTU]))) {
5395 ++ NL_SET_ERR_MSG(info->extack,
5396 ++ "MTU value is out-of-range");
5397 + return -EINVAL;
5398 ++ }
5399 + m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
5400 + #endif
5401 + }
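
The rewritten tipc_enable_bearer() loop walks the bearer table from the top index down so every occupied slot is still checked for a duplicate name, while bearer_id ends up holding the lowest free slot, the last free one seen on the way down. The same scan in isolation, with MAX_SLOTS and names[] as illustrative data:

    #include <stdio.h>
    #include <string.h>

    #define MAX_SLOTS 3

    int main(void)
    {
        const char *names[MAX_SLOTS] = { "eth:a", NULL, "eth:b" };
        const char *new_name = "eth:c";
        unsigned int slot = MAX_SLOTS;  /* "no free slot" sentinel */
        unsigned int i = MAX_SLOTS;

        while (i-- != 0) {
            if (!names[i]) {
                slot = i;   /* last free seen => lowest index */
                continue;
            }
            if (!strcmp(names[i], new_name)) {
                printf("already enabled\n");
                return 1;
            }
        }
        if (slot >= MAX_SLOTS) {
            printf("no free slot\n");
            return 1;
        }
        printf("using slot %u\n", slot);    /* prints 1 */
        return 0;
    }
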
5402 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
5403 +index a3ab2d3d4e4ea..f718c7346088f 100644
5404 +--- a/net/tls/tls_device.c
5405 ++++ b/net/tls/tls_device.c
5406 +@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
5407 + static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
5408 + static LIST_HEAD(tls_device_gc_list);
5409 + static LIST_HEAD(tls_device_list);
5410 ++static LIST_HEAD(tls_device_down_list);
5411 + static DEFINE_SPINLOCK(tls_device_lock);
5412 +
5413 + static void tls_device_free_ctx(struct tls_context *ctx)
5414 +@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
5415 + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
5416 + struct net_device *netdev;
5417 +
5418 +- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
5419 +- return;
5420 +-
5421 + trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
5422 ++ rcu_read_lock();
5423 + netdev = READ_ONCE(tls_ctx->netdev);
5424 + if (netdev)
5425 + netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
5426 + TLS_OFFLOAD_CTX_DIR_RX);
5427 +- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
5428 ++ rcu_read_unlock();
5429 + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
5430 + }
5431 +
5432 +@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
5433 +
5434 + if (tls_ctx->rx_conf != TLS_HW)
5435 + return;
5436 ++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
5437 ++ return;
5438 +
5439 + prot = &tls_ctx->prot_info;
5440 + rx_ctx = tls_offload_ctx_rx(tls_ctx);
5441 +@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
5442 +
5443 + ctx->sw.decrypted |= is_decrypted;
5444 +
5445 ++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
5446 ++ if (likely(is_encrypted || is_decrypted))
5447 ++ return 0;
5448 ++
5449 ++ /* After tls_device_down disables the offload, the next SKB will
5450 ++ * likely have initial fragments decrypted, and final ones not
5451 ++ * decrypted. We need to reencrypt that single SKB.
5452 ++ */
5453 ++ return tls_device_reencrypt(sk, skb);
5454 ++ }
5455 ++
5456 + /* Return immediately if the record is either entirely plaintext or
5457 + * entirely ciphertext. Otherwise handle reencrypt partially decrypted
5458 + * record.
5459 +@@ -1290,6 +1302,26 @@ static int tls_device_down(struct net_device *netdev)
5460 + spin_unlock_irqrestore(&tls_device_lock, flags);
5461 +
5462 + list_for_each_entry_safe(ctx, tmp, &list, list) {
5463 ++ /* Stop offloaded TX and switch to the fallback.
5464 ++ * tls_is_sk_tx_device_offloaded will return false.
5465 ++ */
5466 ++ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
5467 ++
5468 ++ /* Stop the RX and TX resync.
5469 ++ * tls_dev_resync must not be called after tls_dev_del.
5470 ++ */
5471 ++ WRITE_ONCE(ctx->netdev, NULL);
5472 ++
5473 ++ /* Start skipping the RX resync logic completely. */
5474 ++ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
5475 ++
5476 ++ /* Sync with inflight packets. After this point:
5477 ++ * TX: no non-encrypted packets will be passed to the driver.
5478 ++ * RX: resync requests from the driver will be ignored.
5479 ++ */
5480 ++ synchronize_net();
5481 ++
5482 ++ /* Release the offload context on the driver side. */
5483 + if (ctx->tx_conf == TLS_HW)
5484 + netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
5485 + TLS_OFFLOAD_CTX_DIR_TX);
5486 +@@ -1297,15 +1329,21 @@ static int tls_device_down(struct net_device *netdev)
5487 + !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
5488 + netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
5489 + TLS_OFFLOAD_CTX_DIR_RX);
5490 +- WRITE_ONCE(ctx->netdev, NULL);
5491 +- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
5492 +- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
5493 +- usleep_range(10, 200);
5494 ++
5495 + dev_put(netdev);
5496 +- list_del_init(&ctx->list);
5497 +
5498 +- if (refcount_dec_and_test(&ctx->refcount))
5499 +- tls_device_free_ctx(ctx);
5500 ++ /* Move the context to a separate list for two reasons:
5501 ++ * 1. When the context is deallocated, list_del is called.
5502 ++ * 2. It's no longer an offloaded context, so we don't want to
5503 ++ * run offload-specific code on this context.
5504 ++ */
5505 ++ spin_lock_irqsave(&tls_device_lock, flags);
5506 ++ list_move_tail(&ctx->list, &tls_device_down_list);
5507 ++ spin_unlock_irqrestore(&tls_device_lock, flags);
5508 ++
5509 ++ /* Device contexts for RX and TX will be freed on sk_destruct
5510 ++ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
5511 ++ */
5512 + }
5513 +
5514 + up_write(&device_offload_lock);
5515 +diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
5516 +index 28895333701e4..0d40016bf69e0 100644
5517 +--- a/net/tls/tls_device_fallback.c
5518 ++++ b/net/tls/tls_device_fallback.c
5519 +@@ -430,6 +430,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
5520 + }
5521 + EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
5522 +
5523 ++struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
5524 ++ struct net_device *dev,
5525 ++ struct sk_buff *skb)
5526 ++{
5527 ++ return tls_sw_fallback(sk, skb);
5528 ++}
5529 ++
5530 + struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
5531 + {
5532 + return tls_sw_fallback(skb->sk, skb);
5533 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
5534 +index 8d93cea99f2cb..32a51b20509c9 100644
5535 +--- a/net/tls/tls_main.c
5536 ++++ b/net/tls/tls_main.c
5537 +@@ -633,6 +633,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
5538 + mutex_init(&ctx->tx_lock);
5539 + rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
5540 + ctx->sk_proto = READ_ONCE(sk->sk_prot);
5541 ++ ctx->sk = sk;
5542 + return ctx;
5543 + }
5544 +
5545 +diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
5546 +index 21dbf63d6e415..9ec93d90e8a5a 100644
5547 +--- a/samples/vfio-mdev/mdpy-fb.c
5548 ++++ b/samples/vfio-mdev/mdpy-fb.c
5549 +@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
5550 + if (format != DRM_FORMAT_XRGB8888) {
5551 + pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
5552 + format, DRM_FORMAT_XRGB8888);
5553 +- return -EINVAL;
5554 ++ ret = -EINVAL;
5555 ++ goto err_release_regions;
5556 + }
5557 + if (width < 100 || width > 10000) {
5558 + pci_err(pdev, "width (%d) out of range\n", width);
5559 +- return -EINVAL;
5560 ++ ret = -EINVAL;
5561 ++ goto err_release_regions;
5562 + }
5563 + if (height < 100 || height > 10000) {
5564 + pci_err(pdev, "height (%d) out of range\n", height);
5565 +- return -EINVAL;
5566 ++ ret = -EINVAL;
5567 ++ goto err_release_regions;
5568 + }
5569 + pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
5570 + width, height);
5571 +
5572 + info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
5573 +- if (!info)
5574 ++ if (!info) {
5575 ++ ret = -ENOMEM;
5576 + goto err_release_regions;
5577 ++ }
5578 + pci_set_drvdata(pdev, info);
5579 + par = info->par;
5580 +
5581 +diff --git a/sound/core/timer.c b/sound/core/timer.c
5582 +index 765ea66665a8c..c15c8314671b7 100644
5583 +--- a/sound/core/timer.c
5584 ++++ b/sound/core/timer.c
5585 +@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
5586 + return;
5587 + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
5588 + return;
5589 ++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
5590 + list_for_each_entry(ts, &ti->slave_active_head, active_list)
5591 + if (ts->ccallback)
5592 +- ts->ccallback(ts, event + 100, &tstamp, resolution);
5593 ++ ts->ccallback(ts, event, &tstamp, resolution);
5594 + }
5595 +
5596 + /* start/continue a master timer */
5597 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
5598 +index eec1775dfffe9..4cec1bd77e6fe 100644
5599 +--- a/sound/pci/hda/hda_codec.c
5600 ++++ b/sound/pci/hda/hda_codec.c
5601 +@@ -2973,6 +2973,7 @@ static int hda_codec_runtime_resume(struct device *dev)
5602 + #ifdef CONFIG_PM_SLEEP
5603 + static int hda_codec_pm_prepare(struct device *dev)
5604 + {
5605 ++ dev->power.power_state = PMSG_SUSPEND;
5606 + return pm_runtime_suspended(dev);
5607 + }
5608 +
5609 +@@ -2980,6 +2981,10 @@ static void hda_codec_pm_complete(struct device *dev)
5610 + {
5611 + struct hda_codec *codec = dev_to_hda_codec(dev);
5612 +
5613 ++ /* If no other PM callbacks were invoked between prepare() and complete() */
5614 ++ if (dev->power.power_state.event == PM_EVENT_SUSPEND)
5615 ++ dev->power.power_state = PMSG_RESUME;
5616 ++
5617 + if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
5618 + hda_codec_need_resume(codec) || codec->forced_resume))
5619 + pm_request_resume(dev);
5620 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5621 +index d8424d226714f..cc13a68197f3c 100644
5622 +--- a/sound/pci/hda/patch_realtek.c
5623 ++++ b/sound/pci/hda/patch_realtek.c
5624 +@@ -8289,6 +8289,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5625 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5626 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5627 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5628 ++ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5629 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5630 + SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
5631 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5632 +diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
5633 +index 7b2d471a6419d..4343356f3cf9a 100644
5634 +--- a/tools/perf/util/dwarf-aux.c
5635 ++++ b/tools/perf/util/dwarf-aux.c
5636 +@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
5637 + if ((tag == DW_TAG_formal_parameter ||
5638 + tag == DW_TAG_variable) &&
5639 + die_compare_name(die_mem, fvp->name) &&
5640 +- /* Does the DIE have location information or external instance? */
5641 ++ /*
5642 ++ * Does the DIE have location information or const value
5643 ++ * or external instance?
5644 ++ */
5645 + (dwarf_attr(die_mem, DW_AT_external, &attr) ||
5646 +- dwarf_attr(die_mem, DW_AT_location, &attr)))
5647 ++ dwarf_attr(die_mem, DW_AT_location, &attr) ||
5648 ++ dwarf_attr(die_mem, DW_AT_const_value, &attr)))
5649 + return DIE_FIND_CB_END;
5650 + if (dwarf_haspc(die_mem, fvp->addr))
5651 + return DIE_FIND_CB_CONTINUE;
5652 +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
5653 +index 76dd349aa48d8..fdafbfcef6871 100644
5654 +--- a/tools/perf/util/probe-finder.c
5655 ++++ b/tools/perf/util/probe-finder.c
5656 +@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
5657 + immediate_value_is_supported()) {
5658 + Dwarf_Sword snum;
5659 +
5660 ++ if (!tvar)
5661 ++ return 0;
5662 ++
5663 + dwarf_formsdata(&attr, &snum);
5664 + ret = asprintf(&tvar->value, "\\%ld", (long)snum);
5665 +
5666 +diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
5667 +index 7ed7cd95e58fe..ebc4ee0fe179f 100755
5668 +--- a/tools/testing/selftests/wireguard/netns.sh
5669 ++++ b/tools/testing/selftests/wireguard/netns.sh
5670 +@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
5671 + ip1 -4 route add default dev wg0 table 51820
5672 + ip1 -4 rule add not fwmark 51820 table 51820
5673 + ip1 -4 rule add table main suppress_prefixlength 0
5674 ++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
5675 + # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
5676 + n1 ping -W 1 -c 100 -f 192.168.99.7
5677 + n1 ping -W 1 -c 100 -f abab::1111
5678 +diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
5679 +index 4eecb432a66c1..74db83a0aedd8 100644
5680 +--- a/tools/testing/selftests/wireguard/qemu/kernel.config
5681 ++++ b/tools/testing/selftests/wireguard/qemu/kernel.config
5682 +@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
5683 + CONFIG_NETFILTER_XT_NAT=y
5684 + CONFIG_NETFILTER_XT_MATCH_LENGTH=y
5685 + CONFIG_NETFILTER_XT_MARK=y
5686 +-CONFIG_NF_CONNTRACK_IPV4=y
5687 + CONFIG_NF_NAT_IPV4=y
5688 + CONFIG_IP_NF_IPTABLES=y
5689 + CONFIG_IP_NF_FILTER=y