commit: b2e817b8ca49469e758b8db4e5985605cb670c1b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 10 12:14:48 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 10 12:14:48 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b2e817b8

Linux patch 5.12.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1009_linux-5.12.10.patch | 6796 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6800 insertions(+)

diff --git a/0000_README b/0000_README
index f16429c..25657f9 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.12.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.9
 
+Patch:  1009_linux-5.12.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.12.10.patch b/1009_linux-5.12.10.patch
new file mode 100644
index 0000000..d201785
--- /dev/null
+++ b/1009_linux-5.12.10.patch
@@ -0,0 +1,6796 @@
+diff --git a/Makefile b/Makefile
+index d53577db10858..ebc02c56db03c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
51 |
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi |
52 |
+index 7d2c72562c735..9148a01ed6d9f 100644 |
53 |
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi |
54 |
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi |
55 |
+@@ -105,9 +105,13 @@ |
56 |
+ phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; |
57 |
+ phy-reset-duration = <20>; |
58 |
+ phy-supply = <&sw2_reg>; |
59 |
+- phy-handle = <&ethphy0>; |
60 |
+ status = "okay"; |
61 |
+ |
62 |
++ fixed-link { |
63 |
++ speed = <1000>; |
64 |
++ full-duplex; |
65 |
++ }; |
66 |
++ |
67 |
+ mdio { |
68 |
+ #address-cells = <1>; |
69 |
+ #size-cells = <0>; |
70 |
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi |
71 |
+index 236fc205c3890..d0768ae429faa 100644 |
72 |
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi |
73 |
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi |
74 |
+@@ -406,6 +406,18 @@ |
75 |
+ vin-supply = <&sw1_reg>; |
76 |
+ }; |
77 |
+ |
78 |
++&reg_pu { |
79 |
++ vin-supply = <&sw1_reg>; |
80 |
++}; |
81 |
++ |
82 |
++&reg_vdd1p1 { |
83 |
++ vin-supply = <&sw2_reg>; |
84 |
++}; |
85 |
++ |
86 |
++&reg_vdd2p5 { |
87 |
++ vin-supply = <&sw2_reg>; |
88 |
++}; |
89 |
++ |
90 |
+ &uart1 { |
91 |
+ pinctrl-names = "default"; |
92 |
+ pinctrl-0 = <&pinctrl_uart1>; |
93 |
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi |
94 |
+index 828cf3e39784a..c4e146f3341bb 100644 |
95 |
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi |
96 |
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi |
97 |
+@@ -126,7 +126,7 @@ |
98 |
+ compatible = "nxp,pca8574"; |
99 |
+ reg = <0x3a>; |
100 |
+ gpio-controller; |
101 |
+- #gpio-cells = <1>; |
102 |
++ #gpio-cells = <2>; |
103 |
+ }; |
104 |
+ }; |
105 |
+ |
106 |
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts |
107 |
+index 5339210b63d0f..dd8003bd1fc09 100644 |
108 |
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts |
109 |
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts |
110 |
+@@ -193,7 +193,7 @@ |
111 |
+ pinctrl-names = "default"; |
112 |
+ pinctrl-0 = <&pinctrl_usdhc1>; |
113 |
+ keep-power-in-suspend; |
114 |
+- tuning-step = <2>; |
115 |
++ fsl,tuning-step = <2>; |
116 |
+ vmmc-supply = <&reg_3p3v>; |
117 |
+ no-1-8-v; |
118 |
+ broken-cd; |
119 |
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi |
120 |
+index e57da0d32b98d..e519897fae082 100644 |
121 |
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi |
122 |
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi |
123 |
+@@ -351,7 +351,7 @@ |
124 |
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>; |
125 |
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; |
126 |
+ bus-width = <4>; |
127 |
+- tuning-step = <2>; |
128 |
++ fsl,tuning-step = <2>; |
129 |
+ vmmc-supply = <&reg_3p3v>; |
130 |
+ wakeup-source; |
131 |
+ no-1-8-v; |
132 |
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c |
133 |
+index c40cf5ef86079..977b0b744c22a 100644 |
134 |
+--- a/arch/arm/mach-omap1/board-h2.c |
135 |
++++ b/arch/arm/mach-omap1/board-h2.c |
136 |
+@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context) |
137 |
+ { |
138 |
+ if (!IS_BUILTIN(CONFIG_TPS65010)) |
139 |
+ return -ENOSYS; |
140 |
+- |
141 |
++ |
142 |
+ tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V | |
143 |
+ TPS_LDO1_ENABLE | TPS_VLDO1_3_0V); |
144 |
+ |
145 |
+@@ -394,6 +394,8 @@ static void __init h2_init(void) |
146 |
+ BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0); |
147 |
+ gpio_direction_input(H2_NAND_RB_GPIO_PIN); |
148 |
+ |
149 |
++ gpiod_add_lookup_table(&isp1301_gpiod_table); |
150 |
++ |
151 |
+ omap_cfg_reg(L3_1610_FLASH_CS2B_OE); |
152 |
+ omap_cfg_reg(M8_1610_FLASH_CS2B_WE); |
153 |
+ |
154 |
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms |
155 |
+index cdfd5fed457ff..a3fdffcd1ce8b 100644 |
156 |
+--- a/arch/arm64/Kconfig.platforms |
157 |
++++ b/arch/arm64/Kconfig.platforms |
158 |
+@@ -168,6 +168,7 @@ config ARCH_MEDIATEK |
159 |
+ |
160 |
+ config ARCH_MESON |
161 |
+ bool "Amlogic Platforms" |
162 |
++ select COMMON_CLK |
163 |
+ select MESON_IRQ_GPIO |
164 |
+ help |
165 |
+ This enables support for the arm64 based Amlogic SoCs |
166 |
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts |
167 |
+index 6c309b97587df..e8d31279b7a34 100644 |
168 |
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts |
169 |
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts |
170 |
+@@ -46,7 +46,8 @@ |
171 |
+ eee-broken-100tx; |
172 |
+ qca,clk-out-frequency = <125000000>; |
173 |
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>; |
174 |
+- vddio-supply = <&vddh>; |
175 |
++ qca,keep-pll-enabled; |
176 |
++ vddio-supply = <&vddio>; |
177 |
+ |
178 |
+ vddio: vddio-regulator { |
179 |
+ regulator-name = "VDDIO"; |
180 |
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts |
181 |
+index df212ed5bb942..e65d1c477e2ce 100644 |
182 |
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts |
183 |
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts |
184 |
+@@ -31,11 +31,10 @@ |
185 |
+ reg = <0x4>; |
186 |
+ eee-broken-1000t; |
187 |
+ eee-broken-100tx; |
188 |
+- |
189 |
+ qca,clk-out-frequency = <125000000>; |
190 |
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>; |
191 |
+- |
192 |
+- vddio-supply = <&vddh>; |
193 |
++ qca,keep-pll-enabled; |
194 |
++ vddio-supply = <&vddio>; |
195 |
+ |
196 |
+ vddio: vddio-regulator { |
197 |
+ regulator-name = "VDDIO"; |
198 |
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi |
199 |
+index 262fbad8f0ec5..1b264e5e947ac 100644 |
200 |
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi |
201 |
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi |
202 |
+@@ -201,8 +201,8 @@ |
203 |
+ ddr: memory-controller@1080000 { |
204 |
+ compatible = "fsl,qoriq-memory-controller"; |
205 |
+ reg = <0x0 0x1080000 0x0 0x1000>; |
206 |
+- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>; |
207 |
+- big-endian; |
208 |
++ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; |
209 |
++ little-endian; |
210 |
+ }; |
211 |
+ |
212 |
+ dcfg: syscon@1e00000 { |
213 |
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts |
214 |
+index 631e01c1b9fd4..be1e7d6f0ecb5 100644 |
215 |
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts |
216 |
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts |
217 |
+@@ -88,11 +88,11 @@ |
218 |
+ pinctrl-0 = <&pinctrl_codec2>; |
219 |
+ reg = <0x18>; |
220 |
+ #sound-dai-cells = <0>; |
221 |
+- HPVDD-supply = <&reg_3p3v>; |
222 |
+- SPRVDD-supply = <&reg_3p3v>; |
223 |
+- SPLVDD-supply = <&reg_3p3v>; |
224 |
+- AVDD-supply = <&reg_3p3v>; |
225 |
+- IOVDD-supply = <&reg_3p3v>; |
226 |
++ HPVDD-supply = <&reg_gen_3p3>; |
227 |
++ SPRVDD-supply = <&reg_gen_3p3>; |
228 |
++ SPLVDD-supply = <&reg_gen_3p3>; |
229 |
++ AVDD-supply = <&reg_gen_3p3>; |
230 |
++ IOVDD-supply = <&reg_gen_3p3>; |
231 |
+ DVDD-supply = <&vgen4_reg>; |
232 |
+ reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>; |
233 |
+ }; |
234 |
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi |
235 |
+index 4dc8383478ee2..a08a568c31d92 100644 |
236 |
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi |
237 |
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi |
238 |
+@@ -45,8 +45,8 @@ |
239 |
+ reg_12p0_main: regulator-12p0-main { |
240 |
+ compatible = "regulator-fixed"; |
241 |
+ regulator-name = "12V_MAIN"; |
242 |
+- regulator-min-microvolt = <5000000>; |
243 |
+- regulator-max-microvolt = <5000000>; |
244 |
++ regulator-min-microvolt = <12000000>; |
245 |
++ regulator-max-microvolt = <12000000>; |
246 |
+ regulator-always-on; |
247 |
+ }; |
248 |
+ |
249 |
+@@ -77,15 +77,6 @@ |
250 |
+ regulator-always-on; |
251 |
+ }; |
252 |
+ |
253 |
+- reg_3p3v: regulator-3p3v { |
254 |
+- compatible = "regulator-fixed"; |
255 |
+- vin-supply = <&reg_3p3_main>; |
256 |
+- regulator-name = "GEN_3V3"; |
257 |
+- regulator-min-microvolt = <3300000>; |
258 |
+- regulator-max-microvolt = <3300000>; |
259 |
+- regulator-always-on; |
260 |
+- }; |
261 |
+- |
262 |
+ reg_usdhc2_vmmc: regulator-vsd-3v3 { |
263 |
+ pinctrl-names = "default"; |
264 |
+ pinctrl-0 = <&pinctrl_reg_usdhc2>; |
265 |
+@@ -415,11 +406,11 @@ |
266 |
+ pinctrl-0 = <&pinctrl_codec1>; |
267 |
+ reg = <0x18>; |
268 |
+ #sound-dai-cells = <0>; |
269 |
+- HPVDD-supply = <&reg_3p3v>; |
270 |
+- SPRVDD-supply = <&reg_3p3v>; |
271 |
+- SPLVDD-supply = <&reg_3p3v>; |
272 |
+- AVDD-supply = <&reg_3p3v>; |
273 |
+- IOVDD-supply = <&reg_3p3v>; |
274 |
++ HPVDD-supply = <&reg_gen_3p3>; |
275 |
++ SPRVDD-supply = <&reg_gen_3p3>; |
276 |
++ SPLVDD-supply = <&reg_gen_3p3>; |
277 |
++ AVDD-supply = <&reg_gen_3p3>; |
278 |
++ IOVDD-supply = <&reg_gen_3p3>; |
279 |
+ DVDD-supply = <&vgen4_reg>; |
280 |
+ reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>; |
281 |
+ }; |
282 |
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi |
283 |
+index 17477ab0fd8e1..3398f174f09b3 100644 |
284 |
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi |
285 |
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi |
286 |
+@@ -85,6 +85,8 @@ |
287 |
+ #size-cells = <2>; |
288 |
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>; |
289 |
+ ti,sci-dev-id = <199>; |
290 |
++ dma-coherent; |
291 |
++ dma-ranges; |
292 |
+ |
293 |
+ main_navss_intr: interrupt-controller1 { |
294 |
+ compatible = "ti,sci-intr"; |
295 |
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h |
296 |
+index a8578d650bb67..f362f72bcb508 100644 |
297 |
+--- a/arch/arm64/include/asm/kvm_asm.h |
298 |
++++ b/arch/arm64/include/asm/kvm_asm.h |
299 |
+@@ -57,6 +57,7 @@ |
300 |
+ #define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12 |
301 |
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13 |
302 |
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14 |
303 |
++#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 15 |
304 |
+ |
305 |
+ #ifndef __ASSEMBLY__ |
306 |
+ |
307 |
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c |
308 |
+index 84b5f79c9eab4..7730b81aad6d1 100644 |
309 |
+--- a/arch/arm64/kvm/arm.c |
310 |
++++ b/arch/arm64/kvm/arm.c |
311 |
+@@ -715,11 +715,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
312 |
+ return ret; |
313 |
+ } |
314 |
+ |
315 |
+- if (run->immediate_exit) |
316 |
+- return -EINTR; |
317 |
+- |
318 |
+ vcpu_load(vcpu); |
319 |
+ |
320 |
++ if (run->immediate_exit) { |
321 |
++ ret = -EINTR; |
322 |
++ goto out; |
323 |
++ } |
324 |
++ |
325 |
+ kvm_sigset_activate(vcpu); |
326 |
+ |
327 |
+ ret = 1; |
328 |
+@@ -892,6 +894,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
329 |
+ |
330 |
+ kvm_sigset_deactivate(vcpu); |
331 |
+ |
332 |
++out: |
333 |
++ /* |
334 |
++ * In the unlikely event that we are returning to userspace |
335 |
++ * with pending exceptions or PC adjustment, commit these |
336 |
++ * adjustments in order to give userspace a consistent view of |
337 |
++ * the vcpu state. Note that this relies on __kvm_adjust_pc() |
338 |
++ * being preempt-safe on VHE. |
339 |
++ */ |
340 |
++ if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION | |
341 |
++ KVM_ARM64_INCREMENT_PC))) |
342 |
++ kvm_call_hyp(__kvm_adjust_pc, vcpu); |
343 |
++ |
344 |
+ vcpu_put(vcpu); |
345 |
+ return ret; |
346 |
+ } |
347 |
+diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c |
348 |
+index 0812a496725f6..11541b94b328f 100644 |
349 |
+--- a/arch/arm64/kvm/hyp/exception.c |
350 |
++++ b/arch/arm64/kvm/hyp/exception.c |
351 |
+@@ -331,8 +331,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu) |
352 |
+ } |
353 |
+ |
354 |
+ /* |
355 |
+- * Adjust the guest PC on entry, depending on flags provided by EL1 |
356 |
+- * for the purpose of emulation (MMIO, sysreg) or exception injection. |
357 |
++ * Adjust the guest PC (and potentially exception state) depending on |
358 |
++ * flags provided by the emulation code. |
359 |
+ */ |
360 |
+ void __kvm_adjust_pc(struct kvm_vcpu *vcpu) |
361 |
+ { |
362 |
+diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c |
363 |
+index 936328207bde0..e52582e140873 100644 |
364 |
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c |
365 |
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c |
366 |
+@@ -25,6 +25,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) |
367 |
+ cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu)); |
368 |
+ } |
369 |
+ |
370 |
++static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt) |
371 |
++{ |
372 |
++ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1); |
373 |
++ |
374 |
++ __kvm_adjust_pc(kern_hyp_va(vcpu)); |
375 |
++} |
376 |
++ |
377 |
+ static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt) |
378 |
+ { |
379 |
+ __kvm_flush_vm_context(); |
380 |
+@@ -112,6 +119,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *); |
381 |
+ |
382 |
+ static const hcall_t host_hcall[] = { |
383 |
+ HANDLE_FUNC(__kvm_vcpu_run), |
384 |
++ HANDLE_FUNC(__kvm_adjust_pc), |
385 |
+ HANDLE_FUNC(__kvm_flush_vm_context), |
386 |
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), |
387 |
+ HANDLE_FUNC(__kvm_tlb_flush_vmid), |
388 |
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c |
389 |
+index 7719d632df8df..1754498b07174 100644 |
390 |
+--- a/arch/mips/mm/cache.c |
391 |
++++ b/arch/mips/mm/cache.c |
392 |
+@@ -157,31 +157,29 @@ unsigned long _page_cachable_default; |
393 |
+ EXPORT_SYMBOL(_page_cachable_default); |
394 |
+ |
395 |
+ #define PM(p) __pgprot(_page_cachable_default | (p)) |
396 |
+-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p)) |
397 |
+ |
398 |
+ static inline void setup_protection_map(void) |
399 |
+ { |
400 |
+ protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); |
401 |
+- protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); |
402 |
+- protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); |
403 |
+- protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); |
404 |
+- protection_map[4] = PVA(_PAGE_PRESENT); |
405 |
+- protection_map[5] = PVA(_PAGE_PRESENT); |
406 |
+- protection_map[6] = PVA(_PAGE_PRESENT); |
407 |
+- protection_map[7] = PVA(_PAGE_PRESENT); |
408 |
++ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); |
409 |
++ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); |
410 |
++ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); |
411 |
++ protection_map[4] = PM(_PAGE_PRESENT); |
412 |
++ protection_map[5] = PM(_PAGE_PRESENT); |
413 |
++ protection_map[6] = PM(_PAGE_PRESENT); |
414 |
++ protection_map[7] = PM(_PAGE_PRESENT); |
415 |
+ |
416 |
+ protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); |
417 |
+- protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); |
418 |
+- protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | |
419 |
++ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); |
420 |
++ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | |
421 |
+ _PAGE_NO_READ); |
422 |
+- protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); |
423 |
+- protection_map[12] = PVA(_PAGE_PRESENT); |
424 |
+- protection_map[13] = PVA(_PAGE_PRESENT); |
425 |
+- protection_map[14] = PVA(_PAGE_PRESENT); |
426 |
+- protection_map[15] = PVA(_PAGE_PRESENT); |
427 |
++ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); |
428 |
++ protection_map[12] = PM(_PAGE_PRESENT); |
429 |
++ protection_map[13] = PM(_PAGE_PRESENT); |
430 |
++ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE); |
431 |
++ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE); |
432 |
+ } |
433 |
+ |
434 |
+-#undef _PVA |
435 |
+ #undef PM |
436 |
+ |
437 |
+ void cpu_cache_init(void) |
438 |
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c |
439 |
+index 01ab2163659e4..e8c2a6373157d 100644 |
440 |
+--- a/arch/powerpc/kernel/kprobes.c |
441 |
++++ b/arch/powerpc/kernel/kprobes.c |
442 |
+@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p) |
443 |
+ int ret = 0; |
444 |
+ struct kprobe *prev; |
445 |
+ struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr); |
446 |
+- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1)); |
447 |
+ |
448 |
+ if ((unsigned long)p->addr & 0x03) { |
449 |
+ printk("Attempt to register kprobe at an unaligned address\n"); |
450 |
+@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p) |
451 |
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { |
452 |
+ printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); |
453 |
+ ret = -EINVAL; |
454 |
+- } else if (ppc_inst_prefixed(prefix)) { |
455 |
++ } else if ((unsigned long)p->addr & ~PAGE_MASK && |
456 |
++ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) { |
457 |
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n"); |
458 |
+ ret = -EINVAL; |
459 |
+ } |
460 |
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
461 |
+index 208a053c9adfd..60c5bc0c130cf 100644 |
462 |
+--- a/arch/powerpc/kvm/book3s_hv.c |
463 |
++++ b/arch/powerpc/kvm/book3s_hv.c |
464 |
+@@ -4418,7 +4418,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) |
465 |
+ mtspr(SPRN_EBBRR, ebb_regs[1]); |
466 |
+ mtspr(SPRN_BESCR, ebb_regs[2]); |
467 |
+ mtspr(SPRN_TAR, user_tar); |
468 |
+- mtspr(SPRN_FSCR, current->thread.fscr); |
469 |
+ } |
470 |
+ mtspr(SPRN_VRSAVE, user_vrsave); |
471 |
+ |
472 |
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
473 |
+index 5e634db4809bf..004f0d4e665f8 100644 |
474 |
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
475 |
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
476 |
+@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
477 |
+ #define STACK_SLOT_UAMOR (SFS-88) |
478 |
+ #define STACK_SLOT_DAWR1 (SFS-96) |
479 |
+ #define STACK_SLOT_DAWRX1 (SFS-104) |
480 |
++#define STACK_SLOT_FSCR (SFS-112) |
481 |
+ /* the following is used by the P9 short path */ |
482 |
+ #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ |
483 |
+ |
484 |
+@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION |
485 |
+ std r6, STACK_SLOT_DAWR0(r1) |
486 |
+ std r7, STACK_SLOT_DAWRX0(r1) |
487 |
+ std r8, STACK_SLOT_IAMR(r1) |
488 |
++ mfspr r5, SPRN_FSCR |
489 |
++ std r5, STACK_SLOT_FSCR(r1) |
490 |
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
491 |
+ BEGIN_FTR_SECTION |
492 |
+ mfspr r6, SPRN_DAWR1 |
493 |
+@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE |
494 |
+ ld r7, STACK_SLOT_HFSCR(r1) |
495 |
+ mtspr SPRN_HFSCR, r7 |
496 |
+ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) |
497 |
++BEGIN_FTR_SECTION |
498 |
++ ld r5, STACK_SLOT_FSCR(r1) |
499 |
++ mtspr SPRN_FSCR, r5 |
500 |
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
501 |
+ /* |
502 |
+ * Restore various registers to 0, where non-zero values |
503 |
+ * set by the guest could disrupt the host. |
504 |
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile |
505 |
+index ca2b40dfd24b8..24d936c147cdf 100644 |
506 |
+--- a/arch/riscv/kernel/vdso/Makefile |
507 |
++++ b/arch/riscv/kernel/vdso/Makefile |
508 |
+@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),) |
509 |
+ endif |
510 |
+ |
511 |
+ # Build rules |
512 |
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o |
513 |
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S |
514 |
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) |
515 |
+ |
516 |
+ obj-y += vdso.o vdso-syms.o |
517 |
+@@ -41,7 +41,7 @@ KASAN_SANITIZE := n |
518 |
+ $(obj)/vdso.o: $(obj)/vdso.so |
519 |
+ |
520 |
+ # link rule for the .so file, .lds has to be first |
521 |
+-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE |
522 |
++$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE |
523 |
+ $(call if_changed,vdsold) |
524 |
+ LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \ |
525 |
+ --build-id=sha1 --hash-style=both --eh-frame-hdr |
526 |
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h |
527 |
+index 412b51e059c80..48067af946785 100644 |
528 |
+--- a/arch/x86/include/asm/apic.h |
529 |
++++ b/arch/x86/include/asm/apic.h |
530 |
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void) |
531 |
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); |
532 |
+ extern void lapic_assign_system_vectors(void); |
533 |
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace); |
534 |
++extern void lapic_update_legacy_vectors(void); |
535 |
+ extern void lapic_online(void); |
536 |
+ extern void lapic_offline(void); |
537 |
+ extern bool apic_needs_pit(void); |
538 |
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h |
539 |
+index b7dd944dc8673..8f28fafa98b32 100644 |
540 |
+--- a/arch/x86/include/asm/disabled-features.h |
541 |
++++ b/arch/x86/include/asm/disabled-features.h |
542 |
+@@ -56,11 +56,8 @@ |
543 |
+ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) |
544 |
+ #endif |
545 |
+ |
546 |
+-#ifdef CONFIG_IOMMU_SUPPORT |
547 |
+-# define DISABLE_ENQCMD 0 |
548 |
+-#else |
549 |
+-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) |
550 |
+-#endif |
551 |
++/* Force disable because it's broken beyond repair */ |
552 |
++#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) |
553 |
+ |
554 |
+ #ifdef CONFIG_X86_SGX |
555 |
+ # define DISABLE_SGX 0 |
556 |
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h |
557 |
+index ed33a14188f66..23bef08a83880 100644 |
558 |
+--- a/arch/x86/include/asm/fpu/api.h |
559 |
++++ b/arch/x86/include/asm/fpu/api.h |
560 |
+@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name); |
561 |
+ */ |
562 |
+ #define PASID_DISABLED 0 |
563 |
+ |
564 |
+-#ifdef CONFIG_IOMMU_SUPPORT |
565 |
+-/* Update current's PASID MSR/state by mm's PASID. */ |
566 |
+-void update_pasid(void); |
567 |
+-#else |
568 |
+ static inline void update_pasid(void) { } |
569 |
+-#endif |
570 |
++ |
571 |
+ #endif /* _ASM_X86_FPU_API_H */ |
572 |
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h |
573 |
+index 8d33ad80704f2..ceeba9f631722 100644 |
574 |
+--- a/arch/x86/include/asm/fpu/internal.h |
575 |
++++ b/arch/x86/include/asm/fpu/internal.h |
576 |
+@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) |
577 |
+ pkru_val = pk->pkru; |
578 |
+ } |
579 |
+ __write_pkru(pkru_val); |
580 |
+- |
581 |
+- /* |
582 |
+- * Expensive PASID MSR write will be avoided in update_pasid() because |
583 |
+- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated |
584 |
+- * unless it's different from mm->pasid to reduce overhead. |
585 |
+- */ |
586 |
+- update_pasid(); |
587 |
+ } |
588 |
+ |
589 |
+ #endif /* _ASM_X86_FPU_INTERNAL_H */ |
590 |
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h |
591 |
+index 3381198525126..69299878b200a 100644 |
592 |
+--- a/arch/x86/include/asm/kvm_para.h |
593 |
++++ b/arch/x86/include/asm/kvm_para.h |
594 |
+@@ -7,8 +7,6 @@ |
595 |
+ #include <linux/interrupt.h> |
596 |
+ #include <uapi/asm/kvm_para.h> |
597 |
+ |
598 |
+-extern void kvmclock_init(void); |
599 |
+- |
600 |
+ #ifdef CONFIG_KVM_GUEST |
601 |
+ bool kvm_check_and_clear_guest_paused(void); |
602 |
+ #else |
603 |
+@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, |
604 |
+ } |
605 |
+ |
606 |
+ #ifdef CONFIG_KVM_GUEST |
607 |
++void kvmclock_init(void); |
608 |
++void kvmclock_disable(void); |
609 |
+ bool kvm_para_available(void); |
610 |
+ unsigned int kvm_arch_para_features(void); |
611 |
+ unsigned int kvm_arch_para_hints(void); |
612 |
+ void kvm_async_pf_task_wait_schedule(u32 token); |
613 |
+ void kvm_async_pf_task_wake(u32 token); |
614 |
+ u32 kvm_read_and_reset_apf_flags(void); |
615 |
+-void kvm_disable_steal_time(void); |
616 |
+ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token); |
617 |
+ |
618 |
+ DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled); |
619 |
+@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void) |
620 |
+ return 0; |
621 |
+ } |
622 |
+ |
623 |
+-static inline void kvm_disable_steal_time(void) |
624 |
+-{ |
625 |
+- return; |
626 |
+-} |
627 |
+- |
628 |
+ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token) |
629 |
+ { |
630 |
+ return false; |
631 |
+diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h |
632 |
+index ddbdefd5b94f1..91a7b6687c3b9 100644 |
633 |
+--- a/arch/x86/include/asm/thermal.h |
634 |
++++ b/arch/x86/include/asm/thermal.h |
635 |
+@@ -3,11 +3,13 @@ |
636 |
+ #define _ASM_X86_THERMAL_H |
637 |
+ |
638 |
+ #ifdef CONFIG_X86_THERMAL_VECTOR |
639 |
++void therm_lvt_init(void); |
640 |
+ void intel_init_thermal(struct cpuinfo_x86 *c); |
641 |
+ bool x86_thermal_enabled(void); |
642 |
+ void intel_thermal_interrupt(void); |
643 |
+ #else |
644 |
+-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { } |
645 |
++static inline void therm_lvt_init(void) { } |
646 |
++static inline void intel_init_thermal(struct cpuinfo_x86 *c) { } |
647 |
+ #endif |
648 |
+ |
649 |
+ #endif /* _ASM_X86_THERMAL_H */ |
650 |
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
651 |
+index 4f26700f314d9..b967a2ba7494a 100644 |
652 |
+--- a/arch/x86/kernel/apic/apic.c |
653 |
++++ b/arch/x86/kernel/apic/apic.c |
654 |
+@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode) |
655 |
+ end_local_APIC_setup(); |
656 |
+ irq_remap_enable_fault_handling(); |
657 |
+ setup_IO_APIC(); |
658 |
++ lapic_update_legacy_vectors(); |
659 |
+ } |
660 |
+ |
661 |
+ #ifdef CONFIG_UP_LATE_INIT |
662 |
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c |
663 |
+index 3c9c7492252f8..8c97a3468affa 100644 |
664 |
+--- a/arch/x86/kernel/apic/vector.c |
665 |
++++ b/arch/x86/kernel/apic/vector.c |
666 |
+@@ -730,6 +730,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace) |
667 |
+ irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace); |
668 |
+ } |
669 |
+ |
670 |
++void __init lapic_update_legacy_vectors(void) |
671 |
++{ |
672 |
++ unsigned int i; |
673 |
++ |
674 |
++ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0) |
675 |
++ return; |
676 |
++ |
677 |
++ /* |
678 |
++ * If the IO/APIC is disabled via config, kernel command line or |
679 |
++ * lack of enumeration then all legacy interrupts are routed |
680 |
++ * through the PIC. Make sure that they are marked as legacy |
681 |
++ * vectors. PIC_CASCADE_IRQ has already been marked in |
682 |
++ * lapic_assign_system_vectors(). |
683 |
++ */ |
684 |
++ for (i = 0; i < nr_legacy_irqs(); i++) { |
685 |
++ if (i != PIC_CASCADE_IR) |
686 |
++ lapic_assign_legacy_vector(i, true); |
687 |
++ } |
688 |
++} |
689 |
++ |
690 |
+ void __init lapic_assign_system_vectors(void) |
691 |
+ { |
692 |
+ unsigned int i, vector = 0; |
693 |
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c |
694 |
+index 683749b80ae28..2ad57cc14b83f 100644 |
695 |
+--- a/arch/x86/kernel/fpu/xstate.c |
696 |
++++ b/arch/x86/kernel/fpu/xstate.c |
697 |
+@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, |
698 |
+ return 0; |
699 |
+ } |
700 |
+ #endif /* CONFIG_PROC_PID_ARCH_STATUS */ |
701 |
+- |
702 |
+-#ifdef CONFIG_IOMMU_SUPPORT |
703 |
+-void update_pasid(void) |
704 |
+-{ |
705 |
+- u64 pasid_state; |
706 |
+- u32 pasid; |
707 |
+- |
708 |
+- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) |
709 |
+- return; |
710 |
+- |
711 |
+- if (!current->mm) |
712 |
+- return; |
713 |
+- |
714 |
+- pasid = READ_ONCE(current->mm->pasid); |
715 |
+- /* Set the valid bit in the PASID MSR/state only for valid pasid. */ |
716 |
+- pasid_state = pasid == PASID_DISABLED ? |
717 |
+- pasid : pasid | MSR_IA32_PASID_VALID; |
718 |
+- |
719 |
+- /* |
720 |
+- * No need to hold fregs_lock() since the task's fpstate won't |
721 |
+- * be changed by others (e.g. ptrace) while the task is being |
722 |
+- * switched to or is in IPI. |
723 |
+- */ |
724 |
+- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { |
725 |
+- /* The MSR is active and can be directly updated. */ |
726 |
+- wrmsrl(MSR_IA32_PASID, pasid_state); |
727 |
+- } else { |
728 |
+- struct fpu *fpu = &current->thread.fpu; |
729 |
+- struct ia32_pasid_state *ppasid_state; |
730 |
+- struct xregs_state *xsave; |
731 |
+- |
732 |
+- /* |
733 |
+- * The CPU's xstate registers are not currently active. Just |
734 |
+- * update the PASID state in the memory buffer here. The |
735 |
+- * PASID MSR will be loaded when returning to user mode. |
736 |
+- */ |
737 |
+- xsave = &fpu->state.xsave; |
738 |
+- xsave->header.xfeatures |= XFEATURE_MASK_PASID; |
739 |
+- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID); |
740 |
+- /* |
741 |
+- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state |
742 |
+- * won't be NULL and no need to check its value. |
743 |
+- * |
744 |
+- * Only update the task's PASID state when it's different |
745 |
+- * from the mm's pasid. |
746 |
+- */ |
747 |
+- if (ppasid_state->pasid != pasid_state) { |
748 |
+- /* |
749 |
+- * Invalid fpregs so that state restoring will pick up |
750 |
+- * the PASID state. |
751 |
+- */ |
752 |
+- __fpu_invalidate_fpregs_state(fpu); |
753 |
+- ppasid_state->pasid = pasid_state; |
754 |
+- } |
755 |
+- } |
756 |
+-} |
757 |
+-#endif /* CONFIG_IOMMU_SUPPORT */ |
758 |
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c |
759 |
+index 78bb0fae39826..919411a0117df 100644 |
760 |
+--- a/arch/x86/kernel/kvm.c |
761 |
++++ b/arch/x86/kernel/kvm.c |
762 |
+@@ -26,6 +26,7 @@ |
763 |
+ #include <linux/kprobes.h> |
764 |
+ #include <linux/nmi.h> |
765 |
+ #include <linux/swait.h> |
766 |
++#include <linux/syscore_ops.h> |
767 |
+ #include <asm/timer.h> |
768 |
+ #include <asm/cpu.h> |
769 |
+ #include <asm/traps.h> |
770 |
+@@ -37,6 +38,7 @@ |
771 |
+ #include <asm/tlb.h> |
772 |
+ #include <asm/cpuidle_haltpoll.h> |
773 |
+ #include <asm/ptrace.h> |
774 |
++#include <asm/reboot.h> |
775 |
+ #include <asm/svm.h> |
776 |
+ |
777 |
+ DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled); |
778 |
+@@ -374,6 +376,14 @@ static void kvm_pv_disable_apf(void) |
779 |
+ pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id()); |
780 |
+ } |
781 |
+ |
782 |
++static void kvm_disable_steal_time(void) |
783 |
++{ |
784 |
++ if (!has_steal_clock) |
785 |
++ return; |
786 |
++ |
787 |
++ wrmsr(MSR_KVM_STEAL_TIME, 0, 0); |
788 |
++} |
789 |
++ |
790 |
+ static void kvm_pv_guest_cpu_reboot(void *unused) |
791 |
+ { |
792 |
+ /* |
793 |
+@@ -416,14 +426,6 @@ static u64 kvm_steal_clock(int cpu) |
794 |
+ return steal; |
795 |
+ } |
796 |
+ |
797 |
+-void kvm_disable_steal_time(void) |
798 |
+-{ |
799 |
+- if (!has_steal_clock) |
800 |
+- return; |
801 |
+- |
802 |
+- wrmsr(MSR_KVM_STEAL_TIME, 0, 0); |
803 |
+-} |
804 |
+- |
805 |
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size) |
806 |
+ { |
807 |
+ early_set_memory_decrypted((unsigned long) ptr, size); |
808 |
+@@ -460,6 +462,27 @@ static bool pv_tlb_flush_supported(void) |
809 |
+ |
810 |
+ static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); |
811 |
+ |
812 |
++static void kvm_guest_cpu_offline(bool shutdown) |
813 |
++{ |
814 |
++ kvm_disable_steal_time(); |
815 |
++ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) |
816 |
++ wrmsrl(MSR_KVM_PV_EOI_EN, 0); |
817 |
++ kvm_pv_disable_apf(); |
818 |
++ if (!shutdown) |
819 |
++ apf_task_wake_all(); |
820 |
++ kvmclock_disable(); |
821 |
++} |
822 |
++ |
823 |
++static int kvm_cpu_online(unsigned int cpu) |
824 |
++{ |
825 |
++ unsigned long flags; |
826 |
++ |
827 |
++ local_irq_save(flags); |
828 |
++ kvm_guest_cpu_init(); |
829 |
++ local_irq_restore(flags); |
830 |
++ return 0; |
831 |
++} |
832 |
++ |
833 |
+ #ifdef CONFIG_SMP |
834 |
+ |
835 |
+ static bool pv_ipi_supported(void) |
836 |
+@@ -587,29 +610,46 @@ static void __init kvm_smp_prepare_boot_cpu(void) |
837 |
+ kvm_spinlock_init(); |
838 |
+ } |
839 |
+ |
840 |
+-static void kvm_guest_cpu_offline(void) |
841 |
++static int kvm_cpu_down_prepare(unsigned int cpu) |
842 |
+ { |
843 |
+- kvm_disable_steal_time(); |
844 |
+- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) |
845 |
+- wrmsrl(MSR_KVM_PV_EOI_EN, 0); |
846 |
+- kvm_pv_disable_apf(); |
847 |
+- apf_task_wake_all(); |
848 |
++ unsigned long flags; |
849 |
++ |
850 |
++ local_irq_save(flags); |
851 |
++ kvm_guest_cpu_offline(false); |
852 |
++ local_irq_restore(flags); |
853 |
++ return 0; |
854 |
+ } |
855 |
+ |
856 |
+-static int kvm_cpu_online(unsigned int cpu) |
857 |
++#endif |
858 |
++ |
859 |
++static int kvm_suspend(void) |
860 |
+ { |
861 |
+- local_irq_disable(); |
862 |
+- kvm_guest_cpu_init(); |
863 |
+- local_irq_enable(); |
864 |
++ kvm_guest_cpu_offline(false); |
865 |
++ |
866 |
+ return 0; |
867 |
+ } |
868 |
+ |
869 |
+-static int kvm_cpu_down_prepare(unsigned int cpu) |
870 |
++static void kvm_resume(void) |
871 |
+ { |
872 |
+- local_irq_disable(); |
873 |
+- kvm_guest_cpu_offline(); |
874 |
+- local_irq_enable(); |
875 |
+- return 0; |
876 |
++ kvm_cpu_online(raw_smp_processor_id()); |
877 |
++} |
878 |
++ |
879 |
++static struct syscore_ops kvm_syscore_ops = { |
880 |
++ .suspend = kvm_suspend, |
881 |
++ .resume = kvm_resume, |
882 |
++}; |
883 |
++ |
884 |
++/* |
885 |
++ * After a PV feature is registered, the host will keep writing to the |
886 |
++ * registered memory location. If the guest happens to shutdown, this memory |
887 |
++ * won't be valid. In cases like kexec, in which you install a new kernel, this |
888 |
++ * means a random memory location will be kept being written. |
889 |
++ */ |
890 |
++#ifdef CONFIG_KEXEC_CORE |
891 |
++static void kvm_crash_shutdown(struct pt_regs *regs) |
892 |
++{ |
893 |
++ kvm_guest_cpu_offline(true); |
894 |
++ native_machine_crash_shutdown(regs); |
895 |
+ } |
896 |
+ #endif |
897 |
+ |
898 |
+@@ -681,6 +721,12 @@ static void __init kvm_guest_init(void) |
899 |
+ kvm_guest_cpu_init(); |
900 |
+ #endif |
901 |
+ |
902 |
++#ifdef CONFIG_KEXEC_CORE |
903 |
++ machine_ops.crash_shutdown = kvm_crash_shutdown; |
904 |
++#endif |
905 |
++ |
906 |
++ register_syscore_ops(&kvm_syscore_ops); |
907 |
++ |
908 |
+ /* |
909 |
+ * Hard lockup detection is enabled by default. Disable it, as guests |
910 |
+ * can get false positives too easily, for example if the host is |
911 |
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c |
912 |
+index 1fc0962c89c08..b825c87c12ef7 100644 |
913 |
+--- a/arch/x86/kernel/kvmclock.c |
914 |
++++ b/arch/x86/kernel/kvmclock.c |
915 |
+@@ -20,7 +20,6 @@ |
916 |
+ #include <asm/hypervisor.h> |
917 |
+ #include <asm/mem_encrypt.h> |
918 |
+ #include <asm/x86_init.h> |
919 |
+-#include <asm/reboot.h> |
920 |
+ #include <asm/kvmclock.h> |
921 |
+ |
922 |
+ static int kvmclock __initdata = 1; |
923 |
+@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void) |
924 |
+ } |
925 |
+ #endif |
926 |
+ |
927 |
+-/* |
928 |
+- * After the clock is registered, the host will keep writing to the |
929 |
+- * registered memory location. If the guest happens to shutdown, this memory |
930 |
+- * won't be valid. In cases like kexec, in which you install a new kernel, this |
931 |
+- * means a random memory location will be kept being written. So before any |
932 |
+- * kind of shutdown from our side, we unregister the clock by writing anything |
933 |
+- * that does not have the 'enable' bit set in the msr |
934 |
+- */ |
935 |
+-#ifdef CONFIG_KEXEC_CORE |
936 |
+-static void kvm_crash_shutdown(struct pt_regs *regs) |
937 |
+-{ |
938 |
+- native_write_msr(msr_kvm_system_time, 0, 0); |
939 |
+- kvm_disable_steal_time(); |
940 |
+- native_machine_crash_shutdown(regs); |
941 |
+-} |
942 |
+-#endif |
943 |
+- |
944 |
+-static void kvm_shutdown(void) |
945 |
++void kvmclock_disable(void) |
946 |
+ { |
947 |
+ native_write_msr(msr_kvm_system_time, 0, 0); |
948 |
+- kvm_disable_steal_time(); |
949 |
+- native_machine_shutdown(); |
950 |
+ } |
951 |
+ |
952 |
+ static void __init kvmclock_init_mem(void) |
953 |
+@@ -351,10 +331,6 @@ void __init kvmclock_init(void) |
954 |
+ #endif |
955 |
+ x86_platform.save_sched_clock_state = kvm_save_sched_clock_state; |
956 |
+ x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state; |
957 |
+- machine_ops.shutdown = kvm_shutdown; |
958 |
+-#ifdef CONFIG_KEXEC_CORE |
959 |
+- machine_ops.crash_shutdown = kvm_crash_shutdown; |
960 |
+-#endif |
961 |
+ kvm_get_preset_lpj(); |
962 |
+ |
963 |
+ /* |
964 |
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c |
965 |
+index ccab6cf91283d..e79f21d13a0d7 100644 |
966 |
+--- a/arch/x86/kernel/setup.c |
967 |
++++ b/arch/x86/kernel/setup.c |
968 |
+@@ -44,6 +44,7 @@ |
969 |
+ #include <asm/pci-direct.h> |
970 |
+ #include <asm/prom.h> |
971 |
+ #include <asm/proto.h> |
972 |
++#include <asm/thermal.h> |
973 |
+ #include <asm/unwind.h> |
974 |
+ #include <asm/vsyscall.h> |
975 |
+ #include <linux/vmalloc.h> |
976 |
+@@ -1220,6 +1221,14 @@ void __init setup_arch(char **cmdline_p) |
977 |
+ |
978 |
+ x86_init.timers.wallclock_init(); |
979 |
+ |
980 |
++ /* |
981 |
++ * This needs to run before setup_local_APIC() which soft-disables the |
982 |
++ * local APIC temporarily and that masks the thermal LVT interrupt, |
983 |
++ * leading to softlockups on machines which have configured SMI |
984 |
++ * interrupt delivery. |
985 |
++ */ |
986 |
++ therm_lvt_init(); |
987 |
++ |
988 |
+ mcheck_init(); |
989 |
+ |
990 |
+ register_refined_jiffies(CLOCK_TICK_RATE); |
991 |
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c |
992 |
+index 9a6825feaf53f..30569bbbca9ac 100644 |
993 |
+--- a/arch/x86/kvm/svm/svm.c |
994 |
++++ b/arch/x86/kvm/svm/svm.c |
995 |
+@@ -2532,7 +2532,7 @@ static int cr_interception(struct vcpu_svm *svm) |
996 |
+ err = 0; |
997 |
+ if (cr >= 16) { /* mov to cr */ |
998 |
+ cr -= 16; |
999 |
+- val = kvm_register_read(&svm->vcpu, reg); |
1000 |
++ val = kvm_register_readl(&svm->vcpu, reg); |
1001 |
+ trace_kvm_cr_write(cr, val); |
1002 |
+ switch (cr) { |
1003 |
+ case 0: |
1004 |
+@@ -2578,7 +2578,7 @@ static int cr_interception(struct vcpu_svm *svm) |
1005 |
+ kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
1006 |
+ return 1; |
1007 |
+ } |
1008 |
+- kvm_register_write(&svm->vcpu, reg, val); |
1009 |
++ kvm_register_writel(&svm->vcpu, reg, val); |
1010 |
+ trace_kvm_cr_read(cr, val); |
1011 |
+ } |
1012 |
+ return kvm_complete_insn_gp(&svm->vcpu, err); |
1013 |
+@@ -2643,11 +2643,11 @@ static int dr_interception(struct vcpu_svm *svm) |
1014 |
+ dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; |
1015 |
+ if (dr >= 16) { /* mov to DRn */ |
1016 |
+ dr -= 16; |
1017 |
+- val = kvm_register_read(&svm->vcpu, reg); |
1018 |
++ val = kvm_register_readl(&svm->vcpu, reg); |
1019 |
+ err = kvm_set_dr(&svm->vcpu, dr, val); |
1020 |
+ } else { |
1021 |
+ kvm_get_dr(&svm->vcpu, dr, &val); |
1022 |
+- kvm_register_write(&svm->vcpu, reg, val); |
1023 |
++ kvm_register_writel(&svm->vcpu, reg, val); |
1024 |
+ } |
1025 |
+ |
1026 |
+ return kvm_complete_insn_gp(&svm->vcpu, err); |
1027 |
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c |
1028 |
+index a73347e2cdfc5..ea3d0b73731bc 100644 |
1029 |
+--- a/arch/x86/mm/fault.c |
1030 |
++++ b/arch/x86/mm/fault.c |
1031 |
+@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
1032 |
+ |
1033 |
+ if (si_code == SEGV_PKUERR) |
1034 |
+ force_sig_pkuerr((void __user *)address, pkey); |
1035 |
+- |
1036 |
+- force_sig_fault(SIGSEGV, si_code, (void __user *)address); |
1037 |
++ else |
1038 |
++ force_sig_fault(SIGSEGV, si_code, (void __user *)address); |
1039 |
+ |
1040 |
+ local_irq_disable(); |
1041 |
+ } |
1042 |
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c |
1043 |
+index a19374d261013..65f599e9075bc 100644 |
1044 |
+--- a/arch/x86/mm/mem_encrypt_identity.c |
1045 |
++++ b/arch/x86/mm/mem_encrypt_identity.c |
1046 |
+@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp) |
1047 |
+ #define AMD_SME_BIT BIT(0) |
1048 |
+ #define AMD_SEV_BIT BIT(1) |
1049 |
+ |
1050 |
+- /* Check the SEV MSR whether SEV or SME is enabled */ |
1051 |
+- sev_status = __rdmsr(MSR_AMD64_SEV); |
1052 |
+- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; |
1053 |
+- |
1054 |
+ /* |
1055 |
+ * Check for the SME/SEV feature: |
1056 |
+ * CPUID Fn8000_001F[EAX] |
1057 |
+@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp) |
1058 |
+ eax = 0x8000001f; |
1059 |
+ ecx = 0; |
1060 |
+ native_cpuid(&eax, &ebx, &ecx, &edx); |
1061 |
+- if (!(eax & feature_mask)) |
1062 |
++ /* Check whether SEV or SME is supported */ |
1063 |
++ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT))) |
1064 |
+ return; |
1065 |
+ |
1066 |
+ me_mask = 1UL << (ebx & 0x3f); |
1067 |
+ |
1068 |
++ /* Check the SEV MSR whether SEV or SME is enabled */ |
1069 |
++ sev_status = __rdmsr(MSR_AMD64_SEV); |
1070 |
++ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; |
1071 |
++ |
1072 |
+ /* Check if memory encryption is enabled */ |
1073 |
+ if (feature_mask == AMD_SME_BIT) { |
1074 |
+ /* |
1075 |
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c |
1076 |
+index 624a26794d558..e5ba9795ec696 100644 |
1077 |
+--- a/drivers/acpi/acpica/utdelete.c |
1078 |
++++ b/drivers/acpi/acpica/utdelete.c |
1079 |
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) |
1080 |
+ } |
1081 |
+ break; |
1082 |
+ |
1083 |
++ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: |
1084 |
++ |
1085 |
++ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, |
1086 |
++ "***** Address handler %p\n", object)); |
1087 |
++ |
1088 |
++ acpi_os_delete_mutex(object->address_space.context_mutex); |
1089 |
++ break; |
1090 |
++ |
1091 |
+ default: |
1092 |
+ |
1093 |
+ break; |
1094 |
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c |
1095 |
+index 68145e326eb90..30e9b700273e1 100644 |
1096 |
+--- a/drivers/bus/ti-sysc.c |
1097 |
++++ b/drivers/bus/ti-sysc.c |
1098 |
+@@ -1334,6 +1334,34 @@ err_allow_idle: |
1099 |
+ return error; |
1100 |
+ } |
1101 |
+ |
1102 |
++static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled) |
1103 |
++{ |
1104 |
++ struct device *dev = ddata->dev; |
1105 |
++ int error; |
1106 |
++ |
1107 |
++ /* Disable target module if it is enabled */ |
1108 |
++ if (ddata->enabled) { |
1109 |
++ error = sysc_runtime_suspend(dev); |
1110 |
++ if (error) |
1111 |
++ dev_warn(dev, "reinit suspend failed: %i\n", error); |
1112 |
++ } |
1113 |
++ |
1114 |
++ /* Enable target module */ |
1115 |
++ error = sysc_runtime_resume(dev); |
1116 |
++ if (error) |
1117 |
++ dev_warn(dev, "reinit resume failed: %i\n", error); |
1118 |
++ |
1119 |
++ if (leave_enabled) |
1120 |
++ return error; |
1121 |
++ |
1122 |
++ /* Disable target module if no leave_enabled was set */ |
1123 |
++ error = sysc_runtime_suspend(dev); |
1124 |
++ if (error) |
1125 |
++ dev_warn(dev, "reinit suspend failed: %i\n", error); |
1126 |
++ |
1127 |
++ return error; |
1128 |
++} |
1129 |
++ |
1130 |
+ static int __maybe_unused sysc_noirq_suspend(struct device *dev) |
1131 |
+ { |
1132 |
+ struct sysc *ddata; |
1133 |
+@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) |
1134 |
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) |
1135 |
+ return 0; |
1136 |
+ |
1137 |
+- return pm_runtime_force_suspend(dev); |
1138 |
++ if (!ddata->enabled) |
1139 |
++ return 0; |
1140 |
++ |
1141 |
++ ddata->needs_resume = 1; |
1142 |
++ |
1143 |
++ return sysc_runtime_suspend(dev); |
1144 |
+ } |
1145 |
+ |
1146 |
+ static int __maybe_unused sysc_noirq_resume(struct device *dev) |
1147 |
+ { |
1148 |
+ struct sysc *ddata; |
1149 |
++ int error = 0; |
1150 |
+ |
1151 |
+ ddata = dev_get_drvdata(dev); |
1152 |
+ |
1153 |
+@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) |
1154 |
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) |
1155 |
+ return 0; |
1156 |
+ |
1157 |
+- return pm_runtime_force_resume(dev); |
1158 |
++ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) { |
1159 |
++ error = sysc_reinit_module(ddata, ddata->needs_resume); |
1160 |
++ if (error) |
1161 |
++ dev_warn(dev, "noirq_resume failed: %i\n", error); |
1162 |
++ } else if (ddata->needs_resume) { |
1163 |
++ error = sysc_runtime_resume(dev); |
1164 |
++ if (error) |
1165 |
++ dev_warn(dev, "noirq_resume failed: %i\n", error); |
1166 |
++ } |
1167 |
++ |
1168 |
++ ddata->needs_resume = 0; |
1169 |
++ |
1170 |
++ return error; |
1171 |
+ } |
1172 |
+ |
1173 |
+ static const struct dev_pm_ops sysc_pm_ops = { |
1174 |
+@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { |
1175 |
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), |
1176 |
+ /* Uarts on omap4 and later */ |
1177 |
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, |
1178 |
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), |
1179 |
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), |
1180 |
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, |
1181 |
+- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), |
1182 |
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), |
1183 |
+ |
1184 |
+ /* Quirks that need to be set based on the module address */ |
1185 |
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, |
1186 |
+@@ -1466,7 +1512,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { |
1187 |
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, |
1188 |
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), |
1189 |
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, |
1190 |
+- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), |
1191 |
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY | |
1192 |
++ SYSC_QUIRK_REINIT_ON_RESUME), |
1193 |
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, |
1194 |
+ SYSC_MODULE_QUIRK_WDT), |
1195 |
+ /* PRUSS on am3, am4 and am5 */ |
1196 |
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c |
1197 |
+index 07cf7977a0450..59f2104ffc771 100644 |
1198 |
+--- a/drivers/dma/idxd/init.c |
1199 |
++++ b/drivers/dma/idxd/init.c |
1200 |
+@@ -675,12 +675,12 @@ static int __init idxd_init_module(void) |
1201 |
+ * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in |
1202 |
+ * enumerating the device. We can not utilize it. |
1203 |
+ */ |
1204 |
+- if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) { |
1205 |
++ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) { |
1206 |
+ pr_warn("idxd driver failed to load without MOVDIR64B.\n"); |
1207 |
+ return -ENODEV; |
1208 |
+ } |
1209 |
+ |
1210 |
+- if (!boot_cpu_has(X86_FEATURE_ENQCMD)) |
1211 |
++ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) |
1212 |
+ pr_warn("Platform does not have ENQCMD(S) support.\n"); |
1213 |
+ else |
1214 |
+ support_enqcmd = true; |
1215 |
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c |
1216 |
+index e15d484b6a5a7..ea7ca74fc1730 100644 |
1217 |
+--- a/drivers/firmware/efi/cper.c |
1218 |
++++ b/drivers/firmware/efi/cper.c |
1219 |
+@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) |
1220 |
+ if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) |
1221 |
+ return 0; |
1222 |
+ |
1223 |
+- n = 0; |
1224 |
+- len = CPER_REC_LEN - 1; |
1225 |
++ len = CPER_REC_LEN; |
1226 |
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device); |
1227 |
+ if (bank && device) |
1228 |
+ n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); |
1229 |
+@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) |
1230 |
+ "DIMM location: not present. DMI handle: 0x%.4x ", |
1231 |
+ mem->mem_dev_handle); |
1232 |
+ |
1233 |
+- msg[n] = '\0'; |
1234 |
+ return n; |
1235 |
+ } |
1236 |
+ |
1237 |
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c |
1238 |
+index bb042ab7c2be6..e901f8564ca0c 100644 |
1239 |
+--- a/drivers/firmware/efi/fdtparams.c |
1240 |
++++ b/drivers/firmware/efi/fdtparams.c |
1241 |
+@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm) |
1242 |
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name)); |
1243 |
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params)); |
1244 |
+ |
1245 |
++ if (!fdt) |
1246 |
++ return 0; |
1247 |
++ |
1248 |
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) { |
1249 |
+ node = fdt_path_offset(fdt, dt_params[i].path); |
1250 |
+ if (node < 0) |
1251 |
+diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c |
1252 |
+index 4e81c6077188e..dd95f330fe6e1 100644 |
1253 |
+--- a/drivers/firmware/efi/libstub/file.c |
1254 |
++++ b/drivers/firmware/efi/libstub/file.c |
1255 |
+@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len, |
1256 |
+ return 0; |
1257 |
+ |
1258 |
+ /* Skip any leading slashes */ |
1259 |
+- while (cmdline[i] == L'/' || cmdline[i] == L'\\') |
1260 |
++ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\')) |
1261 |
+ i++; |
1262 |
+ |
1263 |
+ while (--result_len > 0 && i < cmdline_len) { |
1264 |
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c |
1265 |
+index 5737cb0fcd44e..0a9aba5f9ceff 100644 |
1266 |
+--- a/drivers/firmware/efi/memattr.c |
1267 |
++++ b/drivers/firmware/efi/memattr.c |
1268 |
+@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) |
1269 |
+ return false; |
1270 |
+ } |
1271 |
+ |
1272 |
+- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) { |
1273 |
+- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n"); |
1274 |
+- return false; |
1275 |
+- } |
1276 |
+- |
1277 |
+ if (PAGE_SIZE > EFI_PAGE_SIZE && |
1278 |
+ (!PAGE_ALIGNED(in->phys_addr) || |
1279 |
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) { |
1280 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
1281 |
+index 0350205c48974..6819fe5612d9e 100644 |
1282 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
1283 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
1284 |
+@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, |
1285 |
+ { |
1286 |
+ struct amdgpu_ctx *ctx; |
1287 |
+ struct amdgpu_ctx_mgr *mgr; |
1288 |
+- unsigned long ras_counter; |
1289 |
+ |
1290 |
+ if (!fpriv) |
1291 |
+ return -EINVAL; |
1292 |
+@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, |
1293 |
+ if (atomic_read(&ctx->guilty)) |
1294 |
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; |
1295 |
+ |
1296 |
+- /*query ue count*/ |
1297 |
+- ras_counter = amdgpu_ras_query_error_count(adev, false); |
1298 |
+- /*ras counter is monotonic increasing*/ |
1299 |
+- if (ras_counter != ctx->ras_counter_ue) { |
1300 |
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; |
1301 |
+- ctx->ras_counter_ue = ras_counter; |
1302 |
+- } |
1303 |
+- |
1304 |
+- /*query ce count*/ |
1305 |
+- ras_counter = amdgpu_ras_query_error_count(adev, true); |
1306 |
+- if (ras_counter != ctx->ras_counter_ce) { |
1307 |
+- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; |
1308 |
+- ctx->ras_counter_ce = ras_counter; |
1309 |
+- } |
1310 |
+- |
1311 |
+ mutex_unlock(&mgr->lock); |
1312 |
+ return 0; |
1313 |
+ } |
1314 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
1315 |
+index a2ac44cc2a6da..e80cc2928b583 100644 |
1316 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
1317 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
1318 |
+@@ -944,6 +944,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, |
1319 |
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); |
1320 |
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { |
1321 |
+ drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); |
1322 |
++ drm_gem_object_put(obj); |
1323 |
+ return ERR_PTR(-EINVAL); |
1324 |
+ } |
1325 |
+ |
1326 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c |
1327 |
+index dc947c8ffe213..e6c4a36eaf9ae 100644 |
1328 |
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c |
1329 |
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c |
1330 |
+@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle) |
1331 |
+ static int jpeg_v2_5_hw_fini(void *handle) |
1332 |
+ { |
1333 |
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1334 |
+- struct amdgpu_ring *ring; |
1335 |
+ int i; |
1336 |
+ |
1337 |
++ cancel_delayed_work_sync(&adev->vcn.idle_work); |
1338 |
++ |
1339 |
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { |
1340 |
+ if (adev->jpeg.harvest_config & (1 << i)) |
1341 |
+ continue; |
1342 |
+ |
1343 |
+- ring = &adev->jpeg.inst[i].ring_dec; |
1344 |
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && |
1345 |
+ RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) |
1346 |
+ jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); |
1347 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c |
1348 |
+index 1d354245678d5..2ea68c84e6b48 100644 |
1349 |
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c |
1350 |
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c |
1351 |
+@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle) |
1352 |
+ static int jpeg_v3_0_hw_fini(void *handle) |
1353 |
+ { |
1354 |
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1355 |
+- struct amdgpu_ring *ring; |
1356 |
+ |
1357 |
+- ring = &adev->jpeg.inst->ring_dec; |
1358 |
++ cancel_delayed_work_sync(&adev->vcn.idle_work); |
1359 |
++ |
1360 |
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && |
1361 |
+ RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) |
1362 |
+ jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE); |
1363 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c |
1364 |
+index 760859880c1ed..4eebf973a0658 100644 |
1365 |
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c |
1366 |
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c |
1367 |
+@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
1368 |
+ |
1369 |
+ error: |
1370 |
+ dma_fence_put(fence); |
1371 |
++ amdgpu_bo_unpin(bo); |
1372 |
+ amdgpu_bo_unreserve(bo); |
1373 |
+ amdgpu_bo_unref(&bo); |
1374 |
+ return r; |
1375 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c |
1376 |
+index ebbc04ff5da06..90138469648a9 100644 |
1377 |
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c |
1378 |
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c |
1379 |
+@@ -367,15 +367,14 @@ done: |
1380 |
+ static int vcn_v3_0_hw_fini(void *handle) |
1381 |
+ { |
1382 |
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1383 |
+- struct amdgpu_ring *ring; |
1384 |
+ int i; |
1385 |
+ |
1386 |
++ cancel_delayed_work_sync(&adev->vcn.idle_work); |
1387 |
++ |
1388 |
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1389 |
+ if (adev->vcn.harvest_config & (1 << i)) |
1390 |
+ continue; |
1391 |
+ |
1392 |
+- ring = &adev->vcn.inst[i].ring_dec; |
1393 |
+- |
1394 |
+ if (!amdgpu_sriov_vf(adev)) { |
1395 |
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || |
1396 |
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE && |
1397 |
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c |
1398 |
+index d2a678a2497e4..411494005f0ec 100644 |
1399 |
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c |
1400 |
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c |
1401 |
+@@ -1392,8 +1392,8 @@ static int live_breadcrumbs_smoketest(void *arg) |
1402 |
+ |
1403 |
+ for (n = 0; n < smoke[0].ncontexts; n++) { |
1404 |
+ smoke[0].contexts[n] = live_context(i915, file); |
1405 |
+- if (!smoke[0].contexts[n]) { |
1406 |
+- ret = -ENOMEM; |
1407 |
++ if (IS_ERR(smoke[0].contexts[n])) { |
1408 |
++ ret = PTR_ERR(smoke[0].contexts[n]); |
1409 |
+ goto out_contexts; |
1410 |
+ } |
1411 |
+ } |
1412 |
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
1413 |
+index 85f2c3564c966..fb061e666faa7 100644 |
1414 |
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
1415 |
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
1416 |
+@@ -933,8 +933,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) |
1417 |
+ DPU_DEBUG("REG_DMA is not defined"); |
1418 |
+ } |
1419 |
+ |
1420 |
+- if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) |
1421 |
+- dpu_kms_parse_data_bus_icc_path(dpu_kms); |
1422 |
++ dpu_kms_parse_data_bus_icc_path(dpu_kms); |
1423 |
+ |
1424 |
+ pm_runtime_get_sync(&dpu_kms->pdev->dev); |
1425 |
+ |
1426 |
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c |
1427 |
+index cd4078807db1b..3416e9617ee9a 100644 |
1428 |
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c |
1429 |
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c |
1430 |
+@@ -31,40 +31,8 @@ struct dpu_mdss { |
1431 |
+ void __iomem *mmio; |
1432 |
+ struct dss_module_power mp; |
1433 |
+ struct dpu_irq_controller irq_controller; |
1434 |
+- struct icc_path *path[2]; |
1435 |
+- u32 num_paths; |
1436 |
+ }; |
1437 |
+ |
1438 |
+-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev, |
1439 |
+- struct dpu_mdss *dpu_mdss) |
1440 |
+-{ |
1441 |
+- struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem"); |
1442 |
+- struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem"); |
1443 |
+- |
1444 |
+- if (IS_ERR_OR_NULL(path0)) |
1445 |
+- return PTR_ERR_OR_ZERO(path0); |
1446 |
+- |
1447 |
+- dpu_mdss->path[0] = path0; |
1448 |
+- dpu_mdss->num_paths = 1; |
1449 |
+- |
1450 |
+- if (!IS_ERR_OR_NULL(path1)) { |
1451 |
+- dpu_mdss->path[1] = path1; |
1452 |
+- dpu_mdss->num_paths++; |
1453 |
+- } |
1454 |
+- |
1455 |
+- return 0; |
1456 |
+-} |
1457 |
+- |
1458 |
+-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss) |
1459 |
+-{ |
1460 |
+- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); |
1461 |
+- int i; |
1462 |
+- u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0; |
1463 |
+- |
1464 |
+- for (i = 0; i < dpu_mdss->num_paths; i++) |
1465 |
+- icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW)); |
1466 |
+-} |
1467 |
+- |
1468 |
+ static void dpu_mdss_irq(struct irq_desc *desc) |
1469 |
+ { |
1470 |
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc); |
1471 |
+@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss) |
1472 |
+ struct dss_module_power *mp = &dpu_mdss->mp; |
1473 |
+ int ret; |
1474 |
+ |
1475 |
+- dpu_mdss_icc_request_bw(mdss); |
1476 |
+- |
1477 |
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); |
1478 |
+ if (ret) { |
1479 |
+ DPU_ERROR("clock enable failed, ret:%d\n", ret); |
1480 |
+@@ -213,15 +179,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss) |
1481 |
+ { |
1482 |
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); |
1483 |
+ struct dss_module_power *mp = &dpu_mdss->mp; |
1484 |
+- int ret, i; |
1485 |
++ int ret; |
1486 |
+ |
1487 |
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); |
1488 |
+ if (ret) |
1489 |
+ DPU_ERROR("clock disable failed, ret:%d\n", ret); |
1490 |
+ |
1491 |
+- for (i = 0; i < dpu_mdss->num_paths; i++) |
1492 |
+- icc_set_bw(dpu_mdss->path[i], 0, 0); |
1493 |
+- |
1494 |
+ return ret; |
1495 |
+ } |
1496 |
+ |
1497 |
+@@ -232,7 +195,6 @@ static void dpu_mdss_destroy(struct drm_device *dev) |
1498 |
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); |
1499 |
+ struct dss_module_power *mp = &dpu_mdss->mp; |
1500 |
+ int irq; |
1501 |
+- int i; |
1502 |
+ |
1503 |
+ pm_runtime_suspend(dev->dev); |
1504 |
+ pm_runtime_disable(dev->dev); |
1505 |
+@@ -242,9 +204,6 @@ static void dpu_mdss_destroy(struct drm_device *dev) |
1506 |
+ msm_dss_put_clk(mp->clk_config, mp->num_clk); |
1507 |
+ devm_kfree(&pdev->dev, mp->clk_config); |
1508 |
+ |
1509 |
+- for (i = 0; i < dpu_mdss->num_paths; i++) |
1510 |
+- icc_put(dpu_mdss->path[i]); |
1511 |
+- |
1512 |
+ if (dpu_mdss->mmio) |
1513 |
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio); |
1514 |
+ dpu_mdss->mmio = NULL; |
1515 |
+@@ -276,12 +235,6 @@ int dpu_mdss_init(struct drm_device *dev) |
1516 |
+ |
1517 |
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio); |
1518 |
+ |
1519 |
+- if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) { |
1520 |
+- ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); |
1521 |
+- if (ret) |
1522 |
+- return ret; |
1523 |
+- } |
1524 |
+- |
1525 |
+ mp = &dpu_mdss->mp; |
1526 |
+ ret = msm_dss_parse_clock(pdev, mp); |
1527 |
+ if (ret) { |
1528 |
+@@ -307,8 +260,6 @@ int dpu_mdss_init(struct drm_device *dev) |
1529 |
+ |
1530 |
+ pm_runtime_enable(dev->dev); |
1531 |
+ |
1532 |
+- dpu_mdss_icc_request_bw(priv->mdss); |
1533 |
+- |
1534 |
+ return ret; |
1535 |
+ |
1536 |
+ irq_error: |
1537 |
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
1538 |
+index 2ab38b7153477..ea9a4913932d6 100644 |
1539 |
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
1540 |
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
1541 |
+@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work) |
1542 |
+ sensor_index = req_node->sensor_idx; |
1543 |
+ report_id = req_node->report_id; |
1544 |
+ node_type = req_node->report_type; |
1545 |
++ kfree(req_node); |
1546 |
+ |
1547 |
+ if (node_type == HID_FEATURE_REPORT) { |
1548 |
+ report_size = get_feature_report(sensor_index, report_id, |
1549 |
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c |
1550 |
+index d459e2dbe6474..f7710fb2f48d2 100644 |
1551 |
+--- a/drivers/hid/hid-logitech-hidpp.c |
1552 |
++++ b/drivers/hid/hid-logitech-hidpp.c |
1553 |
+@@ -1262,6 +1262,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, |
1554 |
+ int status; |
1555 |
+ |
1556 |
+ long flags = (long) data[2]; |
1557 |
++ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; |
1558 |
+ |
1559 |
+ if (flags & 0x80) |
1560 |
+ switch (flags & 0x07) { |
1561 |
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c |
1562 |
+index abd86903875f0..fc4c074597539 100644 |
1563 |
+--- a/drivers/hid/hid-magicmouse.c |
1564 |
++++ b/drivers/hid/hid-magicmouse.c |
1565 |
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev, |
1566 |
+ if (id->vendor == USB_VENDOR_ID_APPLE && |
1567 |
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && |
1568 |
+ hdev->type != HID_TYPE_USBMOUSE) |
1569 |
+- return 0; |
1570 |
++ return -ENODEV; |
1571 |
+ |
1572 |
+ msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); |
1573 |
+ if (msc == NULL) { |
1574 |
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c |
1575 |
+index 9d9f3e1bd5f41..55dcb8536286b 100644 |
1576 |
+--- a/drivers/hid/hid-multitouch.c |
1577 |
++++ b/drivers/hid/hid-multitouch.c |
1578 |
+@@ -604,9 +604,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td, |
1579 |
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags)) |
1580 |
+ continue; |
1581 |
+ |
1582 |
+- for (n = 0; n < field->report_count; n++) { |
1583 |
+- if (field->usage[n].hid == HID_DG_CONTACTID) |
1584 |
+- rdata->is_mt_collection = true; |
1585 |
++ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) { |
1586 |
++ for (n = 0; n < field->report_count; n++) { |
1587 |
++ if (field->usage[n].hid == HID_DG_CONTACTID) { |
1588 |
++ rdata->is_mt_collection = true; |
1589 |
++ break; |
1590 |
++ } |
1591 |
++ } |
1592 |
+ } |
1593 |
+ } |
1594 |
+ |
1595 |
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c |
1596 |
+index 9993133989a58..46474612e73c6 100644 |
1597 |
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c |
1598 |
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c |
1599 |
+@@ -45,6 +45,7 @@ |
1600 |
+ #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) |
1601 |
+ #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) |
1602 |
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) |
1603 |
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7) |
1604 |
+ |
1605 |
+ |
1606 |
+ /* flags */ |
1607 |
+@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks { |
1608 |
+ I2C_HID_QUIRK_RESET_ON_RESUME }, |
1609 |
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, |
1610 |
+ I2C_HID_QUIRK_BAD_INPUT_SIZE }, |
1611 |
++ /* |
1612 |
++ * Sending the wakeup after reset actually break ELAN touchscreen controller |
1613 |
++ */ |
1614 |
++ { USB_VENDOR_ID_ELAN, HID_ANY_ID, |
1615 |
++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET }, |
1616 |
+ { 0, 0 } |
1617 |
+ }; |
1618 |
+ |
1619 |
+@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client) |
1620 |
+ } |
1621 |
+ |
1622 |
+ /* At least some SIS devices need this after reset */ |
1623 |
+- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); |
1624 |
++ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET)) |
1625 |
++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); |
1626 |
+ |
1627 |
+ out_unlock: |
1628 |
+ mutex_unlock(&ihid->reset_lock); |
1629 |
+@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, |
1630 |
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); |
1631 |
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID); |
1632 |
+ |
1633 |
+- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", |
1634 |
+- client->name, hid->vendor, hid->product); |
1635 |
++ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", |
1636 |
++ client->name, (u16)hid->vendor, (u16)hid->product); |
1637 |
+ strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys)); |
1638 |
+ |
1639 |
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product); |
1640 |
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c |
1641 |
+index fddac7c72f645..07a9fe97d2e05 100644 |
1642 |
+--- a/drivers/hid/usbhid/hid-pidff.c |
1643 |
++++ b/drivers/hid/usbhid/hid-pidff.c |
1644 |
+@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid) |
1645 |
+ |
1646 |
+ if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && |
1647 |
+ pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { |
1648 |
++ error = -EPERM; |
1649 |
+ hid_notice(hid, |
1650 |
+ "device does not support device managed pool\n"); |
1651 |
+ goto fail; |
1652 |
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c |
1653 |
+index 73b9db9e3aab6..63b74e781c5d9 100644 |
1654 |
+--- a/drivers/hwmon/dell-smm-hwmon.c |
1655 |
++++ b/drivers/hwmon/dell-smm-hwmon.c |
1656 |
+@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = { |
1657 |
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, |
1658 |
+ int index) |
1659 |
+ { |
1660 |
+- if (disallow_fan_support && index >= 8) |
1661 |
++ if (disallow_fan_support && index >= 20) |
1662 |
+ return 0; |
1663 |
+ if (disallow_fan_type_call && |
1664 |
+- (index == 9 || index == 12 || index == 15)) |
1665 |
++ (index == 21 || index == 25 || index == 28)) |
1666 |
+ return 0; |
1667 |
+ if (index >= 0 && index <= 1 && |
1668 |
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) |
1669 |
+diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c |
1670 |
+index 2bee930d39002..789242ed72e5d 100644 |
1671 |
+--- a/drivers/hwmon/pmbus/isl68137.c |
1672 |
++++ b/drivers/hwmon/pmbus/isl68137.c |
1673 |
+@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client) |
1674 |
+ info->read_word_data = raa_dmpvr2_read_word_data; |
1675 |
+ break; |
1676 |
+ case raa_dmpvr2_2rail_nontc: |
1677 |
+- info->func[0] &= ~PMBUS_HAVE_TEMP; |
1678 |
+- info->func[1] &= ~PMBUS_HAVE_TEMP; |
1679 |
++ info->func[0] &= ~PMBUS_HAVE_TEMP3; |
1680 |
++ info->func[1] &= ~PMBUS_HAVE_TEMP3; |
1681 |
+ fallthrough; |
1682 |
+ case raa_dmpvr2_2rail: |
1683 |
+ info->pages = 2; |
1684 |
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c |
1685 |
+index 214b4c913a139..671f4a52275ec 100644 |
1686 |
+--- a/drivers/i2c/busses/i2c-qcom-geni.c |
1687 |
++++ b/drivers/i2c/busses/i2c-qcom-geni.c |
1688 |
+@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev) |
1689 |
+ return 0; |
1690 |
+ } |
1691 |
+ |
1692 |
++static void geni_i2c_shutdown(struct platform_device *pdev) |
1693 |
++{ |
1694 |
++ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); |
1695 |
++ |
1696 |
++ /* Make client i2c transfers start failing */ |
1697 |
++ i2c_mark_adapter_suspended(&gi2c->adap); |
1698 |
++} |
1699 |
++ |
1700 |
+ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev) |
1701 |
+ { |
1702 |
+ int ret; |
1703 |
+@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev) |
1704 |
+ { |
1705 |
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); |
1706 |
+ |
1707 |
++ i2c_mark_adapter_suspended(&gi2c->adap); |
1708 |
++ |
1709 |
+ if (!gi2c->suspended) { |
1710 |
+ geni_i2c_runtime_suspend(dev); |
1711 |
+ pm_runtime_disable(dev); |
1712 |
+@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev) |
1713 |
+ return 0; |
1714 |
+ } |
1715 |
+ |
1716 |
++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev) |
1717 |
++{ |
1718 |
++ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev); |
1719 |
++ |
1720 |
++ i2c_mark_adapter_resumed(&gi2c->adap); |
1721 |
++ return 0; |
1722 |
++} |
1723 |
++ |
1724 |
+ static const struct dev_pm_ops geni_i2c_pm_ops = { |
1725 |
+- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL) |
1726 |
++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq) |
1727 |
+ SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume, |
1728 |
+ NULL) |
1729 |
+ }; |
1730 |
+@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match); |
1731 |
+ static struct platform_driver geni_i2c_driver = { |
1732 |
+ .probe = geni_i2c_probe, |
1733 |
+ .remove = geni_i2c_remove, |
1734 |
++ .shutdown = geni_i2c_shutdown, |
1735 |
+ .driver = { |
1736 |
+ .name = "geni_i2c", |
1737 |
+ .pm = &geni_i2c_pm_ops, |
1738 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h |
1739 |
+index 314f8d8067231..9058f09f921ee 100644 |
1740 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h |
1741 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h |
1742 |
+@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, |
1743 |
+ bool persistent, u8 *smt_idx); |
1744 |
+ int cxgb4_get_msix_idx_from_bmap(struct adapter *adap); |
1745 |
+ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx); |
1746 |
+-int cxgb_open(struct net_device *dev); |
1747 |
+-int cxgb_close(struct net_device *dev); |
1748 |
+ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q); |
1749 |
+ void cxgb4_quiesce_rx(struct sge_rspq *q); |
1750 |
+ int cxgb4_port_mirror_alloc(struct net_device *dev); |
1751 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
1752 |
+index 421bd9b88028d..1f601de02e706 100644 |
1753 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
1754 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
1755 |
+@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter) |
1756 |
+ /* |
1757 |
+ * net_device operations |
1758 |
+ */ |
1759 |
+-int cxgb_open(struct net_device *dev) |
1760 |
++static int cxgb_open(struct net_device *dev) |
1761 |
+ { |
1762 |
+ struct port_info *pi = netdev_priv(dev); |
1763 |
+ struct adapter *adapter = pi->adapter; |
1764 |
+@@ -2882,7 +2882,7 @@ out_unlock: |
1765 |
+ return err; |
1766 |
+ } |
1767 |
+ |
1768 |
+-int cxgb_close(struct net_device *dev) |
1769 |
++static int cxgb_close(struct net_device *dev) |
1770 |
+ { |
1771 |
+ struct port_info *pi = netdev_priv(dev); |
1772 |
+ struct adapter *adapter = pi->adapter; |
1773 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
1774 |
+index 1b88bd1c2dbe4..dd9be229819a5 100644 |
1775 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
1776 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
1777 |
+@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, |
1778 |
+ if (!ch_flower) |
1779 |
+ return -ENOENT; |
1780 |
+ |
1781 |
++ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node, |
1782 |
++ adap->flower_ht_params); |
1783 |
++ |
1784 |
+ ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio, |
1785 |
+ &ch_flower->fs, ch_flower->filter_id); |
1786 |
+ if (ret) |
1787 |
+- goto err; |
1788 |
++ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d", |
1789 |
++ ch_flower->filter_id, ret); |
1790 |
+ |
1791 |
+- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node, |
1792 |
+- adap->flower_ht_params); |
1793 |
+- if (ret) { |
1794 |
+- netdev_err(dev, "Flow remove from rhashtable failed"); |
1795 |
+- goto err; |
1796 |
+- } |
1797 |
+ kfree_rcu(ch_flower, rcu); |
1798 |
+- |
1799 |
+-err: |
1800 |
+ return ret; |
1801 |
+ } |
1802 |
+ |
1803 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c |
1804 |
+index 6c259de96f969..338b04f339b3d 100644 |
1805 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c |
1806 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c |
1807 |
+@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev, |
1808 |
+ * down before configuring tc params. |
1809 |
+ */ |
1810 |
+ if (netif_running(dev)) { |
1811 |
+- cxgb_close(dev); |
1812 |
++ netif_tx_stop_all_queues(dev); |
1813 |
++ netif_carrier_off(dev); |
1814 |
+ needs_bring_up = true; |
1815 |
+ } |
1816 |
+ |
1817 |
+@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev, |
1818 |
+ } |
1819 |
+ |
1820 |
+ out: |
1821 |
+- if (needs_bring_up) |
1822 |
+- cxgb_open(dev); |
1823 |
++ if (needs_bring_up) { |
1824 |
++ netif_tx_start_all_queues(dev); |
1825 |
++ netif_carrier_on(dev); |
1826 |
++ } |
1827 |
+ |
1828 |
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex); |
1829 |
+ return ret; |
1830 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1831 |
+index 1e5f2edb70cf4..6a099cb34b122 100644 |
1832 |
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1833 |
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1834 |
+@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) |
1835 |
+ if (!eosw_txq) |
1836 |
+ return -ENOMEM; |
1837 |
+ |
1838 |
++ if (!(adap->flags & CXGB4_FW_OK)) { |
1839 |
++ /* Don't stall caller when access to FW is lost */ |
1840 |
++ complete(&eosw_txq->completion); |
1841 |
++ return -EIO; |
1842 |
++ } |
1843 |
++ |
1844 |
+ skb = alloc_skb(len, GFP_KERNEL); |
1845 |
+ if (!skb) |
1846 |
+ return -ENOMEM; |
1847 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1848 |
+index 70b515049540f..c358d90498813 100644 |
1849 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1850 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1851 |
+@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) |
1852 |
+ case XDP_TX: |
1853 |
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; |
1854 |
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); |
1855 |
++ if (result == I40E_XDP_CONSUMED) |
1856 |
++ goto out_failure; |
1857 |
+ break; |
1858 |
+ case XDP_REDIRECT: |
1859 |
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
1860 |
+- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; |
1861 |
++ if (err) |
1862 |
++ goto out_failure; |
1863 |
++ result = I40E_XDP_REDIR; |
1864 |
+ break; |
1865 |
+ default: |
1866 |
+ bpf_warn_invalid_xdp_action(act); |
1867 |
+ fallthrough; |
1868 |
+ case XDP_ABORTED: |
1869 |
++out_failure: |
1870 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
1871 |
+ fallthrough; /* handle aborts by dropping packet */ |
1872 |
+ case XDP_DROP: |
1873 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c |
1874 |
+index 12ca84113587d..5b39c457bd77b 100644 |
1875 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c |
1876 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c |
1877 |
+@@ -160,21 +160,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) |
1878 |
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
1879 |
+ act = bpf_prog_run_xdp(xdp_prog, xdp); |
1880 |
+ |
1881 |
++ if (likely(act == XDP_REDIRECT)) { |
1882 |
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
1883 |
++ if (err) |
1884 |
++ goto out_failure; |
1885 |
++ rcu_read_unlock(); |
1886 |
++ return I40E_XDP_REDIR; |
1887 |
++ } |
1888 |
++ |
1889 |
+ switch (act) { |
1890 |
+ case XDP_PASS: |
1891 |
+ break; |
1892 |
+ case XDP_TX: |
1893 |
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; |
1894 |
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); |
1895 |
+- break; |
1896 |
+- case XDP_REDIRECT: |
1897 |
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
1898 |
+- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; |
1899 |
++ if (result == I40E_XDP_CONSUMED) |
1900 |
++ goto out_failure; |
1901 |
+ break; |
1902 |
+ default: |
1903 |
+ bpf_warn_invalid_xdp_action(act); |
1904 |
+ fallthrough; |
1905 |
+ case XDP_ABORTED: |
1906 |
++out_failure: |
1907 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
1908 |
+ fallthrough; /* handle aborts by dropping packet */ |
1909 |
+ case XDP_DROP: |
1910 |
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h |
1911 |
+index 17101c45cbcd8..f668296ca6779 100644 |
1912 |
+--- a/drivers/net/ethernet/intel/ice/ice.h |
1913 |
++++ b/drivers/net/ethernet/intel/ice/ice.h |
1914 |
+@@ -325,6 +325,7 @@ struct ice_vsi { |
1915 |
+ struct ice_tc_cfg tc_cfg; |
1916 |
+ struct bpf_prog *xdp_prog; |
1917 |
+ struct ice_ring **xdp_rings; /* XDP ring array */ |
1918 |
++ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ |
1919 |
+ u16 num_xdp_txq; /* Used XDP queues */ |
1920 |
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ |
1921 |
+ |
1922 |
+@@ -534,15 +535,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring) |
1923 |
+ */ |
1924 |
+ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring) |
1925 |
+ { |
1926 |
++ struct ice_vsi *vsi = ring->vsi; |
1927 |
+ u16 qid = ring->q_index; |
1928 |
+ |
1929 |
+ if (ice_ring_is_xdp(ring)) |
1930 |
+- qid -= ring->vsi->num_xdp_txq; |
1931 |
++ qid -= vsi->num_xdp_txq; |
1932 |
+ |
1933 |
+- if (!ice_is_xdp_ena_vsi(ring->vsi)) |
1934 |
++ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) |
1935 |
+ return NULL; |
1936 |
+ |
1937 |
+- return xsk_get_pool_from_qid(ring->vsi->netdev, qid); |
1938 |
++ return xsk_get_pool_from_qid(vsi->netdev, qid); |
1939 |
+ } |
1940 |
+ |
1941 |
+ /** |
1942 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c |
1943 |
+index 32ba71a161652..f80fff97d8dce 100644 |
1944 |
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c |
1945 |
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c |
1946 |
+@@ -1797,49 +1797,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev, |
1947 |
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, |
1948 |
+ 100000baseKR4_Full); |
1949 |
+ } |
1950 |
+- |
1951 |
+- /* Autoneg PHY types */ |
1952 |
+- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || |
1953 |
+- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || |
1954 |
+- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || |
1955 |
+- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || |
1956 |
+- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || |
1957 |
+- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || |
1958 |
+- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || |
1959 |
+- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || |
1960 |
+- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || |
1961 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || |
1962 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || |
1963 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || |
1964 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || |
1965 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || |
1966 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || |
1967 |
+- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || |
1968 |
+- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || |
1969 |
+- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { |
1970 |
+- ethtool_link_ksettings_add_link_mode(ks, supported, |
1971 |
+- Autoneg); |
1972 |
+- ethtool_link_ksettings_add_link_mode(ks, advertising, |
1973 |
+- Autoneg); |
1974 |
+- } |
1975 |
+- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || |
1976 |
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || |
1977 |
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || |
1978 |
+- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { |
1979 |
+- ethtool_link_ksettings_add_link_mode(ks, supported, |
1980 |
+- Autoneg); |
1981 |
+- ethtool_link_ksettings_add_link_mode(ks, advertising, |
1982 |
+- Autoneg); |
1983 |
+- } |
1984 |
+- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || |
1985 |
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || |
1986 |
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || |
1987 |
+- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { |
1988 |
+- ethtool_link_ksettings_add_link_mode(ks, supported, |
1989 |
+- Autoneg); |
1990 |
+- ethtool_link_ksettings_add_link_mode(ks, advertising, |
1991 |
+- Autoneg); |
1992 |
+- } |
1993 |
+ } |
1994 |
+ |
1995 |
+ #define TEST_SET_BITS_TIMEOUT 50 |
1996 |
+@@ -1996,9 +1953,7 @@ ice_get_link_ksettings(struct net_device *netdev, |
1997 |
+ ks->base.port = PORT_TP; |
1998 |
+ break; |
1999 |
+ case ICE_MEDIA_BACKPLANE: |
2000 |
+- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); |
2001 |
+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); |
2002 |
+- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); |
2003 |
+ ethtool_link_ksettings_add_link_mode(ks, advertising, |
2004 |
+ Backplane); |
2005 |
+ ks->base.port = PORT_NONE; |
2006 |
+@@ -2073,6 +2028,12 @@ ice_get_link_ksettings(struct net_device *netdev, |
2007 |
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) |
2008 |
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); |
2009 |
+ |
2010 |
++ /* Set supported and advertised autoneg */ |
2011 |
++ if (ice_is_phy_caps_an_enabled(caps)) { |
2012 |
++ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); |
2013 |
++ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); |
2014 |
++ } |
2015 |
++ |
2016 |
+ done: |
2017 |
+ kfree(caps); |
2018 |
+ return err; |
2019 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h |
2020 |
+index 093a1818a3929..1998821896c0f 100644 |
2021 |
+--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h |
2022 |
++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h |
2023 |
+@@ -31,6 +31,7 @@ |
2024 |
+ #define PF_FW_ATQLEN_ATQOVFL_M BIT(29) |
2025 |
+ #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) |
2026 |
+ #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) |
2027 |
++#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4)) |
2028 |
+ #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) |
2029 |
+ #define PF_FW_ATQT 0x00080400 |
2030 |
+ #define PF_MBX_ARQBAH 0x0022E400 |
2031 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c |
2032 |
+index 195d122c9cb22..27e439853c3b0 100644 |
2033 |
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c |
2034 |
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c |
2035 |
+@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) |
2036 |
+ if (!vsi->q_vectors) |
2037 |
+ goto err_vectors; |
2038 |
+ |
2039 |
++ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); |
2040 |
++ if (!vsi->af_xdp_zc_qps) |
2041 |
++ goto err_zc_qps; |
2042 |
++ |
2043 |
+ return 0; |
2044 |
+ |
2045 |
++err_zc_qps: |
2046 |
++ devm_kfree(dev, vsi->q_vectors); |
2047 |
+ err_vectors: |
2048 |
+ devm_kfree(dev, vsi->rxq_map); |
2049 |
+ err_rxq_map: |
2050 |
+@@ -192,6 +198,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) |
2051 |
+ break; |
2052 |
+ case ICE_VSI_VF: |
2053 |
+ vf = &pf->vf[vsi->vf_id]; |
2054 |
++ if (vf->num_req_qs) |
2055 |
++ vf->num_vf_qs = vf->num_req_qs; |
2056 |
+ vsi->alloc_txq = vf->num_vf_qs; |
2057 |
+ vsi->alloc_rxq = vf->num_vf_qs; |
2058 |
+ /* pf->num_msix_per_vf includes (VF miscellaneous vector + |
2059 |
+@@ -286,6 +294,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) |
2060 |
+ |
2061 |
+ dev = ice_pf_to_dev(pf); |
2062 |
+ |
2063 |
++ if (vsi->af_xdp_zc_qps) { |
2064 |
++ bitmap_free(vsi->af_xdp_zc_qps); |
2065 |
++ vsi->af_xdp_zc_qps = NULL; |
2066 |
++ } |
2067 |
+ /* free the ring and vector containers */ |
2068 |
+ if (vsi->q_vectors) { |
2069 |
+ devm_kfree(dev, vsi->q_vectors); |
2070 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c |
2071 |
+index b91dcfd12727d..113e53efffd71 100644 |
2072 |
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c |
2073 |
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c |
2074 |
+@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, |
2075 |
+ struct bpf_prog *xdp_prog) |
2076 |
+ { |
2077 |
+ struct ice_ring *xdp_ring; |
2078 |
+- int err; |
2079 |
++ int err, result; |
2080 |
+ u32 act; |
2081 |
+ |
2082 |
+ act = bpf_prog_run_xdp(xdp_prog, xdp); |
2083 |
+@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, |
2084 |
+ return ICE_XDP_PASS; |
2085 |
+ case XDP_TX: |
2086 |
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; |
2087 |
+- return ice_xmit_xdp_buff(xdp, xdp_ring); |
2088 |
++ result = ice_xmit_xdp_buff(xdp, xdp_ring); |
2089 |
++ if (result == ICE_XDP_CONSUMED) |
2090 |
++ goto out_failure; |
2091 |
++ return result; |
2092 |
+ case XDP_REDIRECT: |
2093 |
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2094 |
+- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; |
2095 |
++ if (err) |
2096 |
++ goto out_failure; |
2097 |
++ return ICE_XDP_REDIR; |
2098 |
+ default: |
2099 |
+ bpf_warn_invalid_xdp_action(act); |
2100 |
+ fallthrough; |
2101 |
+ case XDP_ABORTED: |
2102 |
++out_failure: |
2103 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2104 |
+ fallthrough; |
2105 |
+ case XDP_DROP: |
2106 |
+@@ -2331,6 +2337,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) |
2107 |
+ struct ice_tx_offload_params offload = { 0 }; |
2108 |
+ struct ice_vsi *vsi = tx_ring->vsi; |
2109 |
+ struct ice_tx_buf *first; |
2110 |
++ struct ethhdr *eth; |
2111 |
+ unsigned int count; |
2112 |
+ int tso, csum; |
2113 |
+ |
2114 |
+@@ -2377,7 +2384,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) |
2115 |
+ goto out_drop; |
2116 |
+ |
2117 |
+ /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ |
2118 |
+- if (unlikely(skb->priority == TC_PRIO_CONTROL && |
2119 |
++ eth = (struct ethhdr *)skb_mac_header(skb); |
2120 |
++ if (unlikely((skb->priority == TC_PRIO_CONTROL || |
2121 |
++ eth->h_proto == htons(ETH_P_LLDP)) && |
2122 |
+ vsi->type == ICE_VSI_PF && |
2123 |
+ vsi->port_info->qos_cfg.is_sw_lldp)) |
2124 |
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | |
2125 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c |
2126 |
+index 1f38a8d0c5254..48dee9c5d534b 100644 |
2127 |
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c |
2128 |
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c |
2129 |
+@@ -435,13 +435,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) |
2130 |
+ */ |
2131 |
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); |
2132 |
+ |
2133 |
+- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it |
2134 |
+- * in the case of VFR. If this is done for PFR, it can mess up VF |
2135 |
+- * resets because the VF driver may already have started cleanup |
2136 |
+- * by the time we get here. |
2137 |
++ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver |
2138 |
++ * needs to clear them in the case of VFR/VFLR. If this is done for |
2139 |
++ * PFR, it can mess up VF resets because the VF driver may already |
2140 |
++ * have started cleanup by the time we get here. |
2141 |
+ */ |
2142 |
+- if (!is_pfr) |
2143 |
++ if (!is_pfr) { |
2144 |
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); |
2145 |
++ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0); |
2146 |
++ } |
2147 |
+ |
2148 |
+ /* In the case of a VFLR, the HW has already reset the VF and we |
2149 |
+ * just need to clean up, so don't hit the VFRTRIG register. |
2150 |
+@@ -1375,7 +1377,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) |
2151 |
+ } |
2152 |
+ |
2153 |
+ ice_vf_pre_vsi_rebuild(vf); |
2154 |
+- ice_vf_rebuild_vsi_with_release(vf); |
2155 |
++ |
2156 |
++ if (ice_vf_rebuild_vsi_with_release(vf)) { |
2157 |
++ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); |
2158 |
++ return false; |
2159 |
++ } |
2160 |
++ |
2161 |
+ ice_vf_post_vsi_rebuild(vf); |
2162 |
+ |
2163 |
+ return true; |
2164 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c |
2165 |
+index 9f94d9159acde..f1d4240e57df3 100644 |
2166 |
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c |
2167 |
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c |
2168 |
+@@ -273,6 +273,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) |
2169 |
+ if (!pool) |
2170 |
+ return -EINVAL; |
2171 |
+ |
2172 |
++ clear_bit(qid, vsi->af_xdp_zc_qps); |
2173 |
+ xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); |
2174 |
+ |
2175 |
+ return 0; |
2176 |
+@@ -303,6 +304,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) |
2177 |
+ if (err) |
2178 |
+ return err; |
2179 |
+ |
2180 |
++ set_bit(qid, vsi->af_xdp_zc_qps); |
2181 |
++ |
2182 |
+ return 0; |
2183 |
+ } |
2184 |
+ |
2185 |
+@@ -473,21 +476,29 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) |
2186 |
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
2187 |
+ |
2188 |
+ act = bpf_prog_run_xdp(xdp_prog, xdp); |
2189 |
++ |
2190 |
++ if (likely(act == XDP_REDIRECT)) { |
2191 |
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2192 |
++ if (err) |
2193 |
++ goto out_failure; |
2194 |
++ rcu_read_unlock(); |
2195 |
++ return ICE_XDP_REDIR; |
2196 |
++ } |
2197 |
++ |
2198 |
+ switch (act) { |
2199 |
+ case XDP_PASS: |
2200 |
+ break; |
2201 |
+ case XDP_TX: |
2202 |
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; |
2203 |
+ result = ice_xmit_xdp_buff(xdp, xdp_ring); |
2204 |
+- break; |
2205 |
+- case XDP_REDIRECT: |
2206 |
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2207 |
+- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; |
2208 |
++ if (result == ICE_XDP_CONSUMED) |
2209 |
++ goto out_failure; |
2210 |
+ break; |
2211 |
+ default: |
2212 |
+ bpf_warn_invalid_xdp_action(act); |
2213 |
+ fallthrough; |
2214 |
+ case XDP_ABORTED: |
2215 |
++out_failure: |
2216 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2217 |
+ fallthrough; |
2218 |
+ case XDP_DROP: |
2219 |
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h |
2220 |
+index 7bda8c5edea5d..2d3daf022651c 100644 |
2221 |
+--- a/drivers/net/ethernet/intel/igb/igb.h |
2222 |
++++ b/drivers/net/ethernet/intel/igb/igb.h |
2223 |
+@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter); |
2224 |
+ void igb_ptp_tx_hang(struct igb_adapter *adapter); |
2225 |
+ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); |
2226 |
+ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, |
2227 |
+- struct sk_buff *skb); |
2228 |
++ ktime_t *timestamp); |
2229 |
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); |
2230 |
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); |
2231 |
+ void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); |
2232 |
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
2233 |
+index a45cd2b416c89..caa8929289ae7 100644 |
2234 |
+--- a/drivers/net/ethernet/intel/igb/igb_main.c |
2235 |
++++ b/drivers/net/ethernet/intel/igb/igb_main.c |
2236 |
+@@ -8281,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring, |
2237 |
+ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, |
2238 |
+ struct igb_rx_buffer *rx_buffer, |
2239 |
+ struct xdp_buff *xdp, |
2240 |
+- union e1000_adv_rx_desc *rx_desc) |
2241 |
++ ktime_t timestamp) |
2242 |
+ { |
2243 |
+ #if (PAGE_SIZE < 8192) |
2244 |
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
2245 |
+@@ -8301,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, |
2246 |
+ if (unlikely(!skb)) |
2247 |
+ return NULL; |
2248 |
+ |
2249 |
+- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { |
2250 |
+- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { |
2251 |
+- xdp->data += IGB_TS_HDR_LEN; |
2252 |
+- size -= IGB_TS_HDR_LEN; |
2253 |
+- } |
2254 |
+- } |
2255 |
++ if (timestamp) |
2256 |
++ skb_hwtstamps(skb)->hwtstamp = timestamp; |
2257 |
+ |
2258 |
+ /* Determine available headroom for copy */ |
2259 |
+ headlen = size; |
2260 |
+@@ -8337,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, |
2261 |
+ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, |
2262 |
+ struct igb_rx_buffer *rx_buffer, |
2263 |
+ struct xdp_buff *xdp, |
2264 |
+- union e1000_adv_rx_desc *rx_desc) |
2265 |
++ ktime_t timestamp) |
2266 |
+ { |
2267 |
+ #if (PAGE_SIZE < 8192) |
2268 |
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
2269 |
+@@ -8364,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, |
2270 |
+ if (metasize) |
2271 |
+ skb_metadata_set(skb, metasize); |
2272 |
+ |
2273 |
+- /* pull timestamp out of packet data */ |
2274 |
+- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { |
2275 |
+- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) |
2276 |
+- __skb_pull(skb, IGB_TS_HDR_LEN); |
2277 |
+- } |
2278 |
++ if (timestamp) |
2279 |
++ skb_hwtstamps(skb)->hwtstamp = timestamp; |
2280 |
+ |
2281 |
+ /* update buffer offset */ |
2282 |
+ #if (PAGE_SIZE < 8192) |
2283 |
+@@ -8402,18 +8395,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, |
2284 |
+ break; |
2285 |
+ case XDP_TX: |
2286 |
+ result = igb_xdp_xmit_back(adapter, xdp); |
2287 |
++ if (result == IGB_XDP_CONSUMED) |
2288 |
++ goto out_failure; |
2289 |
+ break; |
2290 |
+ case XDP_REDIRECT: |
2291 |
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); |
2292 |
+- if (!err) |
2293 |
+- result = IGB_XDP_REDIR; |
2294 |
+- else |
2295 |
+- result = IGB_XDP_CONSUMED; |
2296 |
++ if (err) |
2297 |
++ goto out_failure; |
2298 |
++ result = IGB_XDP_REDIR; |
2299 |
+ break; |
2300 |
+ default: |
2301 |
+ bpf_warn_invalid_xdp_action(act); |
2302 |
+ fallthrough; |
2303 |
+ case XDP_ABORTED: |
2304 |
++out_failure: |
2305 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2306 |
+ fallthrough; |
2307 |
+ case XDP_DROP: |
2308 |
+@@ -8683,7 +8678,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
2309 |
+ while (likely(total_packets < budget)) { |
2310 |
+ union e1000_adv_rx_desc *rx_desc; |
2311 |
+ struct igb_rx_buffer *rx_buffer; |
2312 |
++ ktime_t timestamp = 0; |
2313 |
++ int pkt_offset = 0; |
2314 |
+ unsigned int size; |
2315 |
++ void *pktbuf; |
2316 |
+ |
2317 |
+ /* return some buffers to hardware, one at a time is too slow */ |
2318 |
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { |
2319 |
+@@ -8703,14 +8701,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
2320 |
+ dma_rmb(); |
2321 |
+ |
2322 |
+ rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); |
2323 |
++ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; |
2324 |
++ |
2325 |
++ /* pull rx packet timestamp if available and valid */ |
2326 |
++ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { |
2327 |
++ int ts_hdr_len; |
2328 |
++ |
2329 |
++ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, |
2330 |
++ pktbuf, ×tamp); |
2331 |
++ pktbuf, &timestamp); |
2332 |
++ pkt_offset += ts_hdr_len; |
2333 |
++ size -= ts_hdr_len; |
2334 |
++ } |
2335 |
+ |
2336 |
+ /* retrieve a buffer from the ring */ |
2337 |
+ if (!skb) { |
2338 |
+- unsigned int offset = igb_rx_offset(rx_ring); |
2339 |
+- unsigned char *hard_start; |
2340 |
++ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring); |
2341 |
++ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring); |
2342 |
+ |
2343 |
+- hard_start = page_address(rx_buffer->page) + |
2344 |
+- rx_buffer->page_offset - offset; |
2345 |
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true); |
2346 |
+ #if (PAGE_SIZE > 4096) |
2347 |
+ /* At larger PAGE_SIZE, frame_sz depend on len size */ |
2348 |
+@@ -8733,10 +8741,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
2349 |
+ } else if (skb) |
2350 |
+ igb_add_rx_frag(rx_ring, rx_buffer, skb, size); |
2351 |
+ else if (ring_uses_build_skb(rx_ring)) |
2352 |
+- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); |
2353 |
++ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, |
2354 |
++ timestamp); |
2355 |
+ else |
2356 |
+ skb = igb_construct_skb(rx_ring, rx_buffer, |
2357 |
+- &xdp, rx_desc); |
2358 |
++ &xdp, timestamp); |
2359 |
+ |
2360 |
+ /* exit if we failed to retrieve a buffer */ |
2361 |
+ if (!skb) { |
2362 |
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c |
2363 |
+index 86a576201f5ff..58b25f26ea7f2 100644 |
2364 |
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c |
2365 |
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c |
2366 |
+@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) |
2367 |
+ dev_kfree_skb_any(skb); |
2368 |
+ } |
2369 |
+ |
2370 |
+-#define IGB_RET_PTP_DISABLED 1 |
2371 |
+-#define IGB_RET_PTP_INVALID 2 |
2372 |
+- |
2373 |
+ /** |
2374 |
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp |
2375 |
+ * @q_vector: Pointer to interrupt specific structure |
2376 |
+ * @va: Pointer to address containing Rx buffer |
2377 |
+- * @skb: Buffer containing timestamp and packet |
2378 |
++ * @timestamp: Pointer where timestamp will be stored |
2379 |
+ * |
2380 |
+ * This function is meant to retrieve a timestamp from the first buffer of an |
2381 |
+ * incoming frame. The value is stored in little endian format starting on |
2382 |
+ * byte 8 |
2383 |
+ * |
2384 |
+- * Returns: 0 if success, nonzero if failure |
2385 |
++ * Returns: The timestamp header length or 0 if not available |
2386 |
+ **/ |
2387 |
+ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, |
2388 |
+- struct sk_buff *skb) |
2389 |
++ ktime_t *timestamp) |
2390 |
+ { |
2391 |
+ struct igb_adapter *adapter = q_vector->adapter; |
2392 |
++ struct skb_shared_hwtstamps ts; |
2393 |
+ __le64 *regval = (__le64 *)va; |
2394 |
+ int adjust = 0; |
2395 |
+ |
2396 |
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) |
2397 |
+- return IGB_RET_PTP_DISABLED; |
2398 |
++ return 0; |
2399 |
+ |
2400 |
+ /* The timestamp is recorded in little endian format. |
2401 |
+ * DWORD: 0 1 2 3 |
2402 |
+@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, |
2403 |
+ |
2404 |
+ /* check reserved dwords are zero, be/le doesn't matter for zero */ |
2405 |
+ if (regval[0]) |
2406 |
+- return IGB_RET_PTP_INVALID; |
2407 |
++ return 0; |
2408 |
+ |
2409 |
+- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), |
2410 |
+- le64_to_cpu(regval[1])); |
2411 |
++ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); |
2412 |
+ |
2413 |
+ /* adjust timestamp for the RX latency based on link speed */ |
2414 |
+ if (adapter->hw.mac.type == e1000_i210) { |
2415 |
+@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, |
2416 |
+ break; |
2417 |
+ } |
2418 |
+ } |
2419 |
+- skb_hwtstamps(skb)->hwtstamp = |
2420 |
+- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); |
2421 |
+ |
2422 |
+- return 0; |
2423 |
++ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); |
2424 |
++ |
2425 |
++ return IGB_TS_HDR_LEN; |
2426 |
+ } |
2427 |
+ |
2428 |
+ /** |
2429 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
2430 |
+index cffb95f8f6326..c194158a421c7 100644 |
2431 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
2432 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
2433 |
+@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, |
2434 |
+ break; |
2435 |
+ case XDP_TX: |
2436 |
+ xdpf = xdp_convert_buff_to_frame(xdp); |
2437 |
+- if (unlikely(!xdpf)) { |
2438 |
+- result = IXGBE_XDP_CONSUMED; |
2439 |
+- break; |
2440 |
+- } |
2441 |
++ if (unlikely(!xdpf)) |
2442 |
++ goto out_failure; |
2443 |
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf); |
2444 |
++ if (result == IXGBE_XDP_CONSUMED) |
2445 |
++ goto out_failure; |
2446 |
+ break; |
2447 |
+ case XDP_REDIRECT: |
2448 |
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); |
2449 |
+- if (!err) |
2450 |
+- result = IXGBE_XDP_REDIR; |
2451 |
+- else |
2452 |
+- result = IXGBE_XDP_CONSUMED; |
2453 |
++ if (err) |
2454 |
++ goto out_failure; |
2455 |
++ result = IXGBE_XDP_REDIR; |
2456 |
+ break; |
2457 |
+ default: |
2458 |
+ bpf_warn_invalid_xdp_action(act); |
2459 |
+ fallthrough; |
2460 |
+ case XDP_ABORTED: |
2461 |
++out_failure: |
2462 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2463 |
+ fallthrough; /* handle aborts by dropping packet */ |
2464 |
+ case XDP_DROP: |
2465 |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c |
2466 |
+index 3771857cf887c..f72d2978263b9 100644 |
2467 |
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c |
2468 |
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c |
2469 |
+@@ -104,25 +104,30 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, |
2470 |
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
2471 |
+ act = bpf_prog_run_xdp(xdp_prog, xdp); |
2472 |
+ |
2473 |
++ if (likely(act == XDP_REDIRECT)) { |
2474 |
++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2475 |
++ if (err) |
2476 |
++ goto out_failure; |
2477 |
++ rcu_read_unlock(); |
2478 |
++ return IXGBE_XDP_REDIR; |
2479 |
++ } |
2480 |
++ |
2481 |
+ switch (act) { |
2482 |
+ case XDP_PASS: |
2483 |
+ break; |
2484 |
+ case XDP_TX: |
2485 |
+ xdpf = xdp_convert_buff_to_frame(xdp); |
2486 |
+- if (unlikely(!xdpf)) { |
2487 |
+- result = IXGBE_XDP_CONSUMED; |
2488 |
+- break; |
2489 |
+- } |
2490 |
++ if (unlikely(!xdpf)) |
2491 |
++ goto out_failure; |
2492 |
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf); |
2493 |
+- break; |
2494 |
+- case XDP_REDIRECT: |
2495 |
+- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2496 |
+- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; |
2497 |
++ if (result == IXGBE_XDP_CONSUMED) |
2498 |
++ goto out_failure; |
2499 |
+ break; |
2500 |
+ default: |
2501 |
+ bpf_warn_invalid_xdp_action(act); |
2502 |
+ fallthrough; |
2503 |
+ case XDP_ABORTED: |
2504 |
++out_failure: |
2505 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2506 |
+ fallthrough; /* handle aborts by dropping packet */ |
2507 |
+ case XDP_DROP: |
2508 |
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |
2509 |
+index 449d7d5b280dd..b38860c485986 100644 |
2510 |
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |
2511 |
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c |
2512 |
+@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, |
2513 |
+ case XDP_TX: |
2514 |
+ xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; |
2515 |
+ result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp); |
2516 |
++ if (result == IXGBEVF_XDP_CONSUMED) |
2517 |
++ goto out_failure; |
2518 |
+ break; |
2519 |
+ default: |
2520 |
+ bpf_warn_invalid_xdp_action(act); |
2521 |
+ fallthrough; |
2522 |
+ case XDP_ABORTED: |
2523 |
++out_failure: |
2524 |
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act); |
2525 |
+ fallthrough; /* handle aborts by dropping packet */ |
2526 |
+ case XDP_DROP: |
2527 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2528 |
+index 53802e18af900..04b49cb3adb32 100644 |
2529 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2530 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2531 |
+@@ -1632,12 +1632,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev, |
2532 |
+ { |
2533 |
+ struct mlx5e_priv *priv = netdev_priv(netdev); |
2534 |
+ struct mlx5_core_dev *mdev = priv->mdev; |
2535 |
++ unsigned long fec_bitmap; |
2536 |
+ u16 fec_policy = 0; |
2537 |
+ int mode; |
2538 |
+ int err; |
2539 |
+ |
2540 |
+- if (bitmap_weight((unsigned long *)&fecparam->fec, |
2541 |
+- ETHTOOL_FEC_LLRS_BIT + 1) > 1) |
2542 |
++ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE); |
2543 |
++ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1) |
2544 |
+ return -EOPNOTSUPP; |
2545 |
+ |
2546 |
+ for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) { |
2547 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
2548 |
+index 78a1403c98026..b633f669ea57f 100644 |
2549 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
2550 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
2551 |
+@@ -1964,11 +1964,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, |
2552 |
+ misc_parameters); |
2553 |
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f); |
2554 |
+ struct flow_dissector *dissector = rule->match.dissector; |
2555 |
++ enum fs_flow_table_type fs_type; |
2556 |
+ u16 addr_type = 0; |
2557 |
+ u8 ip_proto = 0; |
2558 |
+ u8 *match_level; |
2559 |
+ int err; |
2560 |
+ |
2561 |
++ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX; |
2562 |
+ match_level = outer_match_level; |
2563 |
+ |
2564 |
+ if (dissector->used_keys & |
2565 |
+@@ -2093,6 +2095,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, |
2566 |
+ if (match.mask->vlan_id || |
2567 |
+ match.mask->vlan_priority || |
2568 |
+ match.mask->vlan_tpid) { |
2569 |
++ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid, |
2570 |
++ fs_type)) { |
2571 |
++ NL_SET_ERR_MSG_MOD(extack, |
2572 |
++ "Matching on CVLAN is not supported"); |
2573 |
++ return -EOPNOTSUPP; |
2574 |
++ } |
2575 |
++ |
2576 |
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { |
2577 |
+ MLX5_SET(fte_match_set_misc, misc_c, |
2578 |
+ outer_second_svlan_tag, 1); |
2579 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
2580 |
+index d4a2f8d1ee9f1..3719452a78035 100644 |
2581 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
2582 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
2583 |
+@@ -349,7 +349,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, |
2584 |
+ struct mlx5_fs_chains *chains, |
2585 |
+ int i) |
2586 |
+ { |
2587 |
+- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
2588 |
++ if (mlx5_chains_ignore_flow_level_supported(chains)) |
2589 |
++ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; |
2590 |
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
2591 |
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains); |
2592 |
+ } |
2593 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c |
2594 |
+index f9042e147c7f6..ee710ce007950 100644 |
2595 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c |
2596 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c |
2597 |
+@@ -354,6 +354,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work) |
2598 |
+ reset_abort_work); |
2599 |
+ struct mlx5_core_dev *dev = fw_reset->dev; |
2600 |
+ |
2601 |
++ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) |
2602 |
++ return; |
2603 |
++ |
2604 |
+ mlx5_sync_reset_clear_reset_requested(dev, true); |
2605 |
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); |
2606 |
+ } |
2607 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c |
2608 |
+index 381325b4a863e..b607ed5a74bb4 100644 |
2609 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c |
2610 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c |
2611 |
+@@ -111,7 +111,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains) |
2612 |
+ return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED; |
2613 |
+ } |
2614 |
+ |
2615 |
+-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) |
2616 |
++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) |
2617 |
+ { |
2618 |
+ return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; |
2619 |
+ } |
2620 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h |
2621 |
+index 6d5be31b05dd7..9f53a08235582 100644 |
2622 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h |
2623 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h |
2624 |
+@@ -27,6 +27,7 @@ struct mlx5_chains_attr { |
2625 |
+ |
2626 |
+ bool |
2627 |
+ mlx5_chains_prios_supported(struct mlx5_fs_chains *chains); |
2628 |
++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains); |
2629 |
+ bool |
2630 |
+ mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains); |
2631 |
+ u32 |
2632 |
+@@ -72,6 +73,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, |
2633 |
+ |
2634 |
+ #else /* CONFIG_MLX5_CLS_ACT */ |
2635 |
+ |
2636 |
++static inline bool |
2637 |
++mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) |
2638 |
++{ return false; } |
2639 |
++ |
2640 |
+ static inline struct mlx5_flow_table * |
2641 |
+ mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio, |
2642 |
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); } |
2643 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c |
2644 |
+index 1fbcd012bb855..7ccfd40586cee 100644 |
2645 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c |
2646 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c |
2647 |
+@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, |
2648 |
+ int ret; |
2649 |
+ |
2650 |
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB; |
2651 |
+- ft_attr.level = dmn->info.caps.max_ft_level - 2; |
2652 |
++ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2, |
2653 |
++ MLX5_FT_MAX_MULTIPATH_LEVEL); |
2654 |
+ ft_attr.reformat_en = reformat_req; |
2655 |
+ ft_attr.decap_en = reformat_req; |
2656 |
+ |
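Note on the dr_fw.c hunk above: the flow-table level is no longer taken straight from max_ft_level - 2 but is clamped to MLX5_FT_MAX_MULTIPATH_LEVEL with min_t(). A standalone sketch of the same clamp follows; the value of MAX_MULTIPATH_LEVEL used here is a placeholder, not the real mlx5 constant.

#include <stdio.h>

/* Placeholder value; the real MLX5_FT_MAX_MULTIPATH_LEVEL lives in the mlx5 headers. */
#define MAX_MULTIPATH_LEVEL 63

static int min_int(int a, int b) { return a < b ? a : b; }

/* Same shape as: ft_attr.level = min_t(int, max_ft_level - 2, MLX5_FT_MAX_MULTIPATH_LEVEL); */
static int pick_table_level(int max_ft_level)
{
    return min_int(max_ft_level - 2, MAX_MULTIPATH_LEVEL);
}

int main(void)
{
    printf("caps 10  -> level %d\n", pick_table_level(10));   /* 8, below the cap */
    printf("caps 255 -> level %d\n", pick_table_level(255));  /* clamped to 63 */
    return 0;
}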
2657 |
+diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile |
2658 |
+index fc52b2cb500b3..dbe1f8514efc3 100644 |
2659 |
+--- a/drivers/net/wireguard/Makefile |
2660 |
++++ b/drivers/net/wireguard/Makefile |
2661 |
+@@ -1,5 +1,4 @@ |
2662 |
+-ccflags-y := -O3 |
2663 |
+-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' |
2664 |
++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' |
2665 |
+ ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG |
2666 |
+ wireguard-y := main.o |
2667 |
+ wireguard-y += noise.o |
2668 |
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c |
2669 |
+index 3725e9cd85f4f..b7197e80f2264 100644 |
2670 |
+--- a/drivers/net/wireguard/allowedips.c |
2671 |
++++ b/drivers/net/wireguard/allowedips.c |
2672 |
+@@ -6,6 +6,8 @@ |
2673 |
+ #include "allowedips.h" |
2674 |
+ #include "peer.h" |
2675 |
+ |
2676 |
++static struct kmem_cache *node_cache; |
2677 |
++ |
2678 |
+ static void swap_endian(u8 *dst, const u8 *src, u8 bits) |
2679 |
+ { |
2680 |
+ if (bits == 32) { |
2681 |
+@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, |
2682 |
+ node->bitlen = bits; |
2683 |
+ memcpy(node->bits, src, bits / 8U); |
2684 |
+ } |
2685 |
+-#define CHOOSE_NODE(parent, key) \ |
2686 |
+- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] |
2687 |
++ |
2688 |
++static inline u8 choose(struct allowedips_node *node, const u8 *key) |
2689 |
++{ |
2690 |
++ return (key[node->bit_at_a] >> node->bit_at_b) & 1; |
2691 |
++} |
2692 |
+ |
2693 |
+ static void push_rcu(struct allowedips_node **stack, |
2694 |
+ struct allowedips_node __rcu *p, unsigned int *len) |
2695 |
+@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack, |
2696 |
+ } |
2697 |
+ } |
2698 |
+ |
2699 |
++static void node_free_rcu(struct rcu_head *rcu) |
2700 |
++{ |
2701 |
++ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); |
2702 |
++} |
2703 |
++ |
2704 |
+ static void root_free_rcu(struct rcu_head *rcu) |
2705 |
+ { |
2706 |
+ struct allowedips_node *node, *stack[128] = { |
2707 |
+@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu) |
2708 |
+ while (len > 0 && (node = stack[--len])) { |
2709 |
+ push_rcu(stack, node->bit[0], &len); |
2710 |
+ push_rcu(stack, node->bit[1], &len); |
2711 |
+- kfree(node); |
2712 |
++ kmem_cache_free(node_cache, node); |
2713 |
+ } |
2714 |
+ } |
2715 |
+ |
2716 |
+@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root) |
2717 |
+ } |
2718 |
+ } |
2719 |
+ |
2720 |
+-static void walk_remove_by_peer(struct allowedips_node __rcu **top, |
2721 |
+- struct wg_peer *peer, struct mutex *lock) |
2722 |
+-{ |
2723 |
+-#define REF(p) rcu_access_pointer(p) |
2724 |
+-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) |
2725 |
+-#define PUSH(p) ({ \ |
2726 |
+- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ |
2727 |
+- stack[len++] = p; \ |
2728 |
+- }) |
2729 |
+- |
2730 |
+- struct allowedips_node __rcu **stack[128], **nptr; |
2731 |
+- struct allowedips_node *node, *prev; |
2732 |
+- unsigned int len; |
2733 |
+- |
2734 |
+- if (unlikely(!peer || !REF(*top))) |
2735 |
+- return; |
2736 |
+- |
2737 |
+- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { |
2738 |
+- nptr = stack[len - 1]; |
2739 |
+- node = DEREF(nptr); |
2740 |
+- if (!node) { |
2741 |
+- --len; |
2742 |
+- continue; |
2743 |
+- } |
2744 |
+- if (!prev || REF(prev->bit[0]) == node || |
2745 |
+- REF(prev->bit[1]) == node) { |
2746 |
+- if (REF(node->bit[0])) |
2747 |
+- PUSH(&node->bit[0]); |
2748 |
+- else if (REF(node->bit[1])) |
2749 |
+- PUSH(&node->bit[1]); |
2750 |
+- } else if (REF(node->bit[0]) == prev) { |
2751 |
+- if (REF(node->bit[1])) |
2752 |
+- PUSH(&node->bit[1]); |
2753 |
+- } else { |
2754 |
+- if (rcu_dereference_protected(node->peer, |
2755 |
+- lockdep_is_held(lock)) == peer) { |
2756 |
+- RCU_INIT_POINTER(node->peer, NULL); |
2757 |
+- list_del_init(&node->peer_list); |
2758 |
+- if (!node->bit[0] || !node->bit[1]) { |
2759 |
+- rcu_assign_pointer(*nptr, DEREF( |
2760 |
+- &node->bit[!REF(node->bit[0])])); |
2761 |
+- kfree_rcu(node, rcu); |
2762 |
+- node = DEREF(nptr); |
2763 |
+- } |
2764 |
+- } |
2765 |
+- --len; |
2766 |
+- } |
2767 |
+- } |
2768 |
+- |
2769 |
+-#undef REF |
2770 |
+-#undef DEREF |
2771 |
+-#undef PUSH |
2772 |
+-} |
2773 |
+- |
2774 |
+ static unsigned int fls128(u64 a, u64 b) |
2775 |
+ { |
2776 |
+ return a ? fls64(a) + 64U : fls64(b); |
2777 |
+@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, |
2778 |
+ found = node; |
2779 |
+ if (node->cidr == bits) |
2780 |
+ break; |
2781 |
+- node = rcu_dereference_bh(CHOOSE_NODE(node, key)); |
2782 |
++ node = rcu_dereference_bh(node->bit[choose(node, key)]); |
2783 |
+ } |
2784 |
+ return found; |
2785 |
+ } |
2786 |
+@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, |
2787 |
+ u8 cidr, u8 bits, struct allowedips_node **rnode, |
2788 |
+ struct mutex *lock) |
2789 |
+ { |
2790 |
+- struct allowedips_node *node = rcu_dereference_protected(trie, |
2791 |
+- lockdep_is_held(lock)); |
2792 |
++ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); |
2793 |
+ struct allowedips_node *parent = NULL; |
2794 |
+ bool exact = false; |
2795 |
+ |
2796 |
+@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, |
2797 |
+ exact = true; |
2798 |
+ break; |
2799 |
+ } |
2800 |
+- node = rcu_dereference_protected(CHOOSE_NODE(parent, key), |
2801 |
+- lockdep_is_held(lock)); |
2802 |
++ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); |
2803 |
+ } |
2804 |
+ *rnode = parent; |
2805 |
+ return exact; |
2806 |
+ } |
2807 |
+ |
2808 |
++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) |
2809 |
++{ |
2810 |
++ node->parent_bit_packed = (unsigned long)parent | bit; |
2811 |
++ rcu_assign_pointer(*parent, node); |
2812 |
++} |
2813 |
++ |
2814 |
++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) |
2815 |
++{ |
2816 |
++ u8 bit = choose(parent, node->bits); |
2817 |
++ connect_node(&parent->bit[bit], bit, node); |
2818 |
++} |
2819 |
++ |
2820 |
+ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
2821 |
+ u8 cidr, struct wg_peer *peer, struct mutex *lock) |
2822 |
+ { |
2823 |
+@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
2824 |
+ return -EINVAL; |
2825 |
+ |
2826 |
+ if (!rcu_access_pointer(*trie)) { |
2827 |
+- node = kzalloc(sizeof(*node), GFP_KERNEL); |
2828 |
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
2829 |
+ if (unlikely(!node)) |
2830 |
+ return -ENOMEM; |
2831 |
+ RCU_INIT_POINTER(node->peer, peer); |
2832 |
+ list_add_tail(&node->peer_list, &peer->allowedips_list); |
2833 |
+ copy_and_assign_cidr(node, key, cidr, bits); |
2834 |
+- rcu_assign_pointer(*trie, node); |
2835 |
++ connect_node(trie, 2, node); |
2836 |
+ return 0; |
2837 |
+ } |
2838 |
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) { |
2839 |
+@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
2840 |
+ return 0; |
2841 |
+ } |
2842 |
+ |
2843 |
+- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); |
2844 |
++ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
2845 |
+ if (unlikely(!newnode)) |
2846 |
+ return -ENOMEM; |
2847 |
+ RCU_INIT_POINTER(newnode->peer, peer); |
2848 |
+@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
2849 |
+ if (!node) { |
2850 |
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); |
2851 |
+ } else { |
2852 |
+- down = rcu_dereference_protected(CHOOSE_NODE(node, key), |
2853 |
+- lockdep_is_held(lock)); |
2854 |
++ const u8 bit = choose(node, key); |
2855 |
++ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); |
2856 |
+ if (!down) { |
2857 |
+- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); |
2858 |
++ connect_node(&node->bit[bit], bit, newnode); |
2859 |
+ return 0; |
2860 |
+ } |
2861 |
+ } |
2862 |
+@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
2863 |
+ parent = node; |
2864 |
+ |
2865 |
+ if (newnode->cidr == cidr) { |
2866 |
+- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); |
2867 |
++ choose_and_connect_node(newnode, down); |
2868 |
+ if (!parent) |
2869 |
+- rcu_assign_pointer(*trie, newnode); |
2870 |
++ connect_node(trie, 2, newnode); |
2871 |
+ else |
2872 |
+- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), |
2873 |
+- newnode); |
2874 |
+- } else { |
2875 |
+- node = kzalloc(sizeof(*node), GFP_KERNEL); |
2876 |
+- if (unlikely(!node)) { |
2877 |
+- list_del(&newnode->peer_list); |
2878 |
+- kfree(newnode); |
2879 |
+- return -ENOMEM; |
2880 |
+- } |
2881 |
+- INIT_LIST_HEAD(&node->peer_list); |
2882 |
+- copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
2883 |
++ choose_and_connect_node(parent, newnode); |
2884 |
++ return 0; |
2885 |
++ } |
2886 |
+ |
2887 |
+- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); |
2888 |
+- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); |
2889 |
+- if (!parent) |
2890 |
+- rcu_assign_pointer(*trie, node); |
2891 |
+- else |
2892 |
+- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), |
2893 |
+- node); |
2894 |
++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
2895 |
++ if (unlikely(!node)) { |
2896 |
++ list_del(&newnode->peer_list); |
2897 |
++ kmem_cache_free(node_cache, newnode); |
2898 |
++ return -ENOMEM; |
2899 |
+ } |
2900 |
++ INIT_LIST_HEAD(&node->peer_list); |
2901 |
++ copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
2902 |
++ |
2903 |
++ choose_and_connect_node(node, down); |
2904 |
++ choose_and_connect_node(node, newnode); |
2905 |
++ if (!parent) |
2906 |
++ connect_node(trie, 2, node); |
2907 |
++ else |
2908 |
++ choose_and_connect_node(parent, node); |
2909 |
+ return 0; |
2910 |
+ } |
2911 |
+ |
2912 |
+@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, |
2913 |
+ void wg_allowedips_remove_by_peer(struct allowedips *table, |
2914 |
+ struct wg_peer *peer, struct mutex *lock) |
2915 |
+ { |
2916 |
++ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; |
2917 |
++ bool free_parent; |
2918 |
++ |
2919 |
++ if (list_empty(&peer->allowedips_list)) |
2920 |
++ return; |
2921 |
+ ++table->seq; |
2922 |
+- walk_remove_by_peer(&table->root4, peer, lock); |
2923 |
+- walk_remove_by_peer(&table->root6, peer, lock); |
2924 |
++ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { |
2925 |
++ list_del_init(&node->peer_list); |
2926 |
++ RCU_INIT_POINTER(node->peer, NULL); |
2927 |
++ if (node->bit[0] && node->bit[1]) |
2928 |
++ continue; |
2929 |
++ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], |
2930 |
++ lockdep_is_held(lock)); |
2931 |
++ if (child) |
2932 |
++ child->parent_bit_packed = node->parent_bit_packed; |
2933 |
++ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); |
2934 |
++ *parent_bit = child; |
2935 |
++ parent = (void *)parent_bit - |
2936 |
++ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); |
2937 |
++ free_parent = !rcu_access_pointer(node->bit[0]) && |
2938 |
++ !rcu_access_pointer(node->bit[1]) && |
2939 |
++ (node->parent_bit_packed & 3) <= 1 && |
2940 |
++ !rcu_access_pointer(parent->peer); |
2941 |
++ if (free_parent) |
2942 |
++ child = rcu_dereference_protected( |
2943 |
++ parent->bit[!(node->parent_bit_packed & 1)], |
2944 |
++ lockdep_is_held(lock)); |
2945 |
++ call_rcu(&node->rcu, node_free_rcu); |
2946 |
++ if (!free_parent) |
2947 |
++ continue; |
2948 |
++ if (child) |
2949 |
++ child->parent_bit_packed = parent->parent_bit_packed; |
2950 |
++ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; |
2951 |
++ call_rcu(&parent->rcu, node_free_rcu); |
2952 |
++ } |
2953 |
+ } |
2954 |
+ |
2955 |
+ int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) |
2956 |
+@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, |
2957 |
+ return NULL; |
2958 |
+ } |
2959 |
+ |
2960 |
++int __init wg_allowedips_slab_init(void) |
2961 |
++{ |
2962 |
++ node_cache = KMEM_CACHE(allowedips_node, 0); |
2963 |
++ return node_cache ? 0 : -ENOMEM; |
2964 |
++} |
2965 |
++ |
2966 |
++void wg_allowedips_slab_uninit(void) |
2967 |
++{ |
2968 |
++ rcu_barrier(); |
2969 |
++ kmem_cache_destroy(node_cache); |
2970 |
++} |
2971 |
++ |
2972 |
+ #include "selftest/allowedips.c" |
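Note on the allowedips.c rewrite above: the CHOOSE_NODE() macro becomes a small choose() helper, and every node additionally records parent_bit_packed, the address of the parent slot it hangs off with the slot index packed into the low bits of that pointer (which is why the table later gains an __aligned(4) annotation). The standalone sketch below shows both ideas in plain userspace C; struct node and its field names are illustrative, not the WireGuard types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *bit[2];
    uint8_t bit_at_a, bit_at_b;
    unsigned long parent_bit_packed;   /* parent slot address | slot index */
};

/* Same shape as choose(): pick the child slot named by one bit of the key. */
static uint8_t choose(const struct node *n, const uint8_t *key)
{
    return (key[n->bit_at_a] >> n->bit_at_b) & 1;
}

/* Pack the address of the parent's slot together with a 2-bit tag (0/1 child, 2 = root). */
static void connect_node(struct node **parent_slot, uint8_t tag, struct node *n)
{
    assert(((uintptr_t)parent_slot & 3) == 0);   /* the two low bits must be free */
    n->parent_bit_packed = (unsigned long)(uintptr_t)parent_slot | tag;
    *parent_slot = n;
}

int main(void)
{
    struct node *root = NULL;
    struct node *a = calloc(1, sizeof(*a));
    uint8_t key[1] = { 0x80 };

    if (!a)
        return 1;
    connect_node(&root, 2, a);   /* tag 2: attached directly to a root pointer */

    struct node **slot = (struct node **)(uintptr_t)(a->parent_bit_packed & ~3UL);
    a->bit_at_a = 0;
    a->bit_at_b = 7;
    printf("unpacked slot points back at root: %d, tag=%lu, choose=%u\n",
           slot == &root, a->parent_bit_packed & 3, choose(a, key));
    free(a);
    return 0;
}

With the back pointer in hand, removal no longer needs the walk_remove_by_peer() stack walk that this patch deletes: each node on a peer's list can splice itself (and an emptied parent) out directly.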
2973 |
+diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h |
2974 |
+index e5c83cafcef4c..2346c797eb4d8 100644 |
2975 |
+--- a/drivers/net/wireguard/allowedips.h |
2976 |
++++ b/drivers/net/wireguard/allowedips.h |
2977 |
+@@ -15,14 +15,11 @@ struct wg_peer; |
2978 |
+ struct allowedips_node { |
2979 |
+ struct wg_peer __rcu *peer; |
2980 |
+ struct allowedips_node __rcu *bit[2]; |
2981 |
+- /* While it may seem scandalous that we waste space for v4, |
2982 |
+- * we're alloc'ing to the nearest power of 2 anyway, so this |
2983 |
+- * doesn't actually make a difference. |
2984 |
+- */ |
2985 |
+- u8 bits[16] __aligned(__alignof(u64)); |
2986 |
+ u8 cidr, bit_at_a, bit_at_b, bitlen; |
2987 |
++ u8 bits[16] __aligned(__alignof(u64)); |
2988 |
+ |
2989 |
+- /* Keep rarely used list at bottom to be beyond cache line. */ |
2990 |
++ /* Keep rarely used members at bottom to be beyond cache line. */ |
2991 |
++ unsigned long parent_bit_packed; |
2992 |
+ union { |
2993 |
+ struct list_head peer_list; |
2994 |
+ struct rcu_head rcu; |
2995 |
+@@ -33,7 +30,7 @@ struct allowedips { |
2996 |
+ struct allowedips_node __rcu *root4; |
2997 |
+ struct allowedips_node __rcu *root6; |
2998 |
+ u64 seq; |
2999 |
+-}; |
3000 |
++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */ |
3001 |
+ |
3002 |
+ void wg_allowedips_init(struct allowedips *table); |
3003 |
+ void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); |
3004 |
+@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, |
3005 |
+ bool wg_allowedips_selftest(void); |
3006 |
+ #endif |
3007 |
+ |
3008 |
++int wg_allowedips_slab_init(void); |
3009 |
++void wg_allowedips_slab_uninit(void); |
3010 |
++ |
3011 |
+ #endif /* _WG_ALLOWEDIPS_H */ |
3012 |
+diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c |
3013 |
+index 7a7d5f1a80fc7..75dbe77b0b4b4 100644 |
3014 |
+--- a/drivers/net/wireguard/main.c |
3015 |
++++ b/drivers/net/wireguard/main.c |
3016 |
+@@ -21,13 +21,22 @@ static int __init mod_init(void) |
3017 |
+ { |
3018 |
+ int ret; |
3019 |
+ |
3020 |
++ ret = wg_allowedips_slab_init(); |
3021 |
++ if (ret < 0) |
3022 |
++ goto err_allowedips; |
3023 |
++ |
3024 |
+ #ifdef DEBUG |
3025 |
++ ret = -ENOTRECOVERABLE; |
3026 |
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || |
3027 |
+ !wg_ratelimiter_selftest()) |
3028 |
+- return -ENOTRECOVERABLE; |
3029 |
++ goto err_peer; |
3030 |
+ #endif |
3031 |
+ wg_noise_init(); |
3032 |
+ |
3033 |
++ ret = wg_peer_init(); |
3034 |
++ if (ret < 0) |
3035 |
++ goto err_peer; |
3036 |
++ |
3037 |
+ ret = wg_device_init(); |
3038 |
+ if (ret < 0) |
3039 |
+ goto err_device; |
3040 |
+@@ -44,6 +53,10 @@ static int __init mod_init(void) |
3041 |
+ err_netlink: |
3042 |
+ wg_device_uninit(); |
3043 |
+ err_device: |
3044 |
++ wg_peer_uninit(); |
3045 |
++err_peer: |
3046 |
++ wg_allowedips_slab_uninit(); |
3047 |
++err_allowedips: |
3048 |
+ return ret; |
3049 |
+ } |
3050 |
+ |
3051 |
+@@ -51,6 +64,8 @@ static void __exit mod_exit(void) |
3052 |
+ { |
3053 |
+ wg_genetlink_uninit(); |
3054 |
+ wg_device_uninit(); |
3055 |
++ wg_peer_uninit(); |
3056 |
++ wg_allowedips_slab_uninit(); |
3057 |
+ } |
3058 |
+ |
3059 |
+ module_init(mod_init); |
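Note on the main.c hunk above: mod_init() grows the usual goto-based unwind ladder, so each subsystem that initialised successfully is torn down in reverse order when a later step fails. A self-contained sketch of that pattern, with made-up subsystem names:

#include <stdio.h>

static int  subsys_a_init(void) { puts("A up");    return 0; }
static void subsys_a_exit(void) { puts("A down"); }
static int  subsys_b_init(void) { puts("B up");    return 0; }
static void subsys_b_exit(void) { puts("B down"); }
static int  subsys_c_init(void) { puts("C fails"); return -1; }

/* Mirrors the mod_init()/err_* layout: unwind only what already succeeded. */
static int mod_init_like(void)
{
    int ret;

    ret = subsys_a_init();
    if (ret < 0)
        goto err_a;
    ret = subsys_b_init();
    if (ret < 0)
        goto err_b;
    ret = subsys_c_init();
    if (ret < 0)
        goto err_c;
    return 0;

err_c:
    subsys_b_exit();
err_b:
    subsys_a_exit();
err_a:
    return ret;
}

int main(void)
{
    printf("init returned %d\n", mod_init_like());
    return 0;
}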
3060 |
+diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c |
3061 |
+index cd5cb0292cb67..1acd00ab2fbcb 100644 |
3062 |
+--- a/drivers/net/wireguard/peer.c |
3063 |
++++ b/drivers/net/wireguard/peer.c |
3064 |
+@@ -15,6 +15,7 @@ |
3065 |
+ #include <linux/rcupdate.h> |
3066 |
+ #include <linux/list.h> |
3067 |
+ |
3068 |
++static struct kmem_cache *peer_cache; |
3069 |
+ static atomic64_t peer_counter = ATOMIC64_INIT(0); |
3070 |
+ |
3071 |
+ struct wg_peer *wg_peer_create(struct wg_device *wg, |
3072 |
+@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, |
3073 |
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE) |
3074 |
+ return ERR_PTR(ret); |
3075 |
+ |
3076 |
+- peer = kzalloc(sizeof(*peer), GFP_KERNEL); |
3077 |
++ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); |
3078 |
+ if (unlikely(!peer)) |
3079 |
+ return ERR_PTR(ret); |
3080 |
+- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) |
3081 |
++ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) |
3082 |
+ goto err; |
3083 |
+ |
3084 |
+ peer->device = wg; |
3085 |
+@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, |
3086 |
+ return peer; |
3087 |
+ |
3088 |
+ err: |
3089 |
+- kfree(peer); |
3090 |
++ kmem_cache_free(peer_cache, peer); |
3091 |
+ return ERR_PTR(ret); |
3092 |
+ } |
3093 |
+ |
3094 |
+@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer) |
3095 |
+ /* Mark as dead, so that we don't allow jumping contexts after. */ |
3096 |
+ WRITE_ONCE(peer->is_dead, true); |
3097 |
+ |
3098 |
+- /* The caller must now synchronize_rcu() for this to take effect. */ |
3099 |
++ /* The caller must now synchronize_net() for this to take effect. */ |
3100 |
+ } |
3101 |
+ |
3102 |
+ static void peer_remove_after_dead(struct wg_peer *peer) |
3103 |
+@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer) |
3104 |
+ lockdep_assert_held(&peer->device->device_update_lock); |
3105 |
+ |
3106 |
+ peer_make_dead(peer); |
3107 |
+- synchronize_rcu(); |
3108 |
++ synchronize_net(); |
3109 |
+ peer_remove_after_dead(peer); |
3110 |
+ } |
3111 |
+ |
3112 |
+@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg) |
3113 |
+ peer_make_dead(peer); |
3114 |
+ list_add_tail(&peer->peer_list, &dead_peers); |
3115 |
+ } |
3116 |
+- synchronize_rcu(); |
3117 |
++ synchronize_net(); |
3118 |
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) |
3119 |
+ peer_remove_after_dead(peer); |
3120 |
+ } |
3121 |
+@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu) |
3122 |
+ /* The final zeroing takes care of clearing any remaining handshake key |
3123 |
+ * material and other potentially sensitive information. |
3124 |
+ */ |
3125 |
+- kfree_sensitive(peer); |
3126 |
++ memzero_explicit(peer, sizeof(*peer)); |
3127 |
++ kmem_cache_free(peer_cache, peer); |
3128 |
+ } |
3129 |
+ |
3130 |
+ static void kref_release(struct kref *refcount) |
3131 |
+@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer) |
3132 |
+ return; |
3133 |
+ kref_put(&peer->refcount, kref_release); |
3134 |
+ } |
3135 |
++ |
3136 |
++int __init wg_peer_init(void) |
3137 |
++{ |
3138 |
++ peer_cache = KMEM_CACHE(wg_peer, 0); |
3139 |
++ return peer_cache ? 0 : -ENOMEM; |
3140 |
++} |
3141 |
++ |
3142 |
++void wg_peer_uninit(void) |
3143 |
++{ |
3144 |
++ kmem_cache_destroy(peer_cache); |
3145 |
++} |
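Note on the peer.c hunk above: because peers now come from a kmem_cache, the release path replaces kfree_sensitive() with an explicit memzero_explicit() before kmem_cache_free(), so handshake material is wiped before the object is recycled. The userspace sketch below shows one common way to keep such a clear from being optimised away (a volatile function pointer to memset); explicit_bzero() or memset_s() serve the same purpose where available. The struct and field names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Keep the compiler from eliding a memset on an object that is about to be freed. */
static void *(*const volatile force_memset)(void *, int, size_t) = memset;

static void secure_zero(void *p, size_t n)
{
    force_memset(p, 0, n);
}

struct peer_like {
    unsigned char handshake_key[32];
    int id;
};

int main(void)
{
    struct peer_like *peer = malloc(sizeof(*peer));

    if (!peer)
        return 1;
    memset(peer->handshake_key, 0xaa, sizeof(peer->handshake_key));
    peer->id = 7;

    /* Analogous to memzero_explicit(peer, sizeof(*peer)) before kmem_cache_free(). */
    secure_zero(peer, sizeof(*peer));
    printf("first key byte after clearing: %u\n", peer->handshake_key[0]);
    free(peer);
    return 0;
}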
3146 |
+diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h |
3147 |
+index 8d53b687a1d16..76e4d3128ad4e 100644 |
3148 |
+--- a/drivers/net/wireguard/peer.h |
3149 |
++++ b/drivers/net/wireguard/peer.h |
3150 |
+@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer); |
3151 |
+ void wg_peer_remove(struct wg_peer *peer); |
3152 |
+ void wg_peer_remove_all(struct wg_device *wg); |
3153 |
+ |
3154 |
++int wg_peer_init(void); |
3155 |
++void wg_peer_uninit(void); |
3156 |
++ |
3157 |
+ #endif /* _WG_PEER_H */ |
3158 |
+diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c |
3159 |
+index 846db14cb046b..e173204ae7d78 100644 |
3160 |
+--- a/drivers/net/wireguard/selftest/allowedips.c |
3161 |
++++ b/drivers/net/wireguard/selftest/allowedips.c |
3162 |
+@@ -19,32 +19,22 @@ |
3163 |
+ |
3164 |
+ #include <linux/siphash.h> |
3165 |
+ |
3166 |
+-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, |
3167 |
+- u8 cidr) |
3168 |
+-{ |
3169 |
+- swap_endian(dst, src, bits); |
3170 |
+- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); |
3171 |
+- if (cidr) |
3172 |
+- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); |
3173 |
+-} |
3174 |
+- |
3175 |
+ static __init void print_node(struct allowedips_node *node, u8 bits) |
3176 |
+ { |
3177 |
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; |
3178 |
+- char *fmt_declaration = KERN_DEBUG |
3179 |
+- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; |
3180 |
++ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; |
3181 |
++ u8 ip1[16], ip2[16], cidr1, cidr2; |
3182 |
+ char *style = "dotted"; |
3183 |
+- u8 ip1[16], ip2[16]; |
3184 |
+ u32 color = 0; |
3185 |
+ |
3186 |
++ if (node == NULL) |
3187 |
++ return; |
3188 |
+ if (bits == 32) { |
3189 |
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; |
3190 |
+- fmt_declaration = KERN_DEBUG |
3191 |
+- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; |
3192 |
++ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; |
3193 |
+ } else if (bits == 128) { |
3194 |
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; |
3195 |
+- fmt_declaration = KERN_DEBUG |
3196 |
+- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; |
3197 |
++ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; |
3198 |
+ } |
3199 |
+ if (node->peer) { |
3200 |
+ hsiphash_key_t key = { { 0 } }; |
3201 |
+@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits) |
3202 |
+ hsiphash_1u32(0xabad1dea, &key) % 200; |
3203 |
+ style = "bold"; |
3204 |
+ } |
3205 |
+- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); |
3206 |
+- printk(fmt_declaration, ip1, node->cidr, style, color); |
3207 |
++ wg_allowedips_read_node(node, ip1, &cidr1); |
3208 |
++ printk(fmt_declaration, ip1, cidr1, style, color); |
3209 |
+ if (node->bit[0]) { |
3210 |
+- swap_endian_and_apply_cidr(ip2, |
3211 |
+- rcu_dereference_raw(node->bit[0])->bits, bits, |
3212 |
+- node->cidr); |
3213 |
+- printk(fmt_connection, ip1, node->cidr, ip2, |
3214 |
+- rcu_dereference_raw(node->bit[0])->cidr); |
3215 |
+- print_node(rcu_dereference_raw(node->bit[0]), bits); |
3216 |
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); |
3217 |
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); |
3218 |
+ } |
3219 |
+ if (node->bit[1]) { |
3220 |
+- swap_endian_and_apply_cidr(ip2, |
3221 |
+- rcu_dereference_raw(node->bit[1])->bits, |
3222 |
+- bits, node->cidr); |
3223 |
+- printk(fmt_connection, ip1, node->cidr, ip2, |
3224 |
+- rcu_dereference_raw(node->bit[1])->cidr); |
3225 |
+- print_node(rcu_dereference_raw(node->bit[1]), bits); |
3226 |
++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); |
3227 |
++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); |
3228 |
+ } |
3229 |
++ if (node->bit[0]) |
3230 |
++ print_node(rcu_dereference_raw(node->bit[0]), bits); |
3231 |
++ if (node->bit[1]) |
3232 |
++ print_node(rcu_dereference_raw(node->bit[1]), bits); |
3233 |
+ } |
3234 |
+ |
3235 |
+ static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) |
3236 |
+@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) |
3237 |
+ { |
3238 |
+ union nf_inet_addr mask; |
3239 |
+ |
3240 |
+- memset(&mask, 0x00, 128 / 8); |
3241 |
+- memset(&mask, 0xff, cidr / 8); |
3242 |
++ memset(&mask, 0, sizeof(mask)); |
3243 |
++ memset(&mask.all, 0xff, cidr / 8); |
3244 |
+ if (cidr % 32) |
3245 |
+ mask.all[cidr / 32] = (__force u32)htonl( |
3246 |
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); |
3247 |
+@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node) |
3248 |
+ } |
3249 |
+ |
3250 |
+ static __init inline bool |
3251 |
+-horrible_match_v4(const struct horrible_allowedips_node *node, |
3252 |
+- struct in_addr *ip) |
3253 |
++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) |
3254 |
+ { |
3255 |
+ return (ip->s_addr & node->mask.ip) == node->ip.ip; |
3256 |
+ } |
3257 |
+ |
3258 |
+ static __init inline bool |
3259 |
+-horrible_match_v6(const struct horrible_allowedips_node *node, |
3260 |
+- struct in6_addr *ip) |
3261 |
++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) |
3262 |
+ { |
3263 |
+- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == |
3264 |
+- node->ip.ip6[0] && |
3265 |
+- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == |
3266 |
+- node->ip.ip6[1] && |
3267 |
+- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == |
3268 |
+- node->ip.ip6[2] && |
3269 |
++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && |
3270 |
++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && |
3271 |
++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && |
3272 |
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; |
3273 |
+ } |
3274 |
+ |
3275 |
+ static __init void |
3276 |
+-horrible_insert_ordered(struct horrible_allowedips *table, |
3277 |
+- struct horrible_allowedips_node *node) |
3278 |
++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) |
3279 |
+ { |
3280 |
+ struct horrible_allowedips_node *other = NULL, *where = NULL; |
3281 |
+ u8 my_cidr = horrible_mask_to_cidr(node->mask); |
3282 |
+ |
3283 |
+ hlist_for_each_entry(other, &table->head, table) { |
3284 |
+- if (!memcmp(&other->mask, &node->mask, |
3285 |
+- sizeof(union nf_inet_addr)) && |
3286 |
+- !memcmp(&other->ip, &node->ip, |
3287 |
+- sizeof(union nf_inet_addr)) && |
3288 |
+- other->ip_version == node->ip_version) { |
3289 |
++ if (other->ip_version == node->ip_version && |
3290 |
++ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && |
3291 |
++ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { |
3292 |
+ other->value = node->value; |
3293 |
+ kfree(node); |
3294 |
+ return; |
3295 |
+ } |
3296 |
++ } |
3297 |
++ hlist_for_each_entry(other, &table->head, table) { |
3298 |
+ where = other; |
3299 |
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr) |
3300 |
+ break; |
3301 |
+@@ -201,8 +181,7 @@ static __init int |
3302 |
+ horrible_allowedips_insert_v4(struct horrible_allowedips *table, |
3303 |
+ struct in_addr *ip, u8 cidr, void *value) |
3304 |
+ { |
3305 |
+- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), |
3306 |
+- GFP_KERNEL); |
3307 |
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); |
3308 |
+ |
3309 |
+ if (unlikely(!node)) |
3310 |
+ return -ENOMEM; |
3311 |
+@@ -219,8 +198,7 @@ static __init int |
3312 |
+ horrible_allowedips_insert_v6(struct horrible_allowedips *table, |
3313 |
+ struct in6_addr *ip, u8 cidr, void *value) |
3314 |
+ { |
3315 |
+- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), |
3316 |
+- GFP_KERNEL); |
3317 |
++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); |
3318 |
+ |
3319 |
+ if (unlikely(!node)) |
3320 |
+ return -ENOMEM; |
3321 |
+@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table, |
3322 |
+ } |
3323 |
+ |
3324 |
+ static __init void * |
3325 |
+-horrible_allowedips_lookup_v4(struct horrible_allowedips *table, |
3326 |
+- struct in_addr *ip) |
3327 |
++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) |
3328 |
+ { |
3329 |
+ struct horrible_allowedips_node *node; |
3330 |
+- void *ret = NULL; |
3331 |
+ |
3332 |
+ hlist_for_each_entry(node, &table->head, table) { |
3333 |
+- if (node->ip_version != 4) |
3334 |
+- continue; |
3335 |
+- if (horrible_match_v4(node, ip)) { |
3336 |
+- ret = node->value; |
3337 |
+- break; |
3338 |
+- } |
3339 |
++ if (node->ip_version == 4 && horrible_match_v4(node, ip)) |
3340 |
++ return node->value; |
3341 |
+ } |
3342 |
+- return ret; |
3343 |
++ return NULL; |
3344 |
+ } |
3345 |
+ |
3346 |
+ static __init void * |
3347 |
+-horrible_allowedips_lookup_v6(struct horrible_allowedips *table, |
3348 |
+- struct in6_addr *ip) |
3349 |
++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) |
3350 |
+ { |
3351 |
+ struct horrible_allowedips_node *node; |
3352 |
+- void *ret = NULL; |
3353 |
+ |
3354 |
+ hlist_for_each_entry(node, &table->head, table) { |
3355 |
+- if (node->ip_version != 6) |
3356 |
++ if (node->ip_version == 6 && horrible_match_v6(node, ip)) |
3357 |
++ return node->value; |
3358 |
++ } |
3359 |
++ return NULL; |
3360 |
++} |
3361 |
++ |
3362 |
++ |
3363 |
++static __init void |
3364 |
++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) |
3365 |
++{ |
3366 |
++ struct horrible_allowedips_node *node; |
3367 |
++ struct hlist_node *h; |
3368 |
++ |
3369 |
++ hlist_for_each_entry_safe(node, h, &table->head, table) { |
3370 |
++ if (node->value != value) |
3371 |
+ continue; |
3372 |
+- if (horrible_match_v6(node, ip)) { |
3373 |
+- ret = node->value; |
3374 |
+- break; |
3375 |
+- } |
3376 |
++ hlist_del(&node->table); |
3377 |
++ kfree(node); |
3378 |
+ } |
3379 |
+- return ret; |
3380 |
++ |
3381 |
+ } |
3382 |
+ |
3383 |
+ static __init bool randomized_test(void) |
3384 |
+@@ -296,6 +278,7 @@ static __init bool randomized_test(void) |
3385 |
+ goto free; |
3386 |
+ } |
3387 |
+ kref_init(&peers[i]->refcount); |
3388 |
++ INIT_LIST_HEAD(&peers[i]->allowedips_list); |
3389 |
+ } |
3390 |
+ |
3391 |
+ mutex_lock(&mutex); |
3392 |
+@@ -333,7 +316,7 @@ static __init bool randomized_test(void) |
3393 |
+ if (wg_allowedips_insert_v4(&t, |
3394 |
+ (struct in_addr *)mutated, |
3395 |
+ cidr, peer, &mutex) < 0) { |
3396 |
+- pr_err("allowedips random malloc: FAIL\n"); |
3397 |
++ pr_err("allowedips random self-test malloc: FAIL\n"); |
3398 |
+ goto free_locked; |
3399 |
+ } |
3400 |
+ if (horrible_allowedips_insert_v4(&h, |
3401 |
+@@ -396,23 +379,33 @@ static __init bool randomized_test(void) |
3402 |
+ print_tree(t.root6, 128); |
3403 |
+ } |
3404 |
+ |
3405 |
+- for (i = 0; i < NUM_QUERIES; ++i) { |
3406 |
+- prandom_bytes(ip, 4); |
3407 |
+- if (lookup(t.root4, 32, ip) != |
3408 |
+- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { |
3409 |
+- pr_err("allowedips random self-test: FAIL\n"); |
3410 |
+- goto free; |
3411 |
++ for (j = 0;; ++j) { |
3412 |
++ for (i = 0; i < NUM_QUERIES; ++i) { |
3413 |
++ prandom_bytes(ip, 4); |
3414 |
++ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { |
3415 |
++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); |
3416 |
++ pr_err("allowedips random v4 self-test: FAIL\n"); |
3417 |
++ goto free; |
3418 |
++ } |
3419 |
++ prandom_bytes(ip, 16); |
3420 |
++ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { |
3421 |
++ pr_err("allowedips random v6 self-test: FAIL\n"); |
3422 |
++ goto free; |
3423 |
++ } |
3424 |
+ } |
3425 |
++ if (j >= NUM_PEERS) |
3426 |
++ break; |
3427 |
++ mutex_lock(&mutex); |
3428 |
++ wg_allowedips_remove_by_peer(&t, peers[j], &mutex); |
3429 |
++ mutex_unlock(&mutex); |
3430 |
++ horrible_allowedips_remove_by_value(&h, peers[j]); |
3431 |
+ } |
3432 |
+ |
3433 |
+- for (i = 0; i < NUM_QUERIES; ++i) { |
3434 |
+- prandom_bytes(ip, 16); |
3435 |
+- if (lookup(t.root6, 128, ip) != |
3436 |
+- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { |
3437 |
+- pr_err("allowedips random self-test: FAIL\n"); |
3438 |
+- goto free; |
3439 |
+- } |
3440 |
++ if (t.root4 || t.root6) { |
3441 |
++ pr_err("allowedips random self-test removal: FAIL\n"); |
3442 |
++ goto free; |
3443 |
+ } |
3444 |
++ |
3445 |
+ ret = true; |
3446 |
+ |
3447 |
+ free: |
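Note on the selftest changes above: the reference implementation keeps horrible_cidr_to_mask()'s arithmetic, which builds a mask from a prefix length by filling whole 0xff bytes and then shifting 0xFFFFFFFF for the partial word, stored in network byte order. A standalone IPv4-only version of the same computation:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* Build the network-byte-order netmask for an IPv4 prefix length (0..32). */
static uint32_t cidr_to_mask_be(unsigned int cidr)
{
    if (cidr == 0)
        return 0;                                   /* avoid an undefined 32-bit shift */
    return htonl((uint32_t)(0xFFFFFFFFUL << (32 - cidr)));
}

int main(void)
{
    struct in_addr a = { .s_addr = cidr_to_mask_be(20) };

    printf("/20 -> %s\n", inet_ntoa(a));            /* 255.255.240.0 */
    return 0;
}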
3448 |
+diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c |
3449 |
+index d9ad850daa793..8c496b7471082 100644 |
3450 |
+--- a/drivers/net/wireguard/socket.c |
3451 |
++++ b/drivers/net/wireguard/socket.c |
3452 |
+@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4, |
3453 |
+ if (new4) |
3454 |
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); |
3455 |
+ mutex_unlock(&wg->socket_update_lock); |
3456 |
+- synchronize_rcu(); |
3457 |
++ synchronize_net(); |
3458 |
+ sock_free(old4); |
3459 |
+ sock_free(old6); |
3460 |
+ } |
3461 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c |
3462 |
+index 02d0aa0b815e9..d2489dc9dc139 100644 |
3463 |
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c |
3464 |
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c |
3465 |
+@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = { |
3466 |
+ .reconfig_complete = mt76x02_reconfig_complete, |
3467 |
+ }; |
3468 |
+ |
3469 |
+-static int mt76x0e_register_device(struct mt76x02_dev *dev) |
3470 |
++static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume) |
3471 |
+ { |
3472 |
+ int err; |
3473 |
+ |
3474 |
+@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) |
3475 |
+ if (err < 0) |
3476 |
+ return err; |
3477 |
+ |
3478 |
+- err = mt76x02_dma_init(dev); |
3479 |
+- if (err < 0) |
3480 |
+- return err; |
3481 |
++ if (!resume) { |
3482 |
++ err = mt76x02_dma_init(dev); |
3483 |
++ if (err < 0) |
3484 |
++ return err; |
3485 |
++ } |
3486 |
+ |
3487 |
+ err = mt76x0_init_hardware(dev); |
3488 |
+ if (err < 0) |
3489 |
+@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) |
3490 |
+ mt76_clear(dev, 0x110, BIT(9)); |
3491 |
+ mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); |
3492 |
+ |
3493 |
++ return 0; |
3494 |
++} |
3495 |
++ |
3496 |
++static int mt76x0e_register_device(struct mt76x02_dev *dev) |
3497 |
++{ |
3498 |
++ int err; |
3499 |
++ |
3500 |
++ err = mt76x0e_init_hardware(dev, false); |
3501 |
++ if (err < 0) |
3502 |
++ return err; |
3503 |
++ |
3504 |
+ err = mt76x0_register_device(dev); |
3505 |
+ if (err < 0) |
3506 |
+ return err; |
3507 |
+@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
3508 |
+ if (ret) |
3509 |
+ return ret; |
3510 |
+ |
3511 |
++ mt76_pci_disable_aspm(pdev); |
3512 |
++ |
3513 |
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops, |
3514 |
+ &drv_ops); |
3515 |
+ if (!mdev) |
3516 |
+@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev) |
3517 |
+ mt76_free_device(mdev); |
3518 |
+ } |
3519 |
+ |
3520 |
++#ifdef CONFIG_PM |
3521 |
++static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state) |
3522 |
++{ |
3523 |
++ struct mt76_dev *mdev = pci_get_drvdata(pdev); |
3524 |
++ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); |
3525 |
++ int i; |
3526 |
++ |
3527 |
++ mt76_worker_disable(&mdev->tx_worker); |
3528 |
++ for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++) |
3529 |
++ mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true); |
3530 |
++ for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++) |
3531 |
++ mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true); |
3532 |
++ napi_disable(&mdev->tx_napi); |
3533 |
++ |
3534 |
++ mt76_for_each_q_rx(mdev, i) |
3535 |
++ napi_disable(&mdev->napi[i]); |
3536 |
++ |
3537 |
++ mt76x02_dma_disable(dev); |
3538 |
++ mt76x02_mcu_cleanup(dev); |
3539 |
++ mt76x0_chip_onoff(dev, false, false); |
3540 |
++ |
3541 |
++ pci_enable_wake(pdev, pci_choose_state(pdev, state), true); |
3542 |
++ pci_save_state(pdev); |
3543 |
++ |
3544 |
++ return pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
3545 |
++} |
3546 |
++ |
3547 |
++static int mt76x0e_resume(struct pci_dev *pdev) |
3548 |
++{ |
3549 |
++ struct mt76_dev *mdev = pci_get_drvdata(pdev); |
3550 |
++ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); |
3551 |
++ int err, i; |
3552 |
++ |
3553 |
++ err = pci_set_power_state(pdev, PCI_D0); |
3554 |
++ if (err) |
3555 |
++ return err; |
3556 |
++ |
3557 |
++ pci_restore_state(pdev); |
3558 |
++ |
3559 |
++ mt76_worker_enable(&mdev->tx_worker); |
3560 |
++ |
3561 |
++ mt76_for_each_q_rx(mdev, i) { |
3562 |
++ mt76_queue_rx_reset(dev, i); |
3563 |
++ napi_enable(&mdev->napi[i]); |
3564 |
++ napi_schedule(&mdev->napi[i]); |
3565 |
++ } |
3566 |
++ |
3567 |
++ napi_enable(&mdev->tx_napi); |
3568 |
++ napi_schedule(&mdev->tx_napi); |
3569 |
++ |
3570 |
++ return mt76x0e_init_hardware(dev, true); |
3571 |
++} |
3572 |
++#endif /* CONFIG_PM */ |
3573 |
++ |
3574 |
+ static const struct pci_device_id mt76x0e_device_table[] = { |
3575 |
+ { PCI_DEVICE(0x14c3, 0x7610) }, |
3576 |
+ { PCI_DEVICE(0x14c3, 0x7630) }, |
3577 |
+@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = { |
3578 |
+ .id_table = mt76x0e_device_table, |
3579 |
+ .probe = mt76x0e_probe, |
3580 |
+ .remove = mt76x0e_remove, |
3581 |
++#ifdef CONFIG_PM |
3582 |
++ .suspend = mt76x0e_suspend, |
3583 |
++ .resume = mt76x0e_resume, |
3584 |
++#endif /* CONFIG_PM */ |
3585 |
+ }; |
3586 |
+ |
3587 |
+ module_pci_driver(mt76x0e_driver); |
3588 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c |
3589 |
+index 62afbad77596b..be88c9f5637a5 100644 |
3590 |
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c |
3591 |
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c |
3592 |
+@@ -391,29 +391,37 @@ static void |
3593 |
+ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, |
3594 |
+ u16 wlan_idx) |
3595 |
+ { |
3596 |
+- struct mt7921_mcu_wlan_info_event *wtbl_info = |
3597 |
+- (struct mt7921_mcu_wlan_info_event *)(skb->data); |
3598 |
+- struct rate_info rate = {}; |
3599 |
+- u8 curr_idx = wtbl_info->rate_info.rate_idx; |
3600 |
+- u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]); |
3601 |
+- struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap; |
3602 |
++ struct mt7921_mcu_wlan_info_event *wtbl_info; |
3603 |
+ struct mt76_phy *mphy = &dev->mphy; |
3604 |
+ struct mt7921_sta_stats *stats; |
3605 |
++ struct rate_info rate = {}; |
3606 |
+ struct mt7921_sta *msta; |
3607 |
+ struct mt76_wcid *wcid; |
3608 |
++ u8 idx; |
3609 |
+ |
3610 |
+ if (wlan_idx >= MT76_N_WCIDS) |
3611 |
+ return; |
3612 |
++ |
3613 |
++ wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data; |
3614 |
++ idx = wtbl_info->rate_info.rate_idx; |
3615 |
++ if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate)) |
3616 |
++ return; |
3617 |
++ |
3618 |
++ rcu_read_lock(); |
3619 |
++ |
3620 |
+ wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); |
3621 |
+ if (!wcid) |
3622 |
+- return; |
3623 |
++ goto out; |
3624 |
+ |
3625 |
+ msta = container_of(wcid, struct mt7921_sta, wcid); |
3626 |
+ stats = &msta->stats; |
3627 |
+ |
3628 |
+ /* current rate */ |
3629 |
+- mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr); |
3630 |
++ mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate, |
3631 |
++ le16_to_cpu(wtbl_info->rate_info.rate[idx])); |
3632 |
+ stats->tx_rate = rate; |
3633 |
++out: |
3634 |
++ rcu_read_unlock(); |
3635 |
+ } |
3636 |
+ |
3637 |
+ static void |
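Note on the mt7921 hunk above: the firmware-reported rate_idx is now validated against the size of the rate[] array before it is used as an index, and the wcid lookup is done under rcu_read_lock(). The bounds-check half in plain C is sketched below; the struct layout and the array size of 8 are illustrative, not the real mt7921 event format.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct rate_info_event {
    uint8_t  rate_idx;
    uint16_t rate[8];
};

/* Reject out-of-range indices coming from an untrusted (firmware) source. */
static int current_rate(const struct rate_info_event *ev, uint16_t *out)
{
    if (ev->rate_idx >= ARRAY_SIZE(ev->rate))
        return -1;
    *out = ev->rate[ev->rate_idx];
    return 0;
}

int main(void)
{
    struct rate_info_event ok  = { .rate_idx = 3, .rate = { 1, 2, 3, 4 } };
    struct rate_info_event bad = { .rate_idx = 200 };
    uint16_t r = 0;
    int rc;

    rc = current_rate(&ok, &r);
    printf("ok:  %d (rate=%u)\n", rc, r);
    printf("bad: %d\n", current_rate(&bad, &r));
    return 0;
}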
3638 |
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c |
3639 |
+index 193b723fe3bd7..c58996c1e2309 100644 |
3640 |
+--- a/drivers/net/xen-netback/interface.c |
3641 |
++++ b/drivers/net/xen-netback/interface.c |
3642 |
+@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue) |
3643 |
+ { |
3644 |
+ if (queue->task) { |
3645 |
+ kthread_stop(queue->task); |
3646 |
++ put_task_struct(queue->task); |
3647 |
+ queue->task = NULL; |
3648 |
+ } |
3649 |
+ |
3650 |
+@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue, |
3651 |
+ if (IS_ERR(task)) |
3652 |
+ goto kthread_err; |
3653 |
+ queue->task = task; |
3654 |
++ /* |
3655 |
++ * Take a reference to the task in order to prevent it from being freed |
3656 |
++ * if the thread function returns before kthread_stop is called. |
3657 |
++ */ |
3658 |
++ get_task_struct(task); |
3659 |
+ |
3660 |
+ task = kthread_run(xenvif_dealloc_kthread, queue, |
3661 |
+ "%s-dealloc", queue->name); |
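Note on the xen-netback hunk above: the driver takes get_task_struct() on the kthread right after creating it, so the later kthread_stop()/put_task_struct() pair in xenvif_disconnect_queue() remains safe even if the thread function has already returned. Below is a generic, single-threaded sketch of the "hold your own reference to a shared handle" idea using a plain counter; it is not the kernel task_struct API and is not thread-safe.

#include <stdio.h>
#include <stdlib.h>

struct handle {
    int refs;
    int id;
};

static struct handle *handle_create(int id)
{
    struct handle *h = calloc(1, sizeof(*h));

    if (h) {
        h->refs = 1;   /* creator's reference */
        h->id = id;
    }
    return h;
}

static void handle_get(struct handle *h) { h->refs++; }

static void handle_put(struct handle *h)
{
    if (--h->refs == 0) {
        printf("handle %d freed\n", h->id);
        free(h);
    }
}

int main(void)
{
    struct handle *worker = handle_create(1);

    if (!worker)
        return 1;
    handle_get(worker);   /* like get_task_struct(task): keep it alive for our stop path */
    handle_put(worker);   /* the worker's own exit drops its reference ... */
    handle_put(worker);   /* ... and our teardown drops the one we took; now it frees */
    return 0;
}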
3662 |
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c |
3663 |
+index be905d4fdb47f..ce8b3ce7582be 100644 |
3664 |
+--- a/drivers/nvme/host/rdma.c |
3665 |
++++ b/drivers/nvme/host/rdma.c |
3666 |
+@@ -1319,16 +1319,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, |
3667 |
+ int count) |
3668 |
+ { |
3669 |
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl; |
3670 |
+- struct scatterlist *sgl = req->data_sgl.sg_table.sgl; |
3671 |
+ struct ib_sge *sge = &req->sge[1]; |
3672 |
++ struct scatterlist *sgl; |
3673 |
+ u32 len = 0; |
3674 |
+ int i; |
3675 |
+ |
3676 |
+- for (i = 0; i < count; i++, sgl++, sge++) { |
3677 |
++ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { |
3678 |
+ sge->addr = sg_dma_address(sgl); |
3679 |
+ sge->length = sg_dma_len(sgl); |
3680 |
+ sge->lkey = queue->device->pd->local_dma_lkey; |
3681 |
+ len += sge->length; |
3682 |
++ sge++; |
3683 |
+ } |
3684 |
+ |
3685 |
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); |
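Note on the rdma.c hunk above: the loop switches from raw sgl++ pointer arithmetic to for_each_sg(), because a scatterlist may be chained across several arrays and plain increments walk off the end of the first chunk. The standalone analogue below uses an explicitly chained segment array and a seg_next() helper; struct seg is an illustrative stand-in, not the kernel scatterlist.

#include <stddef.h>
#include <stdio.h>

struct seg {
    size_t len;
    struct seg *chain;   /* set only on the slot that links to the next array chunk */
};

/* Step to the next segment, following a chain link when the chunk ends. */
static struct seg *seg_next(struct seg *s)
{
    s++;
    if (s->chain)        /* this slot carries no data, it only points at the next chunk */
        s = s->chain;
    return s;
}

int main(void)
{
    struct seg chunk2[2] = { { .len = 30 }, { .len = 40 } };
    struct seg chunk1[3] = { { .len = 10 }, { .len = 20 }, { .len = 0, .chain = chunk2 } };
    struct seg *s = chunk1;
    size_t total = 0;
    int remaining = 4;   /* data entries: 10, 20, 30, 40 */

    while (remaining--) {
        total += s->len;
        if (remaining)
            s = seg_next(s);
    }
    printf("total = %zu (plain s++ would have read the chain slot instead)\n", total);
    return 0;
}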
3686 |
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c |
3687 |
+index 348057fdc568f..7d16cb4cd8acf 100644 |
3688 |
+--- a/drivers/nvme/target/core.c |
3689 |
++++ b/drivers/nvme/target/core.c |
3690 |
+@@ -999,19 +999,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req) |
3691 |
+ return req->transfer_len - req->metadata_len; |
3692 |
+ } |
3693 |
+ |
3694 |
+-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req) |
3695 |
++static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev, |
3696 |
++ struct nvmet_req *req) |
3697 |
+ { |
3698 |
+- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt, |
3699 |
++ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, |
3700 |
+ nvmet_data_transfer_len(req)); |
3701 |
+ if (!req->sg) |
3702 |
+ goto out_err; |
3703 |
+ |
3704 |
+ if (req->metadata_len) { |
3705 |
+- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev, |
3706 |
++ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, |
3707 |
+ &req->metadata_sg_cnt, req->metadata_len); |
3708 |
+ if (!req->metadata_sg) |
3709 |
+ goto out_free_sg; |
3710 |
+ } |
3711 |
++ |
3712 |
++ req->p2p_dev = p2p_dev; |
3713 |
++ |
3714 |
+ return 0; |
3715 |
+ out_free_sg: |
3716 |
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg); |
3717 |
+@@ -1019,25 +1023,19 @@ out_err: |
3718 |
+ return -ENOMEM; |
3719 |
+ } |
3720 |
+ |
3721 |
+-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req) |
3722 |
++static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req) |
3723 |
+ { |
3724 |
+- if (!IS_ENABLED(CONFIG_PCI_P2PDMA)) |
3725 |
+- return false; |
3726 |
+- |
3727 |
+- if (req->sq->ctrl && req->sq->qid && req->ns) { |
3728 |
+- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, |
3729 |
+- req->ns->nsid); |
3730 |
+- if (req->p2p_dev) |
3731 |
+- return true; |
3732 |
+- } |
3733 |
+- |
3734 |
+- req->p2p_dev = NULL; |
3735 |
+- return false; |
3736 |
++ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) || |
3737 |
++ !req->sq->ctrl || !req->sq->qid || !req->ns) |
3738 |
++ return NULL; |
3739 |
++ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); |
3740 |
+ } |
3741 |
+ |
3742 |
+ int nvmet_req_alloc_sgls(struct nvmet_req *req) |
3743 |
+ { |
3744 |
+- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req)) |
3745 |
++ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req); |
3746 |
++ |
3747 |
++ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req)) |
3748 |
+ return 0; |
3749 |
+ |
3750 |
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, |
3751 |
+@@ -1066,6 +1064,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req) |
3752 |
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg); |
3753 |
+ if (req->metadata_sg) |
3754 |
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); |
3755 |
++ req->p2p_dev = NULL; |
3756 |
+ } else { |
3757 |
+ sgl_free(req->sg); |
3758 |
+ if (req->metadata_sg) |
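Note on the nvmet core hunk above: the P2P device is now passed into nvmet_req_alloc_p2pmem_sgls() as a parameter and stored in req->p2p_dev only once both allocations have succeeded, with the free path clearing it again, so a failed allocation can never leave a stale "these SGLs are P2P memory" marker behind. A small sketch of that "publish ownership only on success" shape, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct request {
    void *buf;
    void *meta;
    const char *backing;   /* non-NULL only when buf/meta really come from that backing */
};

static int alloc_from_backing(struct request *req, const char *backing,
                              size_t len, size_t meta_len)
{
    req->buf = malloc(len);
    if (!req->buf)
        return -1;
    if (meta_len) {
        req->meta = malloc(meta_len);
        if (!req->meta) {
            free(req->buf);
            req->buf = NULL;
            return -1;
        }
    }
    req->backing = backing;   /* record the owner only after everything succeeded */
    return 0;
}

int main(void)
{
    struct request req = { 0 };

    if (alloc_from_backing(&req, "p2p-dev-0", 64, 16) == 0)
        printf("allocated from %s\n", req.backing);
    free(req.meta);
    free(req.buf);
    return 0;
}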
3759 |
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
3760 |
+index 920cf329268b5..f8a5a4eb5bcef 100644 |
3761 |
+--- a/drivers/scsi/lpfc/lpfc_sli.c |
3762 |
++++ b/drivers/scsi/lpfc/lpfc_sli.c |
3763 |
+@@ -20591,10 +20591,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
3764 |
+ abtswqe = &abtsiocb->wqe; |
3765 |
+ memset(abtswqe, 0, sizeof(*abtswqe)); |
3766 |
+ |
3767 |
+- if (lpfc_is_link_up(phba)) |
3768 |
++ if (!lpfc_is_link_up(phba)) |
3769 |
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); |
3770 |
+- else |
3771 |
+- bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0); |
3772 |
+ bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); |
3773 |
+ abtswqe->abort_cmd.rsrvd5 = 0; |
3774 |
+ abtswqe->abort_cmd.wqe_com.abort_tag = xritag; |
3775 |
+diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c |
3776 |
+index 7a77e375b503c..6b52f0c526baa 100644 |
3777 |
+--- a/drivers/tee/optee/call.c |
3778 |
++++ b/drivers/tee/optee/call.c |
3779 |
+@@ -216,6 +216,7 @@ int optee_open_session(struct tee_context *ctx, |
3780 |
+ struct optee_msg_arg *msg_arg; |
3781 |
+ phys_addr_t msg_parg; |
3782 |
+ struct optee_session *sess = NULL; |
3783 |
++ uuid_t client_uuid; |
3784 |
+ |
3785 |
+ /* +2 for the meta parameters added below */ |
3786 |
+ shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); |
3787 |
+@@ -236,10 +237,11 @@ int optee_open_session(struct tee_context *ctx, |
3788 |
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); |
3789 |
+ msg_arg->params[1].u.value.c = arg->clnt_login; |
3790 |
+ |
3791 |
+- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value, |
3792 |
+- arg->clnt_login, arg->clnt_uuid); |
3793 |
++ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login, |
3794 |
++ arg->clnt_uuid); |
3795 |
+ if (rc) |
3796 |
+ goto out; |
3797 |
++ export_uuid(msg_arg->params[1].u.octets, &client_uuid); |
3798 |
+ |
3799 |
+ rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); |
3800 |
+ if (rc) |
3801 |
+diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h |
3802 |
+index 81ff593ac4ec2..e3d72d09c4848 100644 |
3803 |
+--- a/drivers/tee/optee/optee_msg.h |
3804 |
++++ b/drivers/tee/optee/optee_msg.h |
3805 |
+@@ -9,7 +9,7 @@ |
3806 |
+ #include <linux/types.h> |
3807 |
+ |
3808 |
+ /* |
3809 |
+- * This file defines the OP-TEE message protocol used to communicate |
3810 |
++ * This file defines the OP-TEE message protocol (ABI) used to communicate |
3811 |
+ * with an instance of OP-TEE running in secure world. |
3812 |
+ * |
3813 |
+ * This file is divided into two sections. |
3814 |
+@@ -144,9 +144,10 @@ struct optee_msg_param_value { |
3815 |
+ * @tmem: parameter by temporary memory reference |
3816 |
+ * @rmem: parameter by registered memory reference |
3817 |
+ * @value: parameter by opaque value |
3818 |
++ * @octets: parameter by octet string |
3819 |
+ * |
3820 |
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in |
3821 |
+- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, |
3822 |
++ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets, |
3823 |
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and |
3824 |
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem, |
3825 |
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. |
3826 |
+@@ -157,6 +158,7 @@ struct optee_msg_param { |
3827 |
+ struct optee_msg_param_tmem tmem; |
3828 |
+ struct optee_msg_param_rmem rmem; |
3829 |
+ struct optee_msg_param_value value; |
3830 |
++ u8 octets[24]; |
3831 |
+ } u; |
3832 |
+ }; |
3833 |
+ |
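Note on the OP-TEE hunks above: instead of computing the client UUID directly into the u64-based value member (whose layout depends on host endianness), the session setup now builds it in a local uuid_t and serialises it with export_uuid() into the new octets[] member of the message union. The sketch below shows the byte-wise serialisation idea; the 16-byte uuid type and helper names are illustrative, not the kernel's uuid API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t b[16]; } uuid_like_t;

union msg_param_u {
    struct { uint64_t a, b, c; } value;   /* interpreted as three 64-bit values */
    uint8_t octets[24];                   /* interpreted as a raw byte string */
};

/* Like export_uuid(): copy the bytes as-is, independent of host endianness. */
static void export_uuid_like(uint8_t dst[16], const uuid_like_t *u)
{
    memcpy(dst, u->b, 16);
}

int main(void)
{
    uuid_like_t uuid;
    union msg_param_u p;
    int i;

    for (i = 0; i < 16; i++)
        uuid.b[i] = (uint8_t)i;
    memset(&p, 0, sizeof(p));
    export_uuid_like(p.octets, &uuid);
    printf("first octets: %02x %02x %02x %02x ...\n",
           p.octets[0], p.octets[1], p.octets[2], p.octets[3]);
    return 0;
}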
3834 |
+diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c |
3835 |
+index f8e882592ba5d..99abdc03c44ce 100644 |
3836 |
+--- a/drivers/thermal/intel/therm_throt.c |
3837 |
++++ b/drivers/thermal/intel/therm_throt.c |
3838 |
+@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void) |
3839 |
+ return atomic_read(&therm_throt_en); |
3840 |
+ } |
3841 |
+ |
3842 |
++void __init therm_lvt_init(void) |
3843 |
++{ |
3844 |
++ /* |
3845 |
++ * This function is only called on boot CPU. Save the init thermal |
3846 |
++ * LVT value on BSP and use that value to restore APs' thermal LVT |
3847 |
++ * entry BIOS programmed later |
3848 |
++ */ |
3849 |
++ if (intel_thermal_supported(&boot_cpu_data)) |
3850 |
++ lvtthmr_init = apic_read(APIC_LVTTHMR); |
3851 |
++} |
3852 |
++ |
3853 |
+ void intel_init_thermal(struct cpuinfo_x86 *c) |
3854 |
+ { |
3855 |
+ unsigned int cpu = smp_processor_id(); |
3856 |
+@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) |
3857 |
+ if (!intel_thermal_supported(c)) |
3858 |
+ return; |
3859 |
+ |
3860 |
+- /* On the BSP? */ |
3861 |
+- if (c == &boot_cpu_data) |
3862 |
+- lvtthmr_init = apic_read(APIC_LVTTHMR); |
3863 |
+- |
3864 |
+ /* |
3865 |
+ * First check if its enabled already, in which case there might |
3866 |
+ * be some SMM goo which handles it, so we can't even put a handler |
3867 |
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c |
3868 |
+index 99dfa884cbefb..68c6535bbf7f0 100644 |
3869 |
+--- a/drivers/tty/serial/stm32-usart.c |
3870 |
++++ b/drivers/tty/serial/stm32-usart.c |
3871 |
+@@ -214,14 +214,11 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) |
3872 |
+ struct tty_port *tport = &port->state->port; |
3873 |
+ struct stm32_port *stm32_port = to_stm32_port(port); |
3874 |
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; |
3875 |
+- unsigned long c, flags; |
3876 |
++ unsigned long c; |
3877 |
+ u32 sr; |
3878 |
+ char flag; |
3879 |
+ |
3880 |
+- if (threaded) |
3881 |
+- spin_lock_irqsave(&port->lock, flags); |
3882 |
+- else |
3883 |
+- spin_lock(&port->lock); |
3884 |
++ spin_lock(&port->lock); |
3885 |
+ |
3886 |
+ while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res, |
3887 |
+ threaded)) { |
3888 |
+@@ -278,10 +275,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) |
3889 |
+ uart_insert_char(port, sr, USART_SR_ORE, c, flag); |
3890 |
+ } |
3891 |
+ |
3892 |
+- if (threaded) |
3893 |
+- spin_unlock_irqrestore(&port->lock, flags); |
3894 |
+- else |
3895 |
+- spin_unlock(&port->lock); |
3896 |
++ spin_unlock(&port->lock); |
3897 |
+ |
3898 |
+ tty_flip_buffer_push(tport); |
3899 |
+ } |
3900 |
+@@ -654,7 +648,8 @@ static int stm32_usart_startup(struct uart_port *port) |
3901 |
+ |
3902 |
+ ret = request_threaded_irq(port->irq, stm32_usart_interrupt, |
3903 |
+ stm32_usart_threaded_interrupt, |
3904 |
+- IRQF_NO_SUSPEND, name, port); |
3905 |
++ IRQF_ONESHOT | IRQF_NO_SUSPEND, |
3906 |
++ name, port); |
3907 |
+ if (ret) |
3908 |
+ return ret; |
3909 |
+ |
3910 |
+@@ -1136,6 +1131,13 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, |
3911 |
+ struct dma_async_tx_descriptor *desc = NULL; |
3912 |
+ int ret; |
3913 |
+ |
3914 |
++ /* |
3915 |
++ * Using DMA and threaded handler for the console could lead to |
3916 |
++ * deadlocks. |
3917 |
++ */ |
3918 |
++ if (uart_console(port)) |
3919 |
++ return -ENODEV; |
3920 |
++ |
3921 |
+ /* Request DMA RX channel */ |
3922 |
+ stm32port->rx_ch = dma_request_slave_channel(dev, "rx"); |
3923 |
+ if (!stm32port->rx_ch) { |
3924 |
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c |
3925 |
+index 510fd0572feb1..e3f429f1575e9 100644 |
3926 |
+--- a/drivers/usb/dwc2/core_intr.c |
3927 |
++++ b/drivers/usb/dwc2/core_intr.c |
3928 |
+@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg, |
3929 |
+ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); |
3930 |
+ |
3931 |
+ hsotg->hibernated = 0; |
3932 |
++ |
3933 |
++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \ |
3934 |
++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) |
3935 |
+ hsotg->bus_suspended = 0; |
3936 |
++#endif |
3937 |
+ |
3938 |
+ if (gpwrdn & GPWRDN_IDSTS) { |
3939 |
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL; |
3940 |
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig |
3941 |
+index 4abddbebd4b23..c691127bc805a 100644 |
3942 |
+--- a/drivers/vfio/pci/Kconfig |
3943 |
++++ b/drivers/vfio/pci/Kconfig |
3944 |
+@@ -2,6 +2,7 @@ |
3945 |
+ config VFIO_PCI |
3946 |
+ tristate "VFIO support for PCI devices" |
3947 |
+ depends on VFIO && PCI && EVENTFD |
3948 |
++ depends on MMU |
3949 |
+ select VFIO_VIRQFD |
3950 |
+ select IRQ_BYPASS_MANAGER |
3951 |
+ help |
3952 |
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c |
3953 |
+index a402adee8a215..47f21a6ca7fe9 100644 |
3954 |
+--- a/drivers/vfio/pci/vfio_pci_config.c |
3955 |
++++ b/drivers/vfio/pci/vfio_pci_config.c |
3956 |
+@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) |
3957 |
+ if (len == 0xFF) { |
3958 |
+ len = vfio_ext_cap_len(vdev, ecap, epos); |
3959 |
+ if (len < 0) |
3960 |
+- return ret; |
3961 |
++ return len; |
3962 |
+ } |
3963 |
+ } |
3964 |
+ |
3965 |
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c |
3966 |
+index fb4b385191f28..e83a7cd15c956 100644 |
3967 |
+--- a/drivers/vfio/platform/vfio_platform_common.c |
3968 |
++++ b/drivers/vfio/platform/vfio_platform_common.c |
3969 |
+@@ -289,7 +289,7 @@ err_irq: |
3970 |
+ vfio_platform_regions_cleanup(vdev); |
3971 |
+ err_reg: |
3972 |
+ mutex_unlock(&driver_lock); |
3973 |
+- module_put(THIS_MODULE); |
3974 |
++ module_put(vdev->parent_module); |
3975 |
+ return ret; |
3976 |
+ } |
3977 |
+ |
3978 |
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
3979 |
+index 5b82050b871a7..27c3680074814 100644 |
3980 |
+--- a/fs/btrfs/extent-tree.c |
3981 |
++++ b/fs/btrfs/extent-tree.c |
3982 |
+@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, |
3983 |
+ trace_run_delayed_ref_head(fs_info, head, 0); |
3984 |
+ btrfs_delayed_ref_unlock(head); |
3985 |
+ btrfs_put_delayed_ref_head(head); |
3986 |
+- return 0; |
3987 |
++ return ret; |
3988 |
+ } |
3989 |
+ |
3990 |
+ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( |
3991 |
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c |
3992 |
+index 47cd3a6dc6351..eed75bb0fedbf 100644 |
3993 |
+--- a/fs/btrfs/file-item.c |
3994 |
++++ b/fs/btrfs/file-item.c |
3995 |
+@@ -787,7 +787,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, |
3996 |
+ u64 end_byte = bytenr + len; |
3997 |
+ u64 csum_end; |
3998 |
+ struct extent_buffer *leaf; |
3999 |
+- int ret; |
4000 |
++ int ret = 0; |
4001 |
+ const u32 csum_size = fs_info->csum_size; |
4002 |
+ u32 blocksize_bits = fs_info->sectorsize_bits; |
4003 |
+ |
4004 |
+@@ -805,6 +805,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, |
4005 |
+ |
4006 |
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
4007 |
+ if (ret > 0) { |
4008 |
++ ret = 0; |
4009 |
+ if (path->slots[0] == 0) |
4010 |
+ break; |
4011 |
+ path->slots[0]--; |
4012 |
+@@ -861,7 +862,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, |
4013 |
+ ret = btrfs_del_items(trans, root, path, |
4014 |
+ path->slots[0], del_nr); |
4015 |
+ if (ret) |
4016 |
+- goto out; |
4017 |
++ break; |
4018 |
+ if (key.offset == bytenr) |
4019 |
+ break; |
4020 |
+ } else if (key.offset < bytenr && csum_end > end_byte) { |
4021 |
+@@ -905,8 +906,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, |
4022 |
+ ret = btrfs_split_item(trans, root, path, &key, offset); |
4023 |
+ if (ret && ret != -EAGAIN) { |
4024 |
+ btrfs_abort_transaction(trans, ret); |
4025 |
+- goto out; |
4026 |
++ break; |
4027 |
+ } |
4028 |
++ ret = 0; |
4029 |
+ |
4030 |
+ key.offset = end_byte - 1; |
4031 |
+ } else { |
4032 |
+@@ -916,12 +918,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, |
4033 |
+ } |
4034 |
+ btrfs_release_path(path); |
4035 |
+ } |
4036 |
+- ret = 0; |
4037 |
+-out: |
4038 |
+ btrfs_free_path(path); |
4039 |
+ return ret; |
4040 |
+ } |
4041 |
+ |
4042 |
++static int find_next_csum_offset(struct btrfs_root *root, |
4043 |
++ struct btrfs_path *path, |
4044 |
++ u64 *next_offset) |
4045 |
++{ |
4046 |
++ const u32 nritems = btrfs_header_nritems(path->nodes[0]); |
4047 |
++ struct btrfs_key found_key; |
4048 |
++ int slot = path->slots[0] + 1; |
4049 |
++ int ret; |
4050 |
++ |
4051 |
++ if (nritems == 0 || slot >= nritems) { |
4052 |
++ ret = btrfs_next_leaf(root, path); |
4053 |
++ if (ret < 0) { |
4054 |
++ return ret; |
4055 |
++ } else if (ret > 0) { |
4056 |
++ *next_offset = (u64)-1; |
4057 |
++ return 0; |
4058 |
++ } |
4059 |
++ slot = path->slots[0]; |
4060 |
++ } |
4061 |
++ |
4062 |
++ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); |
4063 |
++ |
4064 |
++ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || |
4065 |
++ found_key.type != BTRFS_EXTENT_CSUM_KEY) |
4066 |
++ *next_offset = (u64)-1; |
4067 |
++ else |
4068 |
++ *next_offset = found_key.offset; |
4069 |
++ |
4070 |
++ return 0; |
4071 |
++} |
4072 |
++ |
4073 |
+ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, |
4074 |
+ struct btrfs_root *root, |
4075 |
+ struct btrfs_ordered_sum *sums) |
4076 |
+@@ -937,7 +968,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, |
4077 |
+ u64 total_bytes = 0; |
4078 |
+ u64 csum_offset; |
4079 |
+ u64 bytenr; |
4080 |
+- u32 nritems; |
4081 |
+ u32 ins_size; |
4082 |
+ int index = 0; |
4083 |
+ int found_next; |
4084 |
+@@ -980,26 +1010,10 @@ again: |
4085 |
+ goto insert; |
4086 |
+ } |
4087 |
+ } else { |
4088 |
+- int slot = path->slots[0] + 1; |
4089 |
+- /* we didn't find a csum item, insert one */ |
4090 |
+- nritems = btrfs_header_nritems(path->nodes[0]); |
4091 |
+- if (!nritems || (path->slots[0] >= nritems - 1)) { |
4092 |
+- ret = btrfs_next_leaf(root, path); |
4093 |
+- if (ret < 0) { |
4094 |
+- goto out; |
4095 |
+- } else if (ret > 0) { |
4096 |
+- found_next = 1; |
4097 |
+- goto insert; |
4098 |
+- } |
4099 |
+- slot = path->slots[0]; |
4100 |
+- } |
4101 |
+- btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); |
4102 |
+- if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || |
4103 |
+- found_key.type != BTRFS_EXTENT_CSUM_KEY) { |
4104 |
+- found_next = 1; |
4105 |
+- goto insert; |
4106 |
+- } |
4107 |
+- next_offset = found_key.offset; |
4108 |
++ /* We didn't find a csum item, insert one. */ |
4109 |
++ ret = find_next_csum_offset(root, path, &next_offset); |
4110 |
++ if (ret < 0) |
4111 |
++ goto out; |
4112 |
+ found_next = 1; |
4113 |
+ goto insert; |
4114 |
+ } |
4115 |
+@@ -1055,8 +1069,48 @@ extend_csum: |
4116 |
+ tmp = sums->len - total_bytes; |
4117 |
+ tmp >>= fs_info->sectorsize_bits; |
4118 |
+ WARN_ON(tmp < 1); |
4119 |
++ extend_nr = max_t(int, 1, tmp); |
4120 |
++ |
4121 |
++ /* |
4122 |
++ * A log tree can already have checksum items with a subset of |
4123 |
++ * the checksums we are trying to log. This can happen after |
4124 |
++ * doing a sequence of partial writes into prealloc extents and |
4125 |
++ * fsyncs in between, with a full fsync logging a larger subrange |
4126 |
++ * of an extent for which a previous fast fsync logged a smaller |
4127 |
++ * subrange. And this happens in particular due to merging file |
4128 |
++ * extent items when we complete an ordered extent for a range |
4129 |
++ * covered by a prealloc extent - this is done at |
4130 |
++ * btrfs_mark_extent_written(). |
4131 |
++ * |
4132 |
++ * So if we try to extend the previous checksum item, which has |
4133 |
++ * a range that ends at the start of the range we want to insert, |
4134 |
++ * make sure we don't extend beyond the start offset of the next |
4135 |
++ * checksum item. If we are at the last item in the leaf, then |
4136 |
++ * forget the optimization of extending and add a new checksum |
4137 |
++ * item - it is not worth the complexity of releasing the path, |
4138 |
++ * getting the first key for the next leaf, repeat the btree |
4139 |
++ * search, etc, because log trees are temporary anyway and it |
4140 |
++ * would only save a few bytes of leaf space. |
4141 |
++ */ |
4142 |
++ if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { |
4143 |
++ if (path->slots[0] + 1 >= |
4144 |
++ btrfs_header_nritems(path->nodes[0])) { |
4145 |
++ ret = find_next_csum_offset(root, path, &next_offset); |
4146 |
++ if (ret < 0) |
4147 |
++ goto out; |
4148 |
++ found_next = 1; |
4149 |
++ goto insert; |
4150 |
++ } |
4151 |
++ |
4152 |
++ ret = find_next_csum_offset(root, path, &next_offset); |
4153 |
++ if (ret < 0) |
4154 |
++ goto out; |
4155 |
++ |
4156 |
++ tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits; |
4157 |
++ if (tmp <= INT_MAX) |
4158 |
++ extend_nr = min_t(int, extend_nr, tmp); |
4159 |
++ } |
4160 |
+ |
4161 |
+- extend_nr = max_t(int, 1, (int)tmp); |
4162 |
+ diff = (csum_offset + extend_nr) * csum_size; |
4163 |
+ diff = min(diff, |
4164 |
+ MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); |
4165 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
4166 |
+index 81b93c9c659b7..3bb8ce4969f31 100644 |
4167 |
+--- a/fs/btrfs/inode.c |
4168 |
++++ b/fs/btrfs/inode.c |
4169 |
+@@ -3011,6 +3011,18 @@ out: |
4170 |
+ if (ret || truncated) { |
4171 |
+ u64 unwritten_start = start; |
4172 |
+ |
4173 |
++ /* |
4174 |
++ * If we failed to finish this ordered extent for any reason we |
4175 |
++ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered |
4176 |
++ * extent, and mark the inode with the error if it wasn't |
4177 |
++ * already set. Any error during writeback would have already |
4178 |
++ * set the mapping error, so we need to set it if we're the ones |
4179 |
++ * marking this ordered extent as failed. |
4180 |
++ */ |
4181 |
++ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, |
4182 |
++ &ordered_extent->flags)) |
4183 |
++ mapping_set_error(ordered_extent->inode->i_mapping, -EIO); |
4184 |
++ |
4185 |
+ if (truncated) |
4186 |
+ unwritten_start += logical_len; |
4187 |
+ clear_extent_uptodate(io_tree, unwritten_start, end, NULL); |
4188 |
+@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, |
4189 |
+ int ret2; |
4190 |
+ bool root_log_pinned = false; |
4191 |
+ bool dest_log_pinned = false; |
4192 |
++ bool need_abort = false; |
4193 |
+ |
4194 |
+ /* we only allow rename subvolume link between subvolumes */ |
4195 |
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
4196 |
+@@ -9132,6 +9145,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, |
4197 |
+ old_idx); |
4198 |
+ if (ret) |
4199 |
+ goto out_fail; |
4200 |
++ need_abort = true; |
4201 |
+ } |
4202 |
+ |
4203 |
+ /* And now for the dest. */ |
4204 |
+@@ -9147,8 +9161,11 @@ static int btrfs_rename_exchange(struct inode *old_dir, |
4205 |
+ new_ino, |
4206 |
+ btrfs_ino(BTRFS_I(old_dir)), |
4207 |
+ new_idx); |
4208 |
+- if (ret) |
4209 |
++ if (ret) { |
4210 |
++ if (need_abort) |
4211 |
++ btrfs_abort_transaction(trans, ret); |
4212 |
+ goto out_fail; |
4213 |
++ } |
4214 |
+ } |
4215 |
+ |
4216 |
+ /* Update inode version and ctime/mtime. */ |
4217 |
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c |
4218 |
+index 53ee17f5e382c..238e713635d79 100644 |
4219 |
+--- a/fs/btrfs/reflink.c |
4220 |
++++ b/fs/btrfs/reflink.c |
4221 |
+@@ -207,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst, |
4222 |
+ * inline extent's data to the page. |
4223 |
+ */ |
4224 |
+ ASSERT(key.offset > 0); |
4225 |
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, |
4226 |
+- inline_data, size, datal, |
4227 |
+- comp_type); |
4228 |
+- goto out; |
4229 |
++ goto copy_to_page; |
4230 |
+ } |
4231 |
+ } else if (i_size_read(dst) <= datal) { |
4232 |
+ struct btrfs_file_extent_item *ei; |
4233 |
+@@ -226,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst, |
4234 |
+ BTRFS_FILE_EXTENT_INLINE) |
4235 |
+ goto copy_inline_extent; |
4236 |
+ |
4237 |
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, |
4238 |
+- inline_data, size, datal, comp_type); |
4239 |
+- goto out; |
4240 |
++ goto copy_to_page; |
4241 |
+ } |
4242 |
+ |
4243 |
+ copy_inline_extent: |
4244 |
+- ret = 0; |
4245 |
+ /* |
4246 |
+ * We have no extent items, or we have an extent at offset 0 which may |
4247 |
+ * or may not be inlined. All these cases are dealt the same way. |
4248 |
+@@ -244,11 +238,13 @@ copy_inline_extent: |
4249 |
+ * clone. Deal with all these cases by copying the inline extent |
4250 |
+ * data into the respective page at the destination inode. |
4251 |
+ */ |
4252 |
+- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, |
4253 |
+- inline_data, size, datal, comp_type); |
4254 |
+- goto out; |
4255 |
++ goto copy_to_page; |
4256 |
+ } |
4257 |
+ |
4258 |
++ /* |
4259 |
++ * Release path before starting a new transaction so we don't hold locks |
4260 |
++ * that would confuse lockdep. |
4261 |
++ */ |
4262 |
+ btrfs_release_path(path); |
4263 |
+ /* |
4264 |
+ * If we end up here it means were copy the inline extent into a leaf |
4265 |
+@@ -285,11 +281,6 @@ copy_inline_extent: |
4266 |
+ ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end); |
4267 |
+ out: |
4268 |
+ if (!ret && !trans) { |
4269 |
+- /* |
4270 |
+- * Release path before starting a new transaction so we don't |
4271 |
+- * hold locks that would confuse lockdep. |
4272 |
+- */ |
4273 |
+- btrfs_release_path(path); |
4274 |
+ /* |
4275 |
+ * No transaction here means we copied the inline extent into a |
4276 |
+ * page of the destination inode. |
4277 |
+@@ -310,6 +301,21 @@ out: |
4278 |
+ *trans_out = trans; |
4279 |
+ |
4280 |
+ return ret; |
4281 |
++ |
4282 |
++copy_to_page: |
4283 |
++ /* |
4284 |
++ * Release our path because we don't need it anymore and also because |
4285 |
++ * copy_inline_to_page() needs to reserve data and metadata, which may |
4286 |
++ * need to flush delalloc when we are low on available space and |
4287 |
++ * therefore cause a deadlock if writeback of an inline extent needs to |
4288 |
++ * write to the same leaf or an ordered extent completion needs to write |
4289 |
++ * to the same leaf. |
4290 |
++ */ |
4291 |
++ btrfs_release_path(path); |
4292 |
++ |
4293 |
++ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, |
4294 |
++ inline_data, size, datal, comp_type); |
4295 |
++ goto out; |
4296 |
+ } |
4297 |
+ |
4298 |
+ /** |
4299 |
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
4300 |
+index d7f1599e69b1f..faae6ebd8a279 100644 |
4301 |
+--- a/fs/btrfs/tree-log.c |
4302 |
++++ b/fs/btrfs/tree-log.c |
4303 |
+@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, |
4304 |
+ if (ret) |
4305 |
+ goto out; |
4306 |
+ |
4307 |
+- btrfs_update_inode(trans, root, BTRFS_I(inode)); |
4308 |
++ ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
4309 |
++ if (ret) |
4310 |
++ goto out; |
4311 |
+ } |
4312 |
+ |
4313 |
+ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; |
4314 |
+@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, |
4315 |
+ |
4316 |
+ if (nlink != inode->i_nlink) { |
4317 |
+ set_nlink(inode, nlink); |
4318 |
+- btrfs_update_inode(trans, root, BTRFS_I(inode)); |
4319 |
++ ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
4320 |
++ if (ret) |
4321 |
++ goto out; |
4322 |
+ } |
4323 |
+ BTRFS_I(inode)->index_cnt = (u64)-1; |
4324 |
+ |
4325 |
+@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, |
4326 |
+ break; |
4327 |
+ |
4328 |
+ if (ret == 1) { |
4329 |
++ ret = 0; |
4330 |
+ if (path->slots[0] == 0) |
4331 |
+ break; |
4332 |
+ path->slots[0]--; |
4333 |
+@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, |
4334 |
+ |
4335 |
+ ret = btrfs_del_item(trans, root, path); |
4336 |
+ if (ret) |
4337 |
+- goto out; |
4338 |
++ break; |
4339 |
+ |
4340 |
+ btrfs_release_path(path); |
4341 |
+ inode = read_one_inode(root, key.offset); |
4342 |
+- if (!inode) |
4343 |
+- return -EIO; |
4344 |
++ if (!inode) { |
4345 |
++ ret = -EIO; |
4346 |
++ break; |
4347 |
++ } |
4348 |
+ |
4349 |
+ ret = fixup_inode_link_count(trans, root, inode); |
4350 |
+ iput(inode); |
4351 |
+ if (ret) |
4352 |
+- goto out; |
4353 |
++ break; |
4354 |
+ |
4355 |
+ /* |
4356 |
+ * fixup on a directory may create new entries, |
4357 |
+@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, |
4358 |
+ */ |
4359 |
+ key.offset = (u64)-1; |
4360 |
+ } |
4361 |
+- ret = 0; |
4362 |
+-out: |
4363 |
+ btrfs_release_path(path); |
4364 |
+ return ret; |
4365 |
+ } |
4366 |
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
4367 |
+index 77c84d6f1af6b..cbf37b2cf871e 100644 |
4368 |
+--- a/fs/ext4/extents.c |
4369 |
++++ b/fs/ext4/extents.c |
4370 |
+@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle, |
4371 |
+ ext4_ext_mark_unwritten(ex2); |
4372 |
+ |
4373 |
+ err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); |
4374 |
+- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
4375 |
++ if (err != -ENOSPC && err != -EDQUOT) |
4376 |
++ goto out; |
4377 |
++ |
4378 |
++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) { |
4379 |
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { |
4380 |
+ if (split_flag & EXT4_EXT_DATA_VALID1) { |
4381 |
+ err = ext4_ext_zeroout(inode, ex2); |
4382 |
+@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle, |
4383 |
+ ext4_ext_pblock(&orig_ex)); |
4384 |
+ } |
4385 |
+ |
4386 |
+- if (err) |
4387 |
+- goto fix_extent_len; |
4388 |
+- /* update the extent length and mark as initialized */ |
4389 |
+- ex->ee_len = cpu_to_le16(ee_len); |
4390 |
+- ext4_ext_try_to_merge(handle, inode, path, ex); |
4391 |
+- err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
4392 |
+- if (err) |
4393 |
+- goto fix_extent_len; |
4394 |
+- |
4395 |
+- /* update extent status tree */ |
4396 |
+- err = ext4_zeroout_es(inode, &zero_ex); |
4397 |
+- |
4398 |
+- goto out; |
4399 |
+- } else if (err) |
4400 |
+- goto fix_extent_len; |
4401 |
+- |
4402 |
+-out: |
4403 |
+- ext4_ext_show_leaf(inode, path); |
4404 |
+- return err; |
4405 |
++ if (!err) { |
4406 |
++ /* update the extent length and mark as initialized */ |
4407 |
++ ex->ee_len = cpu_to_le16(ee_len); |
4408 |
++ ext4_ext_try_to_merge(handle, inode, path, ex); |
4409 |
++ err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
4410 |
++ if (!err) |
4411 |
++ /* update extent status tree */ |
4412 |
++ err = ext4_zeroout_es(inode, &zero_ex); |
4413 |
++ /* If we failed at this point, we don't know in which |
4414 |
++ * state the extent tree exactly is so don't try to fix |
4415 |
++ * length of the original extent as it may do even more |
4416 |
++ * damage. |
4417 |
++ */ |
4418 |
++ goto out; |
4419 |
++ } |
4420 |
++ } |
4421 |
+ |
4422 |
+ fix_extent_len: |
4423 |
+ ex->ee_len = orig_ex.ee_len; |
4424 |
+@@ -3260,6 +3260,9 @@ fix_extent_len: |
4425 |
+ */ |
4426 |
+ ext4_ext_dirty(handle, inode, path + path->p_depth); |
4427 |
+ return err; |
4428 |
++out: |
4429 |
++ ext4_ext_show_leaf(inode, path); |
4430 |
++ return err; |
4431 |
+ } |
4432 |
+ |
4433 |
+ /* |
4434 |
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c |
4435 |
+index eda14f630def4..c1c962b118012 100644 |
4436 |
+--- a/fs/ext4/fast_commit.c |
4437 |
++++ b/fs/ext4/fast_commit.c |
4438 |
+@@ -1288,28 +1288,29 @@ struct dentry_info_args { |
4439 |
+ }; |
4440 |
+ |
4441 |
+ static inline void tl_to_darg(struct dentry_info_args *darg, |
4442 |
+- struct ext4_fc_tl *tl) |
4443 |
++ struct ext4_fc_tl *tl, u8 *val) |
4444 |
+ { |
4445 |
+- struct ext4_fc_dentry_info *fcd; |
4446 |
++ struct ext4_fc_dentry_info fcd; |
4447 |
+ |
4448 |
+- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl); |
4449 |
++ memcpy(&fcd, val, sizeof(fcd)); |
4450 |
+ |
4451 |
+- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino); |
4452 |
+- darg->ino = le32_to_cpu(fcd->fc_ino); |
4453 |
+- darg->dname = fcd->fc_dname; |
4454 |
+- darg->dname_len = ext4_fc_tag_len(tl) - |
4455 |
+- sizeof(struct ext4_fc_dentry_info); |
4456 |
++ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino); |
4457 |
++ darg->ino = le32_to_cpu(fcd.fc_ino); |
4458 |
++ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname); |
4459 |
++ darg->dname_len = le16_to_cpu(tl->fc_len) - |
4460 |
++ sizeof(struct ext4_fc_dentry_info); |
4461 |
+ } |
4462 |
+ |
4463 |
+ /* Unlink replay function */ |
4464 |
+-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl) |
4465 |
++static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, |
4466 |
++ u8 *val) |
4467 |
+ { |
4468 |
+ struct inode *inode, *old_parent; |
4469 |
+ struct qstr entry; |
4470 |
+ struct dentry_info_args darg; |
4471 |
+ int ret = 0; |
4472 |
+ |
4473 |
+- tl_to_darg(&darg, tl); |
4474 |
++ tl_to_darg(&darg, tl, val); |
4475 |
+ |
4476 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino, |
4477 |
+ darg.parent_ino, darg.dname_len); |
4478 |
+@@ -1399,13 +1400,14 @@ out: |
4479 |
+ } |
4480 |
+ |
4481 |
+ /* Link replay function */ |
4482 |
+-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl) |
4483 |
++static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl, |
4484 |
++ u8 *val) |
4485 |
+ { |
4486 |
+ struct inode *inode; |
4487 |
+ struct dentry_info_args darg; |
4488 |
+ int ret = 0; |
4489 |
+ |
4490 |
+- tl_to_darg(&darg, tl); |
4491 |
++ tl_to_darg(&darg, tl, val); |
4492 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino, |
4493 |
+ darg.parent_ino, darg.dname_len); |
4494 |
+ |
4495 |
+@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) |
4496 |
+ /* |
4497 |
+ * Inode replay function |
4498 |
+ */ |
4499 |
+-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) |
4500 |
++static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, |
4501 |
++ u8 *val) |
4502 |
+ { |
4503 |
+- struct ext4_fc_inode *fc_inode; |
4504 |
++ struct ext4_fc_inode fc_inode; |
4505 |
+ struct ext4_inode *raw_inode; |
4506 |
+ struct ext4_inode *raw_fc_inode; |
4507 |
+ struct inode *inode = NULL; |
4508 |
+@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) |
4509 |
+ int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag); |
4510 |
+ struct ext4_extent_header *eh; |
4511 |
+ |
4512 |
+- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl); |
4513 |
++ memcpy(&fc_inode, val, sizeof(fc_inode)); |
4514 |
+ |
4515 |
+- ino = le32_to_cpu(fc_inode->fc_ino); |
4516 |
++ ino = le32_to_cpu(fc_inode.fc_ino); |
4517 |
+ trace_ext4_fc_replay(sb, tag, ino, 0, 0); |
4518 |
+ |
4519 |
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); |
4520 |
+@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) |
4521 |
+ |
4522 |
+ ext4_fc_record_modified_inode(sb, ino); |
4523 |
+ |
4524 |
+- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode; |
4525 |
++ raw_fc_inode = (struct ext4_inode *) |
4526 |
++ (val + offsetof(struct ext4_fc_inode, fc_raw_inode)); |
4527 |
+ ret = ext4_get_fc_inode_loc(sb, ino, &iloc); |
4528 |
+ if (ret) |
4529 |
+ goto out; |
4530 |
+ |
4531 |
+- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode); |
4532 |
++ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode); |
4533 |
+ raw_inode = ext4_raw_inode(&iloc); |
4534 |
+ |
4535 |
+ memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block)); |
4536 |
+@@ -1547,14 +1551,15 @@ out: |
4537 |
+ * inode for which we are trying to create a dentry here, should already have |
4538 |
+ * been replayed before we start here. |
4539 |
+ */ |
4540 |
+-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl) |
4541 |
++static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, |
4542 |
++ u8 *val) |
4543 |
+ { |
4544 |
+ int ret = 0; |
4545 |
+ struct inode *inode = NULL; |
4546 |
+ struct inode *dir = NULL; |
4547 |
+ struct dentry_info_args darg; |
4548 |
+ |
4549 |
+- tl_to_darg(&darg, tl); |
4550 |
++ tl_to_darg(&darg, tl, val); |
4551 |
+ |
4552 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino, |
4553 |
+ darg.parent_ino, darg.dname_len); |
4554 |
+@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino, |
4555 |
+ |
4556 |
+ /* Replay add range tag */ |
4557 |
+ static int ext4_fc_replay_add_range(struct super_block *sb, |
4558 |
+- struct ext4_fc_tl *tl) |
4559 |
++ struct ext4_fc_tl *tl, u8 *val) |
4560 |
+ { |
4561 |
+- struct ext4_fc_add_range *fc_add_ex; |
4562 |
++ struct ext4_fc_add_range fc_add_ex; |
4563 |
+ struct ext4_extent newex, *ex; |
4564 |
+ struct inode *inode; |
4565 |
+ ext4_lblk_t start, cur; |
4566 |
+@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb, |
4567 |
+ struct ext4_ext_path *path = NULL; |
4568 |
+ int ret; |
4569 |
+ |
4570 |
+- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); |
4571 |
+- ex = (struct ext4_extent *)&fc_add_ex->fc_ex; |
4572 |
++ memcpy(&fc_add_ex, val, sizeof(fc_add_ex)); |
4573 |
++ ex = (struct ext4_extent *)&fc_add_ex.fc_ex; |
4574 |
+ |
4575 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE, |
4576 |
+- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block), |
4577 |
++ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block), |
4578 |
+ ext4_ext_get_actual_len(ex)); |
4579 |
+ |
4580 |
+- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino), |
4581 |
+- EXT4_IGET_NORMAL); |
4582 |
++ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL); |
4583 |
+ if (IS_ERR(inode)) { |
4584 |
+ jbd_debug(1, "Inode not found."); |
4585 |
+ return 0; |
4586 |
+@@ -1762,32 +1766,33 @@ next: |
4587 |
+ |
4588 |
+ /* Replay DEL_RANGE tag */ |
4589 |
+ static int |
4590 |
+-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl) |
4591 |
++ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, |
4592 |
++ u8 *val) |
4593 |
+ { |
4594 |
+ struct inode *inode; |
4595 |
+- struct ext4_fc_del_range *lrange; |
4596 |
++ struct ext4_fc_del_range lrange; |
4597 |
+ struct ext4_map_blocks map; |
4598 |
+ ext4_lblk_t cur, remaining; |
4599 |
+ int ret; |
4600 |
+ |
4601 |
+- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl); |
4602 |
+- cur = le32_to_cpu(lrange->fc_lblk); |
4603 |
+- remaining = le32_to_cpu(lrange->fc_len); |
4604 |
++ memcpy(&lrange, val, sizeof(lrange)); |
4605 |
++ cur = le32_to_cpu(lrange.fc_lblk); |
4606 |
++ remaining = le32_to_cpu(lrange.fc_len); |
4607 |
+ |
4608 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE, |
4609 |
+- le32_to_cpu(lrange->fc_ino), cur, remaining); |
4610 |
++ le32_to_cpu(lrange.fc_ino), cur, remaining); |
4611 |
+ |
4612 |
+- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL); |
4613 |
++ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL); |
4614 |
+ if (IS_ERR(inode)) { |
4615 |
+- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino)); |
4616 |
++ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino)); |
4617 |
+ return 0; |
4618 |
+ } |
4619 |
+ |
4620 |
+ ret = ext4_fc_record_modified_inode(sb, inode->i_ino); |
4621 |
+ |
4622 |
+ jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", |
4623 |
+- inode->i_ino, le32_to_cpu(lrange->fc_lblk), |
4624 |
+- le32_to_cpu(lrange->fc_len)); |
4625 |
++ inode->i_ino, le32_to_cpu(lrange.fc_lblk), |
4626 |
++ le32_to_cpu(lrange.fc_len)); |
4627 |
+ while (remaining > 0) { |
4628 |
+ map.m_lblk = cur; |
4629 |
+ map.m_len = remaining; |
4630 |
+@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl) |
4631 |
+ } |
4632 |
+ |
4633 |
+ ret = ext4_punch_hole(inode, |
4634 |
+- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits, |
4635 |
+- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits); |
4636 |
++ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits, |
4637 |
++ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits); |
4638 |
+ if (ret) |
4639 |
+ jbd_debug(1, "ext4_punch_hole returned %d", ret); |
4640 |
+ ext4_ext_replay_shrink_inode(inode, |
4641 |
+@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal, |
4642 |
+ struct ext4_sb_info *sbi = EXT4_SB(sb); |
4643 |
+ struct ext4_fc_replay_state *state; |
4644 |
+ int ret = JBD2_FC_REPLAY_CONTINUE; |
4645 |
+- struct ext4_fc_add_range *ext; |
4646 |
+- struct ext4_fc_tl *tl; |
4647 |
+- struct ext4_fc_tail *tail; |
4648 |
+- __u8 *start, *end; |
4649 |
+- struct ext4_fc_head *head; |
4650 |
++ struct ext4_fc_add_range ext; |
4651 |
++ struct ext4_fc_tl tl; |
4652 |
++ struct ext4_fc_tail tail; |
4653 |
++ __u8 *start, *end, *cur, *val; |
4654 |
++ struct ext4_fc_head head; |
4655 |
+ struct ext4_extent *ex; |
4656 |
+ |
4657 |
+ state = &sbi->s_fc_replay_state; |
4658 |
+@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal, |
4659 |
+ } |
4660 |
+ |
4661 |
+ state->fc_replay_expected_off++; |
4662 |
+- fc_for_each_tl(start, end, tl) { |
4663 |
++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { |
4664 |
++ memcpy(&tl, cur, sizeof(tl)); |
4665 |
++ val = cur + sizeof(tl); |
4666 |
+ jbd_debug(3, "Scan phase, tag:%s, blk %lld\n", |
4667 |
+- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr); |
4668 |
+- switch (le16_to_cpu(tl->fc_tag)) { |
4669 |
++ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr); |
4670 |
++ switch (le16_to_cpu(tl.fc_tag)) { |
4671 |
+ case EXT4_FC_TAG_ADD_RANGE: |
4672 |
+- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); |
4673 |
+- ex = (struct ext4_extent *)&ext->fc_ex; |
4674 |
++ memcpy(&ext, val, sizeof(ext)); |
4675 |
++ ex = (struct ext4_extent *)&ext.fc_ex; |
4676 |
+ ret = ext4_fc_record_regions(sb, |
4677 |
+- le32_to_cpu(ext->fc_ino), |
4678 |
++ le32_to_cpu(ext.fc_ino), |
4679 |
+ le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), |
4680 |
+ ext4_ext_get_actual_len(ex)); |
4681 |
+ if (ret < 0) |
4682 |
+@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal, |
4683 |
+ case EXT4_FC_TAG_INODE: |
4684 |
+ case EXT4_FC_TAG_PAD: |
4685 |
+ state->fc_cur_tag++; |
4686 |
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, |
4687 |
+- sizeof(*tl) + ext4_fc_tag_len(tl)); |
4688 |
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, |
4689 |
++ sizeof(tl) + le16_to_cpu(tl.fc_len)); |
4690 |
+ break; |
4691 |
+ case EXT4_FC_TAG_TAIL: |
4692 |
+ state->fc_cur_tag++; |
4693 |
+- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl); |
4694 |
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, |
4695 |
+- sizeof(*tl) + |
4696 |
++ memcpy(&tail, val, sizeof(tail)); |
4697 |
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, |
4698 |
++ sizeof(tl) + |
4699 |
+ offsetof(struct ext4_fc_tail, |
4700 |
+ fc_crc)); |
4701 |
+- if (le32_to_cpu(tail->fc_tid) == expected_tid && |
4702 |
+- le32_to_cpu(tail->fc_crc) == state->fc_crc) { |
4703 |
++ if (le32_to_cpu(tail.fc_tid) == expected_tid && |
4704 |
++ le32_to_cpu(tail.fc_crc) == state->fc_crc) { |
4705 |
+ state->fc_replay_num_tags = state->fc_cur_tag; |
4706 |
+ state->fc_regions_valid = |
4707 |
+ state->fc_regions_used; |
4708 |
+@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal, |
4709 |
+ state->fc_crc = 0; |
4710 |
+ break; |
4711 |
+ case EXT4_FC_TAG_HEAD: |
4712 |
+- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl); |
4713 |
+- if (le32_to_cpu(head->fc_features) & |
4714 |
++ memcpy(&head, val, sizeof(head)); |
4715 |
++ if (le32_to_cpu(head.fc_features) & |
4716 |
+ ~EXT4_FC_SUPPORTED_FEATURES) { |
4717 |
+ ret = -EOPNOTSUPP; |
4718 |
+ break; |
4719 |
+ } |
4720 |
+- if (le32_to_cpu(head->fc_tid) != expected_tid) { |
4721 |
++ if (le32_to_cpu(head.fc_tid) != expected_tid) { |
4722 |
+ ret = JBD2_FC_REPLAY_STOP; |
4723 |
+ break; |
4724 |
+ } |
4725 |
+ state->fc_cur_tag++; |
4726 |
+- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, |
4727 |
+- sizeof(*tl) + ext4_fc_tag_len(tl)); |
4728 |
++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, |
4729 |
++ sizeof(tl) + le16_to_cpu(tl.fc_len)); |
4730 |
+ break; |
4731 |
+ default: |
4732 |
+ ret = state->fc_replay_num_tags ? |
4733 |
+@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, |
4734 |
+ { |
4735 |
+ struct super_block *sb = journal->j_private; |
4736 |
+ struct ext4_sb_info *sbi = EXT4_SB(sb); |
4737 |
+- struct ext4_fc_tl *tl; |
4738 |
+- __u8 *start, *end; |
4739 |
++ struct ext4_fc_tl tl; |
4740 |
++ __u8 *start, *end, *cur, *val; |
4741 |
+ int ret = JBD2_FC_REPLAY_CONTINUE; |
4742 |
+ struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state; |
4743 |
+- struct ext4_fc_tail *tail; |
4744 |
++ struct ext4_fc_tail tail; |
4745 |
+ |
4746 |
+ if (pass == PASS_SCAN) { |
4747 |
+ state->fc_current_pass = PASS_SCAN; |
4748 |
+@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, |
4749 |
+ start = (u8 *)bh->b_data; |
4750 |
+ end = (__u8 *)bh->b_data + journal->j_blocksize - 1; |
4751 |
+ |
4752 |
+- fc_for_each_tl(start, end, tl) { |
4753 |
++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { |
4754 |
++ memcpy(&tl, cur, sizeof(tl)); |
4755 |
++ val = cur + sizeof(tl); |
4756 |
++ |
4757 |
+ if (state->fc_replay_num_tags == 0) { |
4758 |
+ ret = JBD2_FC_REPLAY_STOP; |
4759 |
+ ext4_fc_set_bitmaps_and_counters(sb); |
4760 |
+ break; |
4761 |
+ } |
4762 |
+ jbd_debug(3, "Replay phase, tag:%s\n", |
4763 |
+- tag2str(le16_to_cpu(tl->fc_tag))); |
4764 |
++ tag2str(le16_to_cpu(tl.fc_tag))); |
4765 |
+ state->fc_replay_num_tags--; |
4766 |
+- switch (le16_to_cpu(tl->fc_tag)) { |
4767 |
++ switch (le16_to_cpu(tl.fc_tag)) { |
4768 |
+ case EXT4_FC_TAG_LINK: |
4769 |
+- ret = ext4_fc_replay_link(sb, tl); |
4770 |
++ ret = ext4_fc_replay_link(sb, &tl, val); |
4771 |
+ break; |
4772 |
+ case EXT4_FC_TAG_UNLINK: |
4773 |
+- ret = ext4_fc_replay_unlink(sb, tl); |
4774 |
++ ret = ext4_fc_replay_unlink(sb, &tl, val); |
4775 |
+ break; |
4776 |
+ case EXT4_FC_TAG_ADD_RANGE: |
4777 |
+- ret = ext4_fc_replay_add_range(sb, tl); |
4778 |
++ ret = ext4_fc_replay_add_range(sb, &tl, val); |
4779 |
+ break; |
4780 |
+ case EXT4_FC_TAG_CREAT: |
4781 |
+- ret = ext4_fc_replay_create(sb, tl); |
4782 |
++ ret = ext4_fc_replay_create(sb, &tl, val); |
4783 |
+ break; |
4784 |
+ case EXT4_FC_TAG_DEL_RANGE: |
4785 |
+- ret = ext4_fc_replay_del_range(sb, tl); |
4786 |
++ ret = ext4_fc_replay_del_range(sb, &tl, val); |
4787 |
+ break; |
4788 |
+ case EXT4_FC_TAG_INODE: |
4789 |
+- ret = ext4_fc_replay_inode(sb, tl); |
4790 |
++ ret = ext4_fc_replay_inode(sb, &tl, val); |
4791 |
+ break; |
4792 |
+ case EXT4_FC_TAG_PAD: |
4793 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0, |
4794 |
+- ext4_fc_tag_len(tl), 0); |
4795 |
++ le16_to_cpu(tl.fc_len), 0); |
4796 |
+ break; |
4797 |
+ case EXT4_FC_TAG_TAIL: |
4798 |
+ trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0, |
4799 |
+- ext4_fc_tag_len(tl), 0); |
4800 |
+- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl); |
4801 |
+- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid); |
4802 |
++ le16_to_cpu(tl.fc_len), 0); |
4803 |
++ memcpy(&tail, val, sizeof(tail)); |
4804 |
++ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid); |
4805 |
+ break; |
4806 |
+ case EXT4_FC_TAG_HEAD: |
4807 |
+ break; |
4808 |
+ default: |
4809 |
+- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0, |
4810 |
+- ext4_fc_tag_len(tl), 0); |
4811 |
++ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0, |
4812 |
++ le16_to_cpu(tl.fc_len), 0); |
4813 |
+ ret = -ECANCELED; |
4814 |
+ break; |
4815 |
+ } |
4816 |
+diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h |
4817 |
+index b77f70f55a622..937c381b4c85e 100644 |
4818 |
+--- a/fs/ext4/fast_commit.h |
4819 |
++++ b/fs/ext4/fast_commit.h |
4820 |
+@@ -153,13 +153,6 @@ struct ext4_fc_replay_state { |
4821 |
+ #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1) |
4822 |
+ #endif |
4823 |
+ |
4824 |
+-#define fc_for_each_tl(__start, __end, __tl) \ |
4825 |
+- for (tl = (struct ext4_fc_tl *)(__start); \ |
4826 |
+- (__u8 *)tl < (__u8 *)(__end); \ |
4827 |
+- tl = (struct ext4_fc_tl *)((__u8 *)tl + \ |
4828 |
+- sizeof(struct ext4_fc_tl) + \ |
4829 |
+- + le16_to_cpu(tl->fc_len))) |
4830 |
+- |
4831 |
+ static inline const char *tag2str(__u16 tag) |
4832 |
+ { |
4833 |
+ switch (tag) { |
4834 |
+@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag) |
4835 |
+ } |
4836 |
+ } |
4837 |
+ |
4838 |
+-/* Get length of a particular tlv */ |
4839 |
+-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl) |
4840 |
+-{ |
4841 |
+- return le16_to_cpu(tl->fc_len); |
4842 |
+-} |
4843 |
+- |
4844 |
+-/* Get a pointer to "value" of a tlv */ |
4845 |
+-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl) |
4846 |
+-{ |
4847 |
+- return (__u8 *)tl + sizeof(*tl); |
4848 |
+-} |
4849 |
+- |
4850 |
+ #endif /* __FAST_COMMIT_H__ */ |
4851 |
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c |
4852 |
+index 71d321b3b9844..edbaed073ac5c 100644 |
4853 |
+--- a/fs/ext4/ialloc.c |
4854 |
++++ b/fs/ext4/ialloc.c |
4855 |
+@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) |
4856 |
+ if (is_directory) { |
4857 |
+ count = ext4_used_dirs_count(sb, gdp) - 1; |
4858 |
+ ext4_used_dirs_set(sb, gdp, count); |
4859 |
+- percpu_counter_dec(&sbi->s_dirs_counter); |
4860 |
++ if (percpu_counter_initialized(&sbi->s_dirs_counter)) |
4861 |
++ percpu_counter_dec(&sbi->s_dirs_counter); |
4862 |
+ } |
4863 |
+ ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, |
4864 |
+ EXT4_INODES_PER_GROUP(sb) / 8); |
4865 |
+ ext4_group_desc_csum_set(sb, block_group, gdp); |
4866 |
+ ext4_unlock_group(sb, block_group); |
4867 |
+ |
4868 |
+- percpu_counter_inc(&sbi->s_freeinodes_counter); |
4869 |
++ if (percpu_counter_initialized(&sbi->s_freeinodes_counter)) |
4870 |
++ percpu_counter_inc(&sbi->s_freeinodes_counter); |
4871 |
+ if (sbi->s_log_groups_per_flex) { |
4872 |
+ struct flex_groups *fg; |
4873 |
+ |
4874 |
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
4875 |
+index a02fadf4fc84e..d24cb3dc79fff 100644 |
4876 |
+--- a/fs/ext4/mballoc.c |
4877 |
++++ b/fs/ext4/mballoc.c |
4878 |
+@@ -2715,7 +2715,7 @@ static int ext4_mb_init_backend(struct super_block *sb) |
4879 |
+ */ |
4880 |
+ if (sbi->s_es->s_log_groups_per_flex >= 32) { |
4881 |
+ ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); |
4882 |
+- goto err_freesgi; |
4883 |
++ goto err_freebuddy; |
4884 |
+ } |
4885 |
+ sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, |
4886 |
+ BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); |
4887 |
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
4888 |
+index 77c1cb2582623..0e3a847b5d279 100644 |
4889 |
+--- a/fs/ext4/super.c |
4890 |
++++ b/fs/ext4/super.c |
4891 |
+@@ -4449,14 +4449,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
4892 |
+ } |
4893 |
+ |
4894 |
+ if (sb->s_blocksize != blocksize) { |
4895 |
++ /* |
4896 |
++ * bh must be released before kill_bdev(), otherwise |
4897 |
++ * it won't be freed and its page also. kill_bdev() |
4898 |
++ * is called by sb_set_blocksize(). |
4899 |
++ */ |
4900 |
++ brelse(bh); |
4901 |
+ /* Validate the filesystem blocksize */ |
4902 |
+ if (!sb_set_blocksize(sb, blocksize)) { |
4903 |
+ ext4_msg(sb, KERN_ERR, "bad block size %d", |
4904 |
+ blocksize); |
4905 |
++ bh = NULL; |
4906 |
+ goto failed_mount; |
4907 |
+ } |
4908 |
+ |
4909 |
+- brelse(bh); |
4910 |
+ logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; |
4911 |
+ offset = do_div(logical_sb_block, blocksize); |
4912 |
+ bh = ext4_sb_bread_unmovable(sb, logical_sb_block); |
4913 |
+@@ -5176,8 +5182,9 @@ failed_mount: |
4914 |
+ kfree(get_qf_name(sb, sbi, i)); |
4915 |
+ #endif |
4916 |
+ fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); |
4917 |
+- ext4_blkdev_remove(sbi); |
4918 |
++ /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */ |
4919 |
+ brelse(bh); |
4920 |
++ ext4_blkdev_remove(sbi); |
4921 |
+ out_fail: |
4922 |
+ sb->s_fs_info = NULL; |
4923 |
+ kfree(sbi->s_blockgroup_lock); |
4924 |
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c |
4925 |
+index 9567520d79f79..7c2ba81213da0 100644 |
4926 |
+--- a/fs/gfs2/glock.c |
4927 |
++++ b/fs/gfs2/glock.c |
4928 |
+@@ -1465,9 +1465,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh) |
4929 |
+ glock_blocked_by_withdraw(gl) && |
4930 |
+ gh->gh_gl != sdp->sd_jinode_gl) { |
4931 |
+ sdp->sd_glock_dqs_held++; |
4932 |
++ spin_unlock(&gl->gl_lockref.lock); |
4933 |
+ might_sleep(); |
4934 |
+ wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, |
4935 |
+ TASK_UNINTERRUPTIBLE); |
4936 |
++ spin_lock(&gl->gl_lockref.lock); |
4937 |
+ } |
4938 |
+ if (gh->gh_flags & GL_NOCACHE) |
4939 |
+ handle_callback(gl, LM_ST_UNLOCKED, 0, false); |
4940 |
+diff --git a/fs/io_uring.c b/fs/io_uring.c |
4941 |
+index 144056b0cac92..359d1abb089c4 100644 |
4942 |
+--- a/fs/io_uring.c |
4943 |
++++ b/fs/io_uring.c |
4944 |
+@@ -653,7 +653,7 @@ struct io_unlink { |
4945 |
+ struct io_completion { |
4946 |
+ struct file *file; |
4947 |
+ struct list_head list; |
4948 |
+- int cflags; |
4949 |
++ u32 cflags; |
4950 |
+ }; |
4951 |
+ |
4952 |
+ struct io_async_connect { |
4953 |
+@@ -1476,7 +1476,33 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, |
4954 |
+ return ret; |
4955 |
+ } |
4956 |
+ |
4957 |
+-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) |
4958 |
++static inline bool req_ref_inc_not_zero(struct io_kiocb *req) |
4959 |
++{ |
4960 |
++ return refcount_inc_not_zero(&req->refs); |
4961 |
++} |
4962 |
++ |
4963 |
++static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs) |
4964 |
++{ |
4965 |
++ return refcount_sub_and_test(refs, &req->refs); |
4966 |
++} |
4967 |
++ |
4968 |
++static inline bool req_ref_put_and_test(struct io_kiocb *req) |
4969 |
++{ |
4970 |
++ return refcount_dec_and_test(&req->refs); |
4971 |
++} |
4972 |
++ |
4973 |
++static inline void req_ref_put(struct io_kiocb *req) |
4974 |
++{ |
4975 |
++ refcount_dec(&req->refs); |
4976 |
++} |
4977 |
++ |
4978 |
++static inline void req_ref_get(struct io_kiocb *req) |
4979 |
++{ |
4980 |
++ refcount_inc(&req->refs); |
4981 |
++} |
4982 |
++ |
4983 |
++static void __io_cqring_fill_event(struct io_kiocb *req, long res, |
4984 |
++ unsigned int cflags) |
4985 |
+ { |
4986 |
+ struct io_ring_ctx *ctx = req->ctx; |
4987 |
+ struct io_uring_cqe *cqe; |
4988 |
+@@ -1511,7 +1537,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) |
4989 |
+ io_clean_op(req); |
4990 |
+ req->result = res; |
4991 |
+ req->compl.cflags = cflags; |
4992 |
+- refcount_inc(&req->refs); |
4993 |
++ req_ref_get(req); |
4994 |
+ list_add_tail(&req->compl.list, &ctx->cq_overflow_list); |
4995 |
+ } |
4996 |
+ } |
4997 |
+@@ -1533,7 +1559,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, |
4998 |
+ * If we're the last reference to this request, add to our locked |
4999 |
+ * free_list cache. |
5000 |
+ */ |
5001 |
+- if (refcount_dec_and_test(&req->refs)) { |
5002 |
++ if (req_ref_put_and_test(req)) { |
5003 |
+ struct io_comp_state *cs = &ctx->submit_state.comp; |
5004 |
+ |
5005 |
+ if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { |
5006 |
+@@ -2112,7 +2138,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs, |
5007 |
+ req = cs->reqs[i]; |
5008 |
+ |
5009 |
+ /* submission and completion refs */ |
5010 |
+- if (refcount_sub_and_test(2, &req->refs)) |
5011 |
++ if (req_ref_sub_and_test(req, 2)) |
5012 |
+ io_req_free_batch(&rb, req, &ctx->submit_state); |
5013 |
+ } |
5014 |
+ |
5015 |
+@@ -2128,7 +2154,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) |
5016 |
+ { |
5017 |
+ struct io_kiocb *nxt = NULL; |
5018 |
+ |
5019 |
+- if (refcount_dec_and_test(&req->refs)) { |
5020 |
++ if (req_ref_put_and_test(req)) { |
5021 |
+ nxt = io_req_find_next(req); |
5022 |
+ __io_free_req(req); |
5023 |
+ } |
5024 |
+@@ -2137,7 +2163,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) |
5025 |
+ |
5026 |
+ static void io_put_req(struct io_kiocb *req) |
5027 |
+ { |
5028 |
+- if (refcount_dec_and_test(&req->refs)) |
5029 |
++ if (req_ref_put_and_test(req)) |
5030 |
+ io_free_req(req); |
5031 |
+ } |
5032 |
+ |
5033 |
+@@ -2160,14 +2186,14 @@ static void io_free_req_deferred(struct io_kiocb *req) |
5034 |
+ |
5035 |
+ static inline void io_put_req_deferred(struct io_kiocb *req, int refs) |
5036 |
+ { |
5037 |
+- if (refcount_sub_and_test(refs, &req->refs)) |
5038 |
++ if (req_ref_sub_and_test(req, refs)) |
5039 |
+ io_free_req_deferred(req); |
5040 |
+ } |
5041 |
+ |
5042 |
+ static void io_double_put_req(struct io_kiocb *req) |
5043 |
+ { |
5044 |
+ /* drop both submit and complete references */ |
5045 |
+- if (refcount_sub_and_test(2, &req->refs)) |
5046 |
++ if (req_ref_sub_and_test(req, 2)) |
5047 |
+ io_free_req(req); |
5048 |
+ } |
5049 |
+ |
5050 |
+@@ -2253,7 +2279,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, |
5051 |
+ __io_cqring_fill_event(req, req->result, cflags); |
5052 |
+ (*nr_events)++; |
5053 |
+ |
5054 |
+- if (refcount_dec_and_test(&req->refs)) |
5055 |
++ if (req_ref_put_and_test(req)) |
5056 |
+ io_req_free_batch(&rb, req, &ctx->submit_state); |
5057 |
+ } |
5058 |
+ |
5059 |
+@@ -2495,7 +2521,7 @@ static bool io_rw_reissue(struct io_kiocb *req) |
5060 |
+ lockdep_assert_held(&req->ctx->uring_lock); |
5061 |
+ |
5062 |
+ if (io_resubmit_prep(req)) { |
5063 |
+- refcount_inc(&req->refs); |
5064 |
++ req_ref_get(req); |
5065 |
+ io_queue_async_work(req); |
5066 |
+ return true; |
5067 |
+ } |
5068 |
+@@ -3208,7 +3234,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, |
5069 |
+ list_del_init(&wait->entry); |
5070 |
+ |
5071 |
+ /* submit ref gets dropped, acquire a new one */ |
5072 |
+- refcount_inc(&req->refs); |
5073 |
++ req_ref_get(req); |
5074 |
+ io_req_task_queue(req); |
5075 |
+ return 1; |
5076 |
+ } |
5077 |
+@@ -4953,7 +4979,7 @@ static void io_poll_remove_double(struct io_kiocb *req) |
5078 |
+ spin_lock(&head->lock); |
5079 |
+ list_del_init(&poll->wait.entry); |
5080 |
+ if (poll->wait.private) |
5081 |
+- refcount_dec(&req->refs); |
5082 |
++ req_ref_put(req); |
5083 |
+ poll->head = NULL; |
5084 |
+ spin_unlock(&head->lock); |
5085 |
+ } |
5086 |
+@@ -5019,7 +5045,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, |
5087 |
+ poll->wait.func(&poll->wait, mode, sync, key); |
5088 |
+ } |
5089 |
+ } |
5090 |
+- refcount_dec(&req->refs); |
5091 |
++ req_ref_put(req); |
5092 |
+ return 1; |
5093 |
+ } |
5094 |
+ |
5095 |
+@@ -5062,7 +5088,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, |
5096 |
+ return; |
5097 |
+ } |
5098 |
+ io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); |
5099 |
+- refcount_inc(&req->refs); |
5100 |
++ req_ref_get(req); |
5101 |
+ poll->wait.private = req; |
5102 |
+ *poll_ptr = poll; |
5103 |
+ } |
5104 |
+@@ -6211,7 +6237,7 @@ static void io_wq_submit_work(struct io_wq_work *work) |
5105 |
+ /* avoid locking problems by failing it from a clean context */ |
5106 |
+ if (ret) { |
5107 |
+ /* io-wq is going to take one down */ |
5108 |
+- refcount_inc(&req->refs); |
5109 |
++ req_ref_get(req); |
5110 |
+ io_req_task_queue_fail(req, ret); |
5111 |
+ } |
5112 |
+ } |
5113 |
+@@ -6263,15 +6289,17 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) |
5114 |
+ * We don't expect the list to be empty, that will only happen if we |
5115 |
+ * race with the completion of the linked work. |
5116 |
+ */ |
5117 |
+- if (prev && refcount_inc_not_zero(&prev->refs)) |
5118 |
++ if (prev) { |
5119 |
+ io_remove_next_linked(prev); |
5120 |
+- else |
5121 |
+- prev = NULL; |
5122 |
++ if (!req_ref_inc_not_zero(prev)) |
5123 |
++ prev = NULL; |
5124 |
++ } |
5125 |
+ spin_unlock_irqrestore(&ctx->completion_lock, flags); |
5126 |
+ |
5127 |
+ if (prev) { |
5128 |
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); |
5129 |
+ io_put_req_deferred(prev, 1); |
5130 |
++ io_put_req_deferred(req, 1); |
5131 |
+ } else { |
5132 |
+ io_req_complete_post(req, -ETIME, 0); |
5133 |
+ io_put_req_deferred(req, 1); |
5134 |
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c |
5135 |
+index 5edc1d0cf115f..0e45893c0cda0 100644 |
5136 |
+--- a/fs/ocfs2/file.c |
5137 |
++++ b/fs/ocfs2/file.c |
5138 |
+@@ -1857,6 +1857,45 @@ out: |
5139 |
+ return ret; |
5140 |
+ } |
5141 |
+ |
5142 |
++/* |
5143 |
++ * zero out partial blocks of one cluster. |
5144 |
++ * |
5145 |
++ * start: file offset where zero starts, will be made upper block aligned. |
5146 |
++ * len: it will be trimmed to the end of current cluster if "start + len" |
5147 |
++ * is bigger than it. |
5148 |
++ */ |
5149 |
++static int ocfs2_zeroout_partial_cluster(struct inode *inode, |
5150 |
++ u64 start, u64 len) |
5151 |
++{ |
5152 |
++ int ret; |
5153 |
++ u64 start_block, end_block, nr_blocks; |
5154 |
++ u64 p_block, offset; |
5155 |
++ u32 cluster, p_cluster, nr_clusters; |
5156 |
++ struct super_block *sb = inode->i_sb; |
5157 |
++ u64 end = ocfs2_align_bytes_to_clusters(sb, start); |
5158 |
++ |
5159 |
++ if (start + len < end) |
5160 |
++ end = start + len; |
5161 |
++ |
5162 |
++ start_block = ocfs2_blocks_for_bytes(sb, start); |
5163 |
++ end_block = ocfs2_blocks_for_bytes(sb, end); |
5164 |
++ nr_blocks = end_block - start_block; |
5165 |
++ if (!nr_blocks) |
5166 |
++ return 0; |
5167 |
++ |
5168 |
++ cluster = ocfs2_bytes_to_clusters(sb, start); |
5169 |
++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster, |
5170 |
++ &nr_clusters, NULL); |
5171 |
++ if (ret) |
5172 |
++ return ret; |
5173 |
++ if (!p_cluster) |
5174 |
++ return 0; |
5175 |
++ |
5176 |
++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); |
5177 |
++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; |
5178 |
++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); |
5179 |
++} |
5180 |
++ |
5181 |
+ /* |
5182 |
+ * Parts of this function taken from xfs_change_file_space() |
5183 |
+ */ |
5184 |
+@@ -1867,7 +1906,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
5185 |
+ { |
5186 |
+ int ret; |
5187 |
+ s64 llen; |
5188 |
+- loff_t size; |
5189 |
++ loff_t size, orig_isize; |
5190 |
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
5191 |
+ struct buffer_head *di_bh = NULL; |
5192 |
+ handle_t *handle; |
5193 |
+@@ -1898,6 +1937,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
5194 |
+ goto out_inode_unlock; |
5195 |
+ } |
5196 |
+ |
5197 |
++ orig_isize = i_size_read(inode); |
5198 |
+ switch (sr->l_whence) { |
5199 |
+ case 0: /*SEEK_SET*/ |
5200 |
+ break; |
5201 |
+@@ -1905,7 +1945,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
5202 |
+ sr->l_start += f_pos; |
5203 |
+ break; |
5204 |
+ case 2: /*SEEK_END*/ |
5205 |
+- sr->l_start += i_size_read(inode); |
5206 |
++ sr->l_start += orig_isize; |
5207 |
+ break; |
5208 |
+ default: |
5209 |
+ ret = -EINVAL; |
5210 |
+@@ -1959,6 +1999,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
5211 |
+ default: |
5212 |
+ ret = -EINVAL; |
5213 |
+ } |
5214 |
++ |
5215 |
++ /* zeroout eof blocks in the cluster. */ |
5216 |
++ if (!ret && change_size && orig_isize < size) { |
5217 |
++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, |
5218 |
++ size - orig_isize); |
5219 |
++ if (!ret) |
5220 |
++ i_size_write(inode, size); |
5221 |
++ } |
5222 |
+ up_write(&OCFS2_I(inode)->ip_alloc_sem); |
5223 |
+ if (ret) { |
5224 |
+ mlog_errno(ret); |
5225 |
+@@ -1975,9 +2023,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
5226 |
+ goto out_inode_unlock; |
5227 |
+ } |
5228 |
+ |
5229 |
+- if (change_size && i_size_read(inode) < size) |
5230 |
+- i_size_write(inode, size); |
5231 |
+- |
5232 |
+ inode->i_ctime = inode->i_mtime = current_time(inode); |
5233 |
+ ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); |
5234 |
+ if (ret < 0) |
5235 |
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h |
5236 |
+index 9c68b2da14c63..e5a4c68093fc2 100644 |
5237 |
+--- a/include/linux/mlx5/mlx5_ifc.h |
5238 |
++++ b/include/linux/mlx5/mlx5_ifc.h |
5239 |
+@@ -1260,6 +1260,8 @@ enum mlx5_fc_bulk_alloc_bitmask { |
5240 |
+ |
5241 |
+ #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) |
5242 |
+ |
5243 |
++#define MLX5_FT_MAX_MULTIPATH_LEVEL 63 |
5244 |
++ |
5245 |
+ enum { |
5246 |
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0, |
5247 |
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, |
5248 |
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h |
5249 |
+index 5e772392a3795..136b1d996075c 100644 |
5250 |
+--- a/include/linux/pgtable.h |
5251 |
++++ b/include/linux/pgtable.h |
5252 |
+@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres |
5253 |
+ * To be differentiate with macro pte_mkyoung, this macro is used on platforms |
5254 |
+ * where software maintains page access bit. |
5255 |
+ */ |
5256 |
++#ifndef pte_sw_mkyoung |
5257 |
++static inline pte_t pte_sw_mkyoung(pte_t pte) |
5258 |
++{ |
5259 |
++ return pte; |
5260 |
++} |
5261 |
++#define pte_sw_mkyoung pte_sw_mkyoung |
5262 |
++#endif |
5263 |
++ |
5264 |
+ #ifndef pte_savedwrite |
5265 |
+ #define pte_savedwrite pte_write |
5266 |
+ #endif |
5267 |
+diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h |
5268 |
+index fafc1beea504a..9837fb011f2fb 100644 |
5269 |
+--- a/include/linux/platform_data/ti-sysc.h |
5270 |
++++ b/include/linux/platform_data/ti-sysc.h |
5271 |
+@@ -50,6 +50,7 @@ struct sysc_regbits { |
5272 |
+ s8 emufree_shift; |
5273 |
+ }; |
5274 |
+ |
5275 |
++#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27) |
5276 |
+ #define SYSC_QUIRK_GPMC_DEBUG BIT(26) |
5277 |
+ #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25) |
5278 |
+ #define SYSC_MODULE_QUIRK_PRUSS BIT(24) |
5279 |
+diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h |
5280 |
+index 48ecca8530ffa..b655d8666f555 100644 |
5281 |
+--- a/include/net/caif/caif_dev.h |
5282 |
++++ b/include/net/caif/caif_dev.h |
5283 |
+@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer); |
5284 |
+ * The link_support layer is used to add any Link Layer specific |
5285 |
+ * framing. |
5286 |
+ */ |
5287 |
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5288 |
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5289 |
+ struct cflayer *link_support, int head_room, |
5290 |
+ struct cflayer **layer, int (**rcv_func)( |
5291 |
+ struct sk_buff *, struct net_device *, |
5292 |
+diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h |
5293 |
+index 2aa5e91d84576..8819ff4db35a6 100644 |
5294 |
+--- a/include/net/caif/cfcnfg.h |
5295 |
++++ b/include/net/caif/cfcnfg.h |
5296 |
+@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg); |
5297 |
+ * @fcs: Specify if checksum is used in CAIF Framing Layer. |
5298 |
+ * @head_room: Head space needed by link specific protocol. |
5299 |
+ */ |
5300 |
+-void |
5301 |
++int |
5302 |
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, |
5303 |
+ struct net_device *dev, struct cflayer *phy_layer, |
5304 |
+ enum cfcnfg_phy_preference pref, |
5305 |
+diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h |
5306 |
+index 14a55e03bb3ce..67cce8757175a 100644 |
5307 |
+--- a/include/net/caif/cfserl.h |
5308 |
++++ b/include/net/caif/cfserl.h |
5309 |
+@@ -9,4 +9,5 @@ |
5310 |
+ #include <net/caif/caif_layer.h> |
5311 |
+ |
5312 |
+ struct cflayer *cfserl_create(int instance, bool use_stx); |
5313 |
++void cfserl_release(struct cflayer *layer); |
5314 |
+ #endif |
5315 |
+diff --git a/include/net/tls.h b/include/net/tls.h |
5316 |
+index 3eccb525e8f79..8341a8d1e8073 100644 |
5317 |
+--- a/include/net/tls.h |
5318 |
++++ b/include/net/tls.h |
5319 |
+@@ -193,7 +193,11 @@ struct tls_offload_context_tx { |
5320 |
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) |
5321 |
+ |
5322 |
+ enum tls_context_flags { |
5323 |
+- TLS_RX_SYNC_RUNNING = 0, |
5324 |
++ /* tls_device_down was called after the netdev went down, device state |
5325 |
++ * was released, and kTLS works in software, even though rx_conf is |
5326 |
++ * still TLS_HW (needed for transition). |
5327 |
++ */ |
5328 |
++ TLS_RX_DEV_DEGRADED = 0, |
5329 |
+ /* Unlike RX where resync is driven entirely by the core in TX only |
5330 |
+ * the driver knows when things went out of sync, so we need the flag |
5331 |
+ * to be atomic. |
5332 |
+@@ -266,6 +270,7 @@ struct tls_context { |
5333 |
+ |
5334 |
+ /* cache cold stuff */ |
5335 |
+ struct proto *sk_proto; |
5336 |
++ struct sock *sk; |
5337 |
+ |
5338 |
+ void (*sk_destruct)(struct sock *sk); |
5339 |
+ |
5340 |
+@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx) |
5341 |
+ struct sk_buff * |
5342 |
+ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, |
5343 |
+ struct sk_buff *skb); |
5344 |
++struct sk_buff * |
5345 |
++tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev, |
5346 |
++ struct sk_buff *skb); |
5347 |
+ |
5348 |
+ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) |
5349 |
+ { |
5350 |
+diff --git a/init/main.c b/init/main.c |
5351 |
+index 53b278845b886..5bd1a25f1d6f5 100644 |
5352 |
+--- a/init/main.c |
5353 |
++++ b/init/main.c |
5354 |
+@@ -1514,7 +1514,7 @@ static noinline void __init kernel_init_freeable(void) |
5355 |
+ */ |
5356 |
+ set_mems_allowed(node_states[N_MEMORY]); |
5357 |
+ |
5358 |
+- cad_pid = task_pid(current); |
5359 |
++ cad_pid = get_pid(task_pid(current)); |
5360 |
+ |
5361 |
+ smp_prepare_cpus(setup_max_cpus); |
5362 |
+ |
5363 |
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c |
5364 |
+index 308427fe03a3a..6140e91e9c891 100644 |
5365 |
+--- a/kernel/bpf/helpers.c |
5366 |
++++ b/kernel/bpf/helpers.c |
5367 |
+@@ -14,6 +14,7 @@ |
5368 |
+ #include <linux/jiffies.h> |
5369 |
+ #include <linux/pid_namespace.h> |
5370 |
+ #include <linux/proc_ns.h> |
5371 |
++#include <linux/security.h> |
5372 |
+ |
5373 |
+ #include "../../lib/kstrtox.h" |
5374 |
+ |
5375 |
+@@ -741,11 +742,13 @@ bpf_base_func_proto(enum bpf_func_id func_id) |
5376 |
+ case BPF_FUNC_probe_read_user: |
5377 |
+ return &bpf_probe_read_user_proto; |
5378 |
+ case BPF_FUNC_probe_read_kernel: |
5379 |
+- return &bpf_probe_read_kernel_proto; |
5380 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5381 |
++ NULL : &bpf_probe_read_kernel_proto; |
5382 |
+ case BPF_FUNC_probe_read_user_str: |
5383 |
+ return &bpf_probe_read_user_str_proto; |
5384 |
+ case BPF_FUNC_probe_read_kernel_str: |
5385 |
+- return &bpf_probe_read_kernel_str_proto; |
5386 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5387 |
++ NULL : &bpf_probe_read_kernel_str_proto; |
5388 |
+ case BPF_FUNC_snprintf_btf: |
5389 |
+ return &bpf_snprintf_btf_proto; |
5390 |
+ default: |
5391 |
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c |
5392 |
+index b0c45d923f0f9..9bb3d2823f442 100644 |
5393 |
+--- a/kernel/trace/bpf_trace.c |
5394 |
++++ b/kernel/trace/bpf_trace.c |
5395 |
+@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = { |
5396 |
+ static __always_inline int |
5397 |
+ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) |
5398 |
+ { |
5399 |
+- int ret = security_locked_down(LOCKDOWN_BPF_READ); |
5400 |
++ int ret; |
5401 |
+ |
5402 |
+- if (unlikely(ret < 0)) |
5403 |
+- goto fail; |
5404 |
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); |
5405 |
+ if (unlikely(ret < 0)) |
5406 |
+- goto fail; |
5407 |
+- return ret; |
5408 |
+-fail: |
5409 |
+- memset(dst, 0, size); |
5410 |
++ memset(dst, 0, size); |
5411 |
+ return ret; |
5412 |
+ } |
5413 |
+ |
5414 |
+@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = { |
5415 |
+ static __always_inline int |
5416 |
+ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) |
5417 |
+ { |
5418 |
+- int ret = security_locked_down(LOCKDOWN_BPF_READ); |
5419 |
+- |
5420 |
+- if (unlikely(ret < 0)) |
5421 |
+- goto fail; |
5422 |
++ int ret; |
5423 |
+ |
5424 |
+ /* |
5425 |
+ * The strncpy_from_kernel_nofault() call will likely not fill the |
5426 |
+@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) |
5427 |
+ */ |
5428 |
+ ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); |
5429 |
+ if (unlikely(ret < 0)) |
5430 |
+- goto fail; |
5431 |
+- |
5432 |
+- return ret; |
5433 |
+-fail: |
5434 |
+- memset(dst, 0, size); |
5435 |
++ memset(dst, 0, size); |
5436 |
+ return ret; |
5437 |
+ } |
5438 |
+ |
5439 |
+@@ -1322,16 +1310,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
5440 |
+ case BPF_FUNC_probe_read_user: |
5441 |
+ return &bpf_probe_read_user_proto; |
5442 |
+ case BPF_FUNC_probe_read_kernel: |
5443 |
+- return &bpf_probe_read_kernel_proto; |
5444 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5445 |
++ NULL : &bpf_probe_read_kernel_proto; |
5446 |
+ case BPF_FUNC_probe_read_user_str: |
5447 |
+ return &bpf_probe_read_user_str_proto; |
5448 |
+ case BPF_FUNC_probe_read_kernel_str: |
5449 |
+- return &bpf_probe_read_kernel_str_proto; |
5450 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5451 |
++ NULL : &bpf_probe_read_kernel_str_proto; |
5452 |
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
5453 |
+ case BPF_FUNC_probe_read: |
5454 |
+- return &bpf_probe_read_compat_proto; |
5455 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5456 |
++ NULL : &bpf_probe_read_compat_proto; |
5457 |
+ case BPF_FUNC_probe_read_str: |
5458 |
+- return &bpf_probe_read_compat_str_proto; |
5459 |
++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
5460 |
++ NULL : &bpf_probe_read_compat_str_proto; |
5461 |
+ #endif |
5462 |
+ #ifdef CONFIG_CGROUPS |
5463 |
+ case BPF_FUNC_get_current_cgroup_id: |
5464 |
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c |
5465 |
+index a9bd6ce1ba02b..726fd2030f645 100644 |
5466 |
+--- a/mm/debug_vm_pgtable.c |
5467 |
++++ b/mm/debug_vm_pgtable.c |
5468 |
+@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm, |
5469 |
+ |
5470 |
+ pr_debug("Validating PMD advanced\n"); |
5471 |
+ /* Align the address wrt HPAGE_PMD_SIZE */ |
5472 |
+- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE; |
5473 |
++ vaddr &= HPAGE_PMD_MASK; |
5474 |
+ |
5475 |
+ pgtable_trans_huge_deposit(mm, pmdp, pgtable); |
5476 |
+ |
5477 |
+@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm, |
5478 |
+ |
5479 |
+ pr_debug("Validating PUD advanced\n"); |
5480 |
+ /* Align the address wrt HPAGE_PUD_SIZE */ |
5481 |
+- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE; |
5482 |
++ vaddr &= HPAGE_PUD_MASK; |
5483 |
+ |
5484 |
+ set_pud_at(mm, vaddr, pudp, pud); |
5485 |
+ pudp_set_wrprotect(mm, vaddr, pudp); |
5486 |
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
5487 |
+index 96b722af092e7..ce63ec0187c55 100644 |
5488 |
+--- a/mm/hugetlb.c |
5489 |
++++ b/mm/hugetlb.c |
5490 |
+@@ -4705,10 +4705,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, |
5491 |
+ struct page *page; |
5492 |
+ |
5493 |
+ if (!*pagep) { |
5494 |
+- ret = -ENOMEM; |
5495 |
++ /* If a page already exists, then it's UFFDIO_COPY for |
5496 |
++ * a non-missing case. Return -EEXIST. |
5497 |
++ */ |
5498 |
++ if (vm_shared && |
5499 |
++ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { |
5500 |
++ ret = -EEXIST; |
5501 |
++ goto out; |
5502 |
++ } |
5503 |
++ |
5504 |
+ page = alloc_huge_page(dst_vma, dst_addr, 0); |
5505 |
+- if (IS_ERR(page)) |
5506 |
++ if (IS_ERR(page)) { |
5507 |
++ ret = -ENOMEM; |
5508 |
+ goto out; |
5509 |
++ } |
5510 |
+ |
5511 |
+ ret = copy_huge_page_from_user(page, |
5512 |
+ (const void __user *) src_addr, |
5513 |
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c |
5514 |
+index f0be2c5038b5d..6f29981e317fe 100644 |
5515 |
+--- a/mm/kfence/core.c |
5516 |
++++ b/mm/kfence/core.c |
5517 |
+@@ -20,6 +20,7 @@ |
5518 |
+ #include <linux/moduleparam.h> |
5519 |
+ #include <linux/random.h> |
5520 |
+ #include <linux/rcupdate.h> |
5521 |
++#include <linux/sched/sysctl.h> |
5522 |
+ #include <linux/seq_file.h> |
5523 |
+ #include <linux/slab.h> |
5524 |
+ #include <linux/spinlock.h> |
5525 |
+@@ -620,7 +621,16 @@ static void toggle_allocation_gate(struct work_struct *work) |
5526 |
+ /* Enable static key, and await allocation to happen. */ |
5527 |
+ static_branch_enable(&kfence_allocation_key); |
5528 |
+ |
5529 |
+- wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ); |
5530 |
++ if (sysctl_hung_task_timeout_secs) { |
5531 |
++ /* |
5532 |
++ * During low activity with no allocations we might wait a |
5533 |
++ * while; let's avoid the hung task warning. |
5534 |
++ */ |
5535 |
++ wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), |
5536 |
++ sysctl_hung_task_timeout_secs * HZ / 2); |
5537 |
++ } else { |
5538 |
++ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate)); |
5539 |
++ } |
5540 |
+ |
5541 |
+ /* Disable static key and reset timer. */ |
5542 |
+ static_branch_disable(&kfence_allocation_key); |
5543 |
+diff --git a/mm/memory.c b/mm/memory.c |
5544 |
+index 550405fc3b5e6..14a6c66b37483 100644 |
5545 |
+--- a/mm/memory.c |
5546 |
++++ b/mm/memory.c |
5547 |
+@@ -2896,6 +2896,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) |
5548 |
+ } |
5549 |
+ flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); |
5550 |
+ entry = mk_pte(new_page, vma->vm_page_prot); |
5551 |
++ entry = pte_sw_mkyoung(entry); |
5552 |
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
5553 |
+ |
5554 |
+ /* |
5555 |
+@@ -3561,6 +3562,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) |
5556 |
+ __SetPageUptodate(page); |
5557 |
+ |
5558 |
+ entry = mk_pte(page, vma->vm_page_prot); |
5559 |
++ entry = pte_sw_mkyoung(entry); |
5560 |
+ if (vma->vm_flags & VM_WRITE) |
5561 |
+ entry = pte_mkwrite(pte_mkdirty(entry)); |
5562 |
+ |
5563 |
+@@ -3745,6 +3747,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) |
5564 |
+ |
5565 |
+ if (prefault && arch_wants_old_prefaulted_pte()) |
5566 |
+ entry = pte_mkold(entry); |
5567 |
++ else |
5568 |
++ entry = pte_sw_mkyoung(entry); |
5569 |
+ |
5570 |
+ if (write) |
5571 |
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
5572 |
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
5573 |
+index 4bb3cdfc47f87..d9dbf45f7590e 100644 |
5574 |
+--- a/mm/page_alloc.c |
5575 |
++++ b/mm/page_alloc.c |
5576 |
+@@ -8951,6 +8951,8 @@ bool take_page_off_buddy(struct page *page) |
5577 |
+ del_page_from_free_list(page_head, zone, page_order); |
5578 |
+ break_down_buddy_pages(zone, page_head, page, 0, |
5579 |
+ page_order, migratetype); |
5580 |
++ if (!is_migrate_isolate(migratetype)) |
5581 |
++ __mod_zone_freepage_state(zone, -1, migratetype); |
5582 |
+ ret = true; |
5583 |
+ break; |
5584 |
+ } |
5585 |
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c |
5586 |
+index b0d9c36acc033..4fad2ca661ed9 100644 |
5587 |
+--- a/net/bluetooth/hci_core.c |
5588 |
++++ b/net/bluetooth/hci_core.c |
5589 |
+@@ -1608,8 +1608,13 @@ setup_failed: |
5590 |
+ } else { |
5591 |
+ /* Init failed, cleanup */ |
5592 |
+ flush_work(&hdev->tx_work); |
5593 |
+- flush_work(&hdev->cmd_work); |
5594 |
++ |
5595 |
++ /* Since hci_rx_work() is possible to awake new cmd_work |
5596 |
++ * it should be flushed first to avoid unexpected call of |
5597 |
++ * hci_cmd_work() |
5598 |
++ */ |
5599 |
+ flush_work(&hdev->rx_work); |
5600 |
++ flush_work(&hdev->cmd_work); |
5601 |
+ |
5602 |
+ skb_queue_purge(&hdev->cmd_q); |
5603 |
+ skb_queue_purge(&hdev->rx_q); |
5604 |
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c |
5605 |
+index 251b9128f530a..eed0dd066e12c 100644 |
5606 |
+--- a/net/bluetooth/hci_sock.c |
5607 |
++++ b/net/bluetooth/hci_sock.c |
5608 |
+@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) |
5609 |
+ /* Detach sockets from device */ |
5610 |
+ read_lock(&hci_sk_list.lock); |
5611 |
+ sk_for_each(sk, &hci_sk_list.head) { |
5612 |
+- bh_lock_sock_nested(sk); |
5613 |
++ lock_sock(sk); |
5614 |
+ if (hci_pi(sk)->hdev == hdev) { |
5615 |
+ hci_pi(sk)->hdev = NULL; |
5616 |
+ sk->sk_err = EPIPE; |
5617 |
+@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) |
5618 |
+ |
5619 |
+ hci_dev_put(hdev); |
5620 |
+ } |
5621 |
+- bh_unlock_sock(sk); |
5622 |
++ release_sock(sk); |
5623 |
+ } |
5624 |
+ read_unlock(&hci_sk_list.lock); |
5625 |
+ } |
5626 |
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c |
5627 |
+index c10e5a55758d2..440139706130a 100644 |
5628 |
+--- a/net/caif/caif_dev.c |
5629 |
++++ b/net/caif/caif_dev.c |
5630 |
+@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on) |
5631 |
+ caifd_put(caifd); |
5632 |
+ } |
5633 |
+ |
5634 |
+-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5635 |
++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5636 |
+ struct cflayer *link_support, int head_room, |
5637 |
+ struct cflayer **layer, |
5638 |
+ int (**rcv_func)(struct sk_buff *, struct net_device *, |
5639 |
+@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5640 |
+ enum cfcnfg_phy_preference pref; |
5641 |
+ struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); |
5642 |
+ struct caif_device_entry_list *caifdevs; |
5643 |
++ int res; |
5644 |
+ |
5645 |
+ caifdevs = caif_device_list(dev_net(dev)); |
5646 |
+ caifd = caif_device_alloc(dev); |
5647 |
+ if (!caifd) |
5648 |
+- return; |
5649 |
++ return -ENOMEM; |
5650 |
+ *layer = &caifd->layer; |
5651 |
+ spin_lock_init(&caifd->flow_lock); |
5652 |
+ |
5653 |
+@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5654 |
+ strlcpy(caifd->layer.name, dev->name, |
5655 |
+ sizeof(caifd->layer.name)); |
5656 |
+ caifd->layer.transmit = transmit; |
5657 |
+- cfcnfg_add_phy_layer(cfg, |
5658 |
++ res = cfcnfg_add_phy_layer(cfg, |
5659 |
+ dev, |
5660 |
+ &caifd->layer, |
5661 |
+ pref, |
5662 |
+@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, |
5663 |
+ mutex_unlock(&caifdevs->lock); |
5664 |
+ if (rcv_func) |
5665 |
+ *rcv_func = receive; |
5666 |
++ return res; |
5667 |
+ } |
5668 |
+ EXPORT_SYMBOL(caif_enroll_dev); |
5669 |
+ |
5670 |
+@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, |
5671 |
+ struct cflayer *layer, *link_support; |
5672 |
+ int head_room = 0; |
5673 |
+ struct caif_device_entry_list *caifdevs; |
5674 |
++ int res; |
5675 |
+ |
5676 |
+ cfg = get_cfcnfg(dev_net(dev)); |
5677 |
+ caifdevs = caif_device_list(dev_net(dev)); |
5678 |
+@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, |
5679 |
+ break; |
5680 |
+ } |
5681 |
+ } |
5682 |
+- caif_enroll_dev(dev, caifdev, link_support, head_room, |
5683 |
++ res = caif_enroll_dev(dev, caifdev, link_support, head_room, |
5684 |
+ &layer, NULL); |
5685 |
++ if (res) |
5686 |
++ cfserl_release(link_support); |
5687 |
+ caifdev->flowctrl = dev_flowctrl; |
5688 |
+ break; |
5689 |
+ |
5690 |
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c |
5691 |
+index a0116b9503d9d..b02e1292f7f19 100644 |
5692 |
+--- a/net/caif/caif_usb.c |
5693 |
++++ b/net/caif/caif_usb.c |
5694 |
+@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], |
5695 |
+ return (struct cflayer *) this; |
5696 |
+ } |
5697 |
+ |
5698 |
++static void cfusbl_release(struct cflayer *layer) |
5699 |
++{ |
5700 |
++ kfree(layer); |
5701 |
++} |
5702 |
++ |
5703 |
+ static struct packet_type caif_usb_type __read_mostly = { |
5704 |
+ .type = cpu_to_be16(ETH_P_802_EX1), |
5705 |
+ }; |
5706 |
+@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, |
5707 |
+ struct cflayer *layer, *link_support; |
5708 |
+ struct usbnet *usbnet; |
5709 |
+ struct usb_device *usbdev; |
5710 |
++ int res; |
5711 |
+ |
5712 |
+ /* Check whether we have a NCM device, and find its VID/PID. */ |
5713 |
+ if (!(dev->dev.parent && dev->dev.parent->driver && |
5714 |
+@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, |
5715 |
+ if (dev->num_tx_queues > 1) |
5716 |
+ pr_warn("USB device uses more than one tx queue\n"); |
5717 |
+ |
5718 |
+- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, |
5719 |
++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, |
5720 |
+ &layer, &caif_usb_type.func); |
5721 |
++ if (res) |
5722 |
++ goto err; |
5723 |
++ |
5724 |
+ if (!pack_added) |
5725 |
+ dev_add_pack(&caif_usb_type); |
5726 |
+ pack_added = true; |
5727 |
+@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, |
5728 |
+ strlcpy(layer->name, dev->name, sizeof(layer->name)); |
5729 |
+ |
5730 |
+ return 0; |
5731 |
++err: |
5732 |
++ cfusbl_release(link_support); |
5733 |
++ return res; |
5734 |
+ } |
5735 |
+ |
5736 |
+ static struct notifier_block caif_device_notifier = { |
5737 |
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c |
5738 |
+index 399239a14420f..cac30e676ac94 100644 |
5739 |
+--- a/net/caif/cfcnfg.c |
5740 |
++++ b/net/caif/cfcnfg.c |
5741 |
+@@ -450,7 +450,7 @@ unlock: |
5742 |
+ rcu_read_unlock(); |
5743 |
+ } |
5744 |
+ |
5745 |
+-void |
5746 |
++int |
5747 |
+ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, |
5748 |
+ struct net_device *dev, struct cflayer *phy_layer, |
5749 |
+ enum cfcnfg_phy_preference pref, |
5750 |
+@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, |
5751 |
+ { |
5752 |
+ struct cflayer *frml; |
5753 |
+ struct cfcnfg_phyinfo *phyinfo = NULL; |
5754 |
+- int i; |
5755 |
++ int i, res = 0; |
5756 |
+ u8 phyid; |
5757 |
+ |
5758 |
+ mutex_lock(&cnfg->lock); |
5759 |
+@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, |
5760 |
+ goto got_phyid; |
5761 |
+ } |
5762 |
+ pr_warn("Too many CAIF Link Layers (max 6)\n"); |
5763 |
++ res = -EEXIST; |
5764 |
+ goto out; |
5765 |
+ |
5766 |
+ got_phyid: |
5767 |
+ phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); |
5768 |
+- if (!phyinfo) |
5769 |
++ if (!phyinfo) { |
5770 |
++ res = -ENOMEM; |
5771 |
+ goto out_err; |
5772 |
++ } |
5773 |
+ |
5774 |
+ phy_layer->id = phyid; |
5775 |
+ phyinfo->pref = pref; |
5776 |
+@@ -492,8 +495,10 @@ got_phyid: |
5777 |
+ |
5778 |
+ frml = cffrml_create(phyid, fcs); |
5779 |
+ |
5780 |
+- if (!frml) |
5781 |
++ if (!frml) { |
5782 |
++ res = -ENOMEM; |
5783 |
+ goto out_err; |
5784 |
++ } |
5785 |
+ phyinfo->frm_layer = frml; |
5786 |
+ layer_set_up(frml, cnfg->mux); |
5787 |
+ |
5788 |
+@@ -511,11 +516,12 @@ got_phyid: |
5789 |
+ list_add_rcu(&phyinfo->node, &cnfg->phys); |
5790 |
+ out: |
5791 |
+ mutex_unlock(&cnfg->lock); |
5792 |
+- return; |
5793 |
++ return res; |
5794 |
+ |
5795 |
+ out_err: |
5796 |
+ kfree(phyinfo); |
5797 |
+ mutex_unlock(&cnfg->lock); |
5798 |
++ return res; |
5799 |
+ } |
5800 |
+ EXPORT_SYMBOL(cfcnfg_add_phy_layer); |
5801 |
+ |
5802 |
+diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c |
5803 |
+index e11725a4bb0ed..40cd57ad0a0f4 100644 |
5804 |
+--- a/net/caif/cfserl.c |
5805 |
++++ b/net/caif/cfserl.c |
5806 |
+@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); |
5807 |
+ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, |
5808 |
+ int phyid); |
5809 |
+ |
5810 |
++void cfserl_release(struct cflayer *layer) |
5811 |
++{ |
5812 |
++ kfree(layer); |
5813 |
++} |
5814 |
++ |
5815 |
+ struct cflayer *cfserl_create(int instance, bool use_stx) |
5816 |
+ { |
5817 |
+ struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); |
5818 |
+diff --git a/net/core/devlink.c b/net/core/devlink.c |
5819 |
+index 737b61c2976e1..4c363fa7d4d11 100644 |
5820 |
+--- a/net/core/devlink.c |
5821 |
++++ b/net/core/devlink.c |
5822 |
+@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, |
5823 |
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL: |
5824 |
+ case DEVLINK_PORT_FLAVOUR_CPU: |
5825 |
+ case DEVLINK_PORT_FLAVOUR_DSA: |
5826 |
+- case DEVLINK_PORT_FLAVOUR_VIRTUAL: |
5827 |
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, |
5828 |
+ attrs->phys.port_number)) |
5829 |
+ return -EMSGSIZE; |
5830 |
+@@ -8629,7 +8628,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, |
5831 |
+ |
5832 |
+ switch (attrs->flavour) { |
5833 |
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL: |
5834 |
+- case DEVLINK_PORT_FLAVOUR_VIRTUAL: |
5835 |
+ if (!attrs->split) |
5836 |
+ n = snprintf(name, len, "p%u", attrs->phys.port_number); |
5837 |
+ else |
5838 |
+@@ -8670,6 +8668,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, |
5839 |
+ n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf, |
5840 |
+ attrs->pci_sf.sf); |
5841 |
+ break; |
5842 |
++ case DEVLINK_PORT_FLAVOUR_VIRTUAL: |
5843 |
++ return -EOPNOTSUPP; |
5844 |
+ } |
5845 |
+ |
5846 |
+ if (n >= len) |
5847 |
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
5848 |
+index 98f20efbfadf2..bf774575ad716 100644 |
5849 |
+--- a/net/core/neighbour.c |
5850 |
++++ b/net/core/neighbour.c |
5851 |
+@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl) |
5852 |
+ |
5853 |
+ write_lock(&n->lock); |
5854 |
+ if ((n->nud_state == NUD_FAILED) || |
5855 |
++ (n->nud_state == NUD_NOARP) || |
5856 |
+ (tbl->is_multicast && |
5857 |
+ tbl->is_multicast(n->primary_key)) || |
5858 |
+ time_after(tref, n->updated)) |
5859 |
+diff --git a/net/core/sock.c b/net/core/sock.c |
5860 |
+index 9c7b143e7a964..a266760cd65ea 100644 |
5861 |
+--- a/net/core/sock.c |
5862 |
++++ b/net/core/sock.c |
5863 |
+@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val) |
5864 |
+ } |
5865 |
+ EXPORT_SYMBOL(sock_set_rcvbuf); |
5866 |
+ |
5867 |
++static void __sock_set_mark(struct sock *sk, u32 val) |
5868 |
++{ |
5869 |
++ if (val != sk->sk_mark) { |
5870 |
++ sk->sk_mark = val; |
5871 |
++ sk_dst_reset(sk); |
5872 |
++ } |
5873 |
++} |
5874 |
++ |
5875 |
+ void sock_set_mark(struct sock *sk, u32 val) |
5876 |
+ { |
5877 |
+ lock_sock(sk); |
5878 |
+- sk->sk_mark = val; |
5879 |
++ __sock_set_mark(sk, val); |
5880 |
+ release_sock(sk); |
5881 |
+ } |
5882 |
+ EXPORT_SYMBOL(sock_set_mark); |
5883 |
+@@ -1126,10 +1134,10 @@ set_sndbuf: |
5884 |
+ case SO_MARK: |
5885 |
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { |
5886 |
+ ret = -EPERM; |
5887 |
+- } else if (val != sk->sk_mark) { |
5888 |
+- sk->sk_mark = val; |
5889 |
+- sk_dst_reset(sk); |
5890 |
++ break; |
5891 |
+ } |
5892 |
++ |
5893 |
++ __sock_set_mark(sk, val); |
5894 |
+ break; |
5895 |
+ |
5896 |
+ case SO_RXQ_OVFL: |
5897 |
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c |
5898 |
+index 008c1ec6e20c1..122ad5833fb1c 100644 |
5899 |
+--- a/net/dsa/tag_8021q.c |
5900 |
++++ b/net/dsa/tag_8021q.c |
5901 |
+@@ -64,7 +64,7 @@ |
5902 |
+ #define DSA_8021Q_SUBVLAN_HI_SHIFT 9 |
5903 |
+ #define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9) |
5904 |
+ #define DSA_8021Q_SUBVLAN_LO_SHIFT 4 |
5905 |
+-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3) |
5906 |
++#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4) |
5907 |
+ #define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2) |
5908 |
+ #define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0)) |
5909 |
+ #define DSA_8021Q_SUBVLAN(x) \ |
5910 |
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c |
5911 |
+index 0c1b0770c59ea..c23c152860b73 100644 |
5912 |
+--- a/net/ieee802154/nl-mac.c |
5913 |
++++ b/net/ieee802154/nl-mac.c |
5914 |
+@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info) |
5915 |
+ nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) || |
5916 |
+ nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, |
5917 |
+ be32_to_cpu(params.frame_counter)) || |
5918 |
+- ieee802154_llsec_fill_key_id(msg, &params.out_key)) |
5919 |
++ ieee802154_llsec_fill_key_id(msg, &params.out_key)) { |
5920 |
++ rc = -ENOBUFS; |
5921 |
+ goto out_free; |
5922 |
++ } |
5923 |
+ |
5924 |
+ dev_put(dev); |
5925 |
+ |
5926 |
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c |
5927 |
+index 2cdc7e63fe172..88215b5c93aa4 100644 |
5928 |
+--- a/net/ieee802154/nl-phy.c |
5929 |
++++ b/net/ieee802154/nl-phy.c |
5930 |
+@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) |
5931 |
+ } |
5932 |
+ |
5933 |
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || |
5934 |
+- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) |
5935 |
++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) { |
5936 |
++ rc = -EMSGSIZE; |
5937 |
+ goto nla_put_failure; |
5938 |
++ } |
5939 |
+ dev_put(dev); |
5940 |
+ |
5941 |
+ wpan_phy_put(phy); |
5942 |
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
5943 |
+index 373d48073106f..36e80b3598b01 100644 |
5944 |
+--- a/net/ipv6/route.c |
5945 |
++++ b/net/ipv6/route.c |
5946 |
+@@ -3676,11 +3676,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, |
5947 |
+ if (nh) { |
5948 |
+ if (rt->fib6_src.plen) { |
5949 |
+ NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); |
5950 |
+- goto out; |
5951 |
++ goto out_free; |
5952 |
+ } |
5953 |
+ if (!nexthop_get(nh)) { |
5954 |
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); |
5955 |
+- goto out; |
5956 |
++ goto out_free; |
5957 |
+ } |
5958 |
+ rt->nh = nh; |
5959 |
+ fib6_nh = nexthop_fib6_nh(rt->nh); |
5960 |
+@@ -3717,6 +3717,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, |
5961 |
+ out: |
5962 |
+ fib6_info_release(rt); |
5963 |
+ return ERR_PTR(err); |
5964 |
++out_free: |
5965 |
++ ip_fib_metrics_put(rt->fib6_metrics); |
5966 |
++ kfree(rt); |
5967 |
++ return ERR_PTR(err); |
5968 |
+ } |
5969 |
+ |
5970 |
+ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, |
5971 |
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c |
5972 |
+index 228dd40828c4b..225b988215171 100644 |
5973 |
+--- a/net/mptcp/protocol.c |
5974 |
++++ b/net/mptcp/protocol.c |
5975 |
+@@ -937,6 +937,10 @@ static void __mptcp_update_wmem(struct sock *sk) |
5976 |
+ { |
5977 |
+ struct mptcp_sock *msk = mptcp_sk(sk); |
5978 |
+ |
5979 |
++#ifdef CONFIG_LOCKDEP |
5980 |
++ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); |
5981 |
++#endif |
5982 |
++ |
5983 |
+ if (!msk->wmem_reserved) |
5984 |
+ return; |
5985 |
+ |
5986 |
+@@ -1075,10 +1079,20 @@ out: |
5987 |
+ |
5988 |
+ static void __mptcp_clean_una_wakeup(struct sock *sk) |
5989 |
+ { |
5990 |
++#ifdef CONFIG_LOCKDEP |
5991 |
++ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); |
5992 |
++#endif |
5993 |
+ __mptcp_clean_una(sk); |
5994 |
+ mptcp_write_space(sk); |
5995 |
+ } |
5996 |
+ |
5997 |
++static void mptcp_clean_una_wakeup(struct sock *sk) |
5998 |
++{ |
5999 |
++ mptcp_data_lock(sk); |
6000 |
++ __mptcp_clean_una_wakeup(sk); |
6001 |
++ mptcp_data_unlock(sk); |
6002 |
++} |
6003 |
++ |
6004 |
+ static void mptcp_enter_memory_pressure(struct sock *sk) |
6005 |
+ { |
6006 |
+ struct mptcp_subflow_context *subflow; |
6007 |
+@@ -2288,7 +2302,7 @@ static void __mptcp_retrans(struct sock *sk) |
6008 |
+ struct sock *ssk; |
6009 |
+ int ret; |
6010 |
+ |
6011 |
+- __mptcp_clean_una_wakeup(sk); |
6012 |
++ mptcp_clean_una_wakeup(sk); |
6013 |
+ dfrag = mptcp_rtx_head(sk); |
6014 |
+ if (!dfrag) { |
6015 |
+ if (mptcp_data_fin_enabled(msk)) { |
6016 |
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c |
6017 |
+index 1936db3574d2e..8425cd393bf3e 100644 |
6018 |
+--- a/net/mptcp/subflow.c |
6019 |
++++ b/net/mptcp/subflow.c |
6020 |
+@@ -608,21 +608,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, |
6021 |
+ |
6022 |
+ /* if the sk is MP_CAPABLE, we try to fetch the client key */ |
6023 |
+ if (subflow_req->mp_capable) { |
6024 |
+- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) { |
6025 |
+- /* here we can receive and accept an in-window, |
6026 |
+- * out-of-order pkt, which will not carry the MP_CAPABLE |
6027 |
+- * opt even on mptcp enabled paths |
6028 |
+- */ |
6029 |
+- goto create_msk; |
6030 |
+- } |
6031 |
+- |
6032 |
++ /* we can receive and accept an in-window, out-of-order pkt, |
6033 |
++ * which may not carry the MP_CAPABLE opt even on mptcp enabled |
6034 |
++ * paths: always try to extract the peer key, and fallback |
6035 |
++ * for packets missing it. |
6036 |
++ * Even OoO DSS packets coming legitly after dropped or |
6037 |
++ * reordered MPC will cause fallback, but we don't have other |
6038 |
++ * options. |
6039 |
++ */ |
6040 |
+ mptcp_get_options(skb, &mp_opt); |
6041 |
+ if (!mp_opt.mp_capable) { |
6042 |
+ fallback = true; |
6043 |
+ goto create_child; |
6044 |
+ } |
6045 |
+ |
6046 |
+-create_msk: |
6047 |
+ new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req); |
6048 |
+ if (!new_msk) |
6049 |
+ fallback = true; |
6050 |
+@@ -985,22 +984,11 @@ static bool subflow_check_data_avail(struct sock *ssk) |
6051 |
+ u64 old_ack; |
6052 |
+ |
6053 |
+ status = get_mapping_status(ssk, msk); |
6054 |
+- pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status); |
6055 |
+- if (status == MAPPING_INVALID) { |
6056 |
+- ssk->sk_err = EBADMSG; |
6057 |
+- goto fatal; |
6058 |
+- } |
6059 |
+- if (status == MAPPING_DUMMY) { |
6060 |
+- __mptcp_do_fallback(msk); |
6061 |
+- skb = skb_peek(&ssk->sk_receive_queue); |
6062 |
+- subflow->map_valid = 1; |
6063 |
+- subflow->map_seq = READ_ONCE(msk->ack_seq); |
6064 |
+- subflow->map_data_len = skb->len; |
6065 |
+- subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - |
6066 |
+- subflow->ssn_offset; |
6067 |
+- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; |
6068 |
+- return true; |
6069 |
+- } |
6070 |
++ if (unlikely(status == MAPPING_INVALID)) |
6071 |
++ goto fallback; |
6072 |
++ |
6073 |
++ if (unlikely(status == MAPPING_DUMMY)) |
6074 |
++ goto fallback; |
6075 |
+ |
6076 |
+ if (status != MAPPING_OK) |
6077 |
+ goto no_data; |
6078 |
+@@ -1013,10 +1001,8 @@ static bool subflow_check_data_avail(struct sock *ssk) |
6079 |
+ * MP_CAPABLE-based mapping |
6080 |
+ */ |
6081 |
+ if (unlikely(!READ_ONCE(msk->can_ack))) { |
6082 |
+- if (!subflow->mpc_map) { |
6083 |
+- ssk->sk_err = EBADMSG; |
6084 |
+- goto fatal; |
6085 |
+- } |
6086 |
++ if (!subflow->mpc_map) |
6087 |
++ goto fallback; |
6088 |
+ WRITE_ONCE(msk->remote_key, subflow->remote_key); |
6089 |
+ WRITE_ONCE(msk->ack_seq, subflow->map_seq); |
6090 |
+ WRITE_ONCE(msk->can_ack, true); |
6091 |
+@@ -1044,15 +1030,29 @@ static bool subflow_check_data_avail(struct sock *ssk) |
6092 |
+ no_data: |
6093 |
+ subflow_sched_work_if_closed(msk, ssk); |
6094 |
+ return false; |
6095 |
+-fatal: |
6096 |
+- /* fatal protocol error, close the socket */ |
6097 |
+- /* This barrier is coupled with smp_rmb() in tcp_poll() */ |
6098 |
+- smp_wmb(); |
6099 |
+- ssk->sk_error_report(ssk); |
6100 |
+- tcp_set_state(ssk, TCP_CLOSE); |
6101 |
+- tcp_send_active_reset(ssk, GFP_ATOMIC); |
6102 |
+- subflow->data_avail = 0; |
6103 |
+- return false; |
6104 |
++ |
6105 |
++fallback: |
6106 |
++ /* RFC 8684 section 3.7. */ |
6107 |
++ if (subflow->mp_join || subflow->fully_established) { |
6108 |
++ /* fatal protocol error, close the socket. |
6109 |
++ * subflow_error_report() will introduce the appropriate barriers |
6110 |
++ */ |
6111 |
++ ssk->sk_err = EBADMSG; |
6112 |
++ ssk->sk_error_report(ssk); |
6113 |
++ tcp_set_state(ssk, TCP_CLOSE); |
6114 |
++ tcp_send_active_reset(ssk, GFP_ATOMIC); |
6115 |
++ subflow->data_avail = 0; |
6116 |
++ return false; |
6117 |
++ } |
6118 |
++ |
6119 |
++ __mptcp_do_fallback(msk); |
6120 |
++ skb = skb_peek(&ssk->sk_receive_queue); |
6121 |
++ subflow->map_valid = 1; |
6122 |
++ subflow->map_seq = READ_ONCE(msk->ack_seq); |
6123 |
++ subflow->map_data_len = skb->len; |
6124 |
++ subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; |
6125 |
++ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; |
6126 |
++ return true; |
6127 |
+ } |
6128 |
+ |
6129 |
+ bool mptcp_subflow_data_available(struct sock *sk) |
6130 |
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c |
6131 |
+index d45dbcba8b49c..c25097092a060 100644 |
6132 |
+--- a/net/netfilter/ipvs/ip_vs_ctl.c |
6133 |
++++ b/net/netfilter/ipvs/ip_vs_ctl.c |
6134 |
+@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, |
6135 |
+ ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); |
6136 |
+ svc->port = u->port; |
6137 |
+ svc->fwmark = u->fwmark; |
6138 |
+- svc->flags = u->flags; |
6139 |
++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED; |
6140 |
+ svc->timeout = u->timeout * HZ; |
6141 |
+ svc->netmask = u->netmask; |
6142 |
+ svc->ipvs = ipvs; |
6143 |
+diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c |
6144 |
+index 47e9319d2cf31..71892822bbf5d 100644 |
6145 |
+--- a/net/netfilter/nf_conntrack_proto.c |
6146 |
++++ b/net/netfilter/nf_conntrack_proto.c |
6147 |
+@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void) |
6148 |
+ |
6149 |
+ #if IS_ENABLED(CONFIG_IPV6) |
6150 |
+ cleanup_sockopt: |
6151 |
+- nf_unregister_sockopt(&so_getorigdst6); |
6152 |
++ nf_unregister_sockopt(&so_getorigdst); |
6153 |
+ #endif |
6154 |
+ return ret; |
6155 |
+ } |
6156 |
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
6157 |
+index 878ed49d0c569..31016c144c48b 100644 |
6158 |
+--- a/net/netfilter/nf_tables_api.c |
6159 |
++++ b/net/netfilter/nf_tables_api.c |
6160 |
+@@ -3288,8 +3288,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, |
6161 |
+ if (n == NFT_RULE_MAXEXPRS) |
6162 |
+ goto err1; |
6163 |
+ err = nf_tables_expr_parse(&ctx, tmp, &info[n]); |
6164 |
+- if (err < 0) |
6165 |
++ if (err < 0) { |
6166 |
++ NL_SET_BAD_ATTR(extack, tmp); |
6167 |
+ goto err1; |
6168 |
++ } |
6169 |
+ size += info[n].ops->size; |
6170 |
+ n++; |
6171 |
+ } |
6172 |
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c |
6173 |
+index 0f94fce1d3ed3..04a12a264cf74 100644 |
6174 |
+--- a/net/netfilter/nfnetlink_cthelper.c |
6175 |
++++ b/net/netfilter/nfnetlink_cthelper.c |
6176 |
+@@ -380,10 +380,14 @@ static int |
6177 |
+ nfnl_cthelper_update(const struct nlattr * const tb[], |
6178 |
+ struct nf_conntrack_helper *helper) |
6179 |
+ { |
6180 |
++ u32 size; |
6181 |
+ int ret; |
6182 |
+ |
6183 |
+- if (tb[NFCTH_PRIV_DATA_LEN]) |
6184 |
+- return -EBUSY; |
6185 |
++ if (tb[NFCTH_PRIV_DATA_LEN]) { |
6186 |
++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); |
6187 |
++ if (size != helper->data_len) |
6188 |
++ return -EBUSY; |
6189 |
++ } |
6190 |
+ |
6191 |
+ if (tb[NFCTH_POLICY]) { |
6192 |
+ ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); |
6193 |
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c |
6194 |
+index 882fe8648653d..6d2b382f5e075 100644 |
6195 |
+--- a/net/netfilter/nft_ct.c |
6196 |
++++ b/net/netfilter/nft_ct.c |
6197 |
+@@ -1216,7 +1216,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj, |
6198 |
+ struct nf_conn *ct; |
6199 |
+ |
6200 |
+ ct = nf_ct_get(pkt->skb, &ctinfo); |
6201 |
+- if (!ct || ctinfo == IP_CT_UNTRACKED) { |
6202 |
++ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) { |
6203 |
+ regs->verdict.code = NFT_BREAK; |
6204 |
+ return; |
6205 |
+ } |
6206 |
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c |
6207 |
+index 53dbe733f9981..6cfd30fc07985 100644 |
6208 |
+--- a/net/nfc/llcp_sock.c |
6209 |
++++ b/net/nfc/llcp_sock.c |
6210 |
+@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) |
6211 |
+ if (!llcp_sock->service_name) { |
6212 |
+ nfc_llcp_local_put(llcp_sock->local); |
6213 |
+ llcp_sock->local = NULL; |
6214 |
++ llcp_sock->dev = NULL; |
6215 |
+ ret = -ENOMEM; |
6216 |
+ goto put_dev; |
6217 |
+ } |
6218 |
+@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) |
6219 |
+ llcp_sock->local = NULL; |
6220 |
+ kfree(llcp_sock->service_name); |
6221 |
+ llcp_sock->service_name = NULL; |
6222 |
++ llcp_sock->dev = NULL; |
6223 |
+ ret = -EADDRINUSE; |
6224 |
+ goto put_dev; |
6225 |
+ } |
6226 |
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c |
6227 |
+index 48fdf7293deaa..ba7f57cb41c30 100644 |
6228 |
+--- a/net/sched/act_ct.c |
6229 |
++++ b/net/sched/act_ct.c |
6230 |
+@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, |
6231 |
+ */ |
6232 |
+ cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force); |
6233 |
+ if (!cached) { |
6234 |
+- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) { |
6235 |
++ if (tcf_ct_flow_table_lookup(p, skb, family)) { |
6236 |
+ skip_add = true; |
6237 |
+ goto do_nat; |
6238 |
+ } |
6239 |
+@@ -1024,10 +1024,11 @@ do_nat: |
6240 |
+ * even if the connection is already confirmed. |
6241 |
+ */ |
6242 |
+ nf_conntrack_confirm(skb); |
6243 |
+- } else if (!skip_add) { |
6244 |
+- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo); |
6245 |
+ } |
6246 |
+ |
6247 |
++ if (!skip_add) |
6248 |
++ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo); |
6249 |
++ |
6250 |
+ out_push: |
6251 |
+ skb_push_rcsum(skb, nh_ofs); |
6252 |
+ |
6253 |
+@@ -1204,9 +1205,6 @@ static int tcf_ct_fill_params(struct net *net, |
6254 |
+ sizeof(p->zone)); |
6255 |
+ } |
6256 |
+ |
6257 |
+- if (p->zone == NF_CT_DEFAULT_ZONE_ID) |
6258 |
+- return 0; |
6259 |
+- |
6260 |
+ nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0); |
6261 |
+ tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL); |
6262 |
+ if (!tmpl) { |
6263 |
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c |
6264 |
+index 081c11d5717c4..8827987ba9034 100644 |
6265 |
+--- a/net/sched/sch_htb.c |
6266 |
++++ b/net/sched/sch_htb.c |
6267 |
+@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch, |
6268 |
+ struct Qdisc *old_q; |
6269 |
+ |
6270 |
+ /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ |
6271 |
+- qdisc_refcount_inc(new_q); |
6272 |
++ if (new_q) |
6273 |
++ qdisc_refcount_inc(new_q); |
6274 |
+ old_q = htb_graft_helper(dev_queue, new_q); |
6275 |
+ WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); |
6276 |
+ } |
6277 |
+@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg, |
6278 |
+ cl->parent->common.classid, |
6279 |
+ NULL); |
6280 |
+ if (q->offload) { |
6281 |
+- if (new_q) { |
6282 |
++ if (new_q) |
6283 |
+ htb_set_lockdep_class_child(new_q); |
6284 |
+- htb_parent_to_leaf_offload(sch, dev_queue, new_q); |
6285 |
+- } |
6286 |
++ htb_parent_to_leaf_offload(sch, dev_queue, new_q); |
6287 |
+ } |
6288 |
+ } |
6289 |
+ |
6290 |
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c |
6291 |
+index a4389ef08a980..0c8882052ba08 100644 |
6292 |
+--- a/net/tipc/bearer.c |
6293 |
++++ b/net/tipc/bearer.c |
6294 |
+@@ -243,7 +243,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) |
6295 |
+ */ |
6296 |
+ static int tipc_enable_bearer(struct net *net, const char *name, |
6297 |
+ u32 disc_domain, u32 prio, |
6298 |
+- struct nlattr *attr[]) |
6299 |
++ struct nlattr *attr[], |
6300 |
++ struct netlink_ext_ack *extack) |
6301 |
+ { |
6302 |
+ struct tipc_net *tn = tipc_net(net); |
6303 |
+ struct tipc_bearer_names b_names; |
6304 |
+@@ -254,20 +255,24 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
6305 |
+ int bearer_id = 0; |
6306 |
+ int res = -EINVAL; |
6307 |
+ char *errstr = ""; |
6308 |
++ u32 i; |
6309 |
+ |
6310 |
+ if (!bearer_name_validate(name, &b_names)) { |
6311 |
+ errstr = "illegal name"; |
6312 |
++ NL_SET_ERR_MSG(extack, "Illegal name"); |
6313 |
+ goto rejected; |
6314 |
+ } |
6315 |
+ |
6316 |
+ if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) { |
6317 |
+ errstr = "illegal priority"; |
6318 |
++ NL_SET_ERR_MSG(extack, "Illegal priority"); |
6319 |
+ goto rejected; |
6320 |
+ } |
6321 |
+ |
6322 |
+ m = tipc_media_find(b_names.media_name); |
6323 |
+ if (!m) { |
6324 |
+ errstr = "media not registered"; |
6325 |
++ NL_SET_ERR_MSG(extack, "Media not registered"); |
6326 |
+ goto rejected; |
6327 |
+ } |
6328 |
+ |
6329 |
+@@ -275,33 +280,43 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
6330 |
+ prio = m->priority; |
6331 |
+ |
6332 |
+ /* Check new bearer vs existing ones and find free bearer id if any */ |
6333 |
+- while (bearer_id < MAX_BEARERS) { |
6334 |
+- b = rtnl_dereference(tn->bearer_list[bearer_id]); |
6335 |
+- if (!b) |
6336 |
+- break; |
6337 |
++ bearer_id = MAX_BEARERS; |
6338 |
++ i = MAX_BEARERS; |
6339 |
++ while (i-- != 0) { |
6340 |
++ b = rtnl_dereference(tn->bearer_list[i]); |
6341 |
++ if (!b) { |
6342 |
++ bearer_id = i; |
6343 |
++ continue; |
6344 |
++ } |
6345 |
+ if (!strcmp(name, b->name)) { |
6346 |
+ errstr = "already enabled"; |
6347 |
++ NL_SET_ERR_MSG(extack, "Already enabled"); |
6348 |
+ goto rejected; |
6349 |
+ } |
6350 |
+- bearer_id++; |
6351 |
+- if (b->priority != prio) |
6352 |
+- continue; |
6353 |
+- if (++with_this_prio <= 2) |
6354 |
+- continue; |
6355 |
+- pr_warn("Bearer <%s>: already 2 bearers with priority %u\n", |
6356 |
+- name, prio); |
6357 |
+- if (prio == TIPC_MIN_LINK_PRI) { |
6358 |
+- errstr = "cannot adjust to lower"; |
6359 |
+- goto rejected; |
6360 |
++ |
6361 |
++ if (b->priority == prio && |
6362 |
++ (++with_this_prio > 2)) { |
6363 |
++ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n", |
6364 |
++ name, prio); |
6365 |
++ |
6366 |
++ if (prio == TIPC_MIN_LINK_PRI) { |
6367 |
++ errstr = "cannot adjust to lower"; |
6368 |
++ NL_SET_ERR_MSG(extack, "Cannot adjust to lower"); |
6369 |
++ goto rejected; |
6370 |
++ } |
6371 |
++ |
6372 |
++ pr_warn("Bearer <%s>: trying with adjusted priority\n", |
6373 |
++ name); |
6374 |
++ prio--; |
6375 |
++ bearer_id = MAX_BEARERS; |
6376 |
++ i = MAX_BEARERS; |
6377 |
++ with_this_prio = 1; |
6378 |
+ } |
6379 |
+- pr_warn("Bearer <%s>: trying with adjusted priority\n", name); |
6380 |
+- prio--; |
6381 |
+- bearer_id = 0; |
6382 |
+- with_this_prio = 1; |
6383 |
+ } |
6384 |
+ |
6385 |
+ if (bearer_id >= MAX_BEARERS) { |
6386 |
+ errstr = "max 3 bearers permitted"; |
6387 |
++ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted"); |
6388 |
+ goto rejected; |
6389 |
+ } |
6390 |
+ |
6391 |
+@@ -315,6 +330,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
6392 |
+ if (res) { |
6393 |
+ kfree(b); |
6394 |
+ errstr = "failed to enable media"; |
6395 |
++ NL_SET_ERR_MSG(extack, "Failed to enable media"); |
6396 |
+ goto rejected; |
6397 |
+ } |
6398 |
+ |
6399 |
+@@ -331,6 +347,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, |
6400 |
+ if (res) { |
6401 |
+ bearer_disable(net, b); |
6402 |
+ errstr = "failed to create discoverer"; |
6403 |
++ NL_SET_ERR_MSG(extack, "Failed to create discoverer"); |
6404 |
+ goto rejected; |
6405 |
+ } |
6406 |
+ |
6407 |
+@@ -909,6 +926,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) |
6408 |
+ bearer = tipc_bearer_find(net, name); |
6409 |
+ if (!bearer) { |
6410 |
+ err = -EINVAL; |
6411 |
++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); |
6412 |
+ goto err_out; |
6413 |
+ } |
6414 |
+ |
6415 |
+@@ -948,8 +966,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) |
6416 |
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
6417 |
+ |
6418 |
+ bearer = tipc_bearer_find(net, name); |
6419 |
+- if (!bearer) |
6420 |
++ if (!bearer) { |
6421 |
++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); |
6422 |
+ return -EINVAL; |
6423 |
++ } |
6424 |
+ |
6425 |
+ bearer_disable(net, bearer); |
6426 |
+ |
6427 |
+@@ -1007,7 +1027,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) |
6428 |
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
6429 |
+ } |
6430 |
+ |
6431 |
+- return tipc_enable_bearer(net, bearer, domain, prio, attrs); |
6432 |
++ return tipc_enable_bearer(net, bearer, domain, prio, attrs, |
6433 |
++ info->extack); |
6434 |
+ } |
6435 |
+ |
6436 |
+ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) |
6437 |
+@@ -1046,6 +1067,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) |
6438 |
+ b = tipc_bearer_find(net, name); |
6439 |
+ if (!b) { |
6440 |
+ rtnl_unlock(); |
6441 |
++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); |
6442 |
+ return -EINVAL; |
6443 |
+ } |
6444 |
+ |
6445 |
+@@ -1086,8 +1108,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) |
6446 |
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
6447 |
+ |
6448 |
+ b = tipc_bearer_find(net, name); |
6449 |
+- if (!b) |
6450 |
++ if (!b) { |
6451 |
++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); |
6452 |
+ return -EINVAL; |
6453 |
++ } |
6454 |
+ |
6455 |
+ if (attrs[TIPC_NLA_BEARER_PROP]) { |
6456 |
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
6457 |
+@@ -1106,12 +1130,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) |
6458 |
+ if (props[TIPC_NLA_PROP_WIN]) |
6459 |
+ b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
6460 |
+ if (props[TIPC_NLA_PROP_MTU]) { |
6461 |
+- if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) |
6462 |
++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) { |
6463 |
++ NL_SET_ERR_MSG(info->extack, |
6464 |
++ "MTU property is unsupported"); |
6465 |
+ return -EINVAL; |
6466 |
++ } |
6467 |
+ #ifdef CONFIG_TIPC_MEDIA_UDP |
6468 |
+ if (tipc_udp_mtu_bad(nla_get_u32 |
6469 |
+- (props[TIPC_NLA_PROP_MTU]))) |
6470 |
++ (props[TIPC_NLA_PROP_MTU]))) { |
6471 |
++ NL_SET_ERR_MSG(info->extack, |
6472 |
++ "MTU value is out-of-range"); |
6473 |
+ return -EINVAL; |
6474 |
++ } |
6475 |
+ b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]); |
6476 |
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU); |
6477 |
+ #endif |
6478 |
+@@ -1239,6 +1269,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) |
6479 |
+ rtnl_lock(); |
6480 |
+ media = tipc_media_find(name); |
6481 |
+ if (!media) { |
6482 |
++ NL_SET_ERR_MSG(info->extack, "Media not found"); |
6483 |
+ err = -EINVAL; |
6484 |
+ goto err_out; |
6485 |
+ } |
6486 |
+@@ -1275,9 +1306,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) |
6487 |
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); |
6488 |
+ |
6489 |
+ m = tipc_media_find(name); |
6490 |
+- if (!m) |
6491 |
++ if (!m) { |
6492 |
++ NL_SET_ERR_MSG(info->extack, "Media not found"); |
6493 |
+ return -EINVAL; |
6494 |
+- |
6495 |
++ } |
6496 |
+ if (attrs[TIPC_NLA_MEDIA_PROP]) { |
6497 |
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
6498 |
+ |
6499 |
+@@ -1293,12 +1325,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) |
6500 |
+ if (props[TIPC_NLA_PROP_WIN]) |
6501 |
+ m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
6502 |
+ if (props[TIPC_NLA_PROP_MTU]) { |
6503 |
+- if (m->type_id != TIPC_MEDIA_TYPE_UDP) |
6504 |
++ if (m->type_id != TIPC_MEDIA_TYPE_UDP) { |
6505 |
++ NL_SET_ERR_MSG(info->extack, |
6506 |
++ "MTU property is unsupported"); |
6507 |
+ return -EINVAL; |
6508 |
++ } |
6509 |
+ #ifdef CONFIG_TIPC_MEDIA_UDP |
6510 |
+ if (tipc_udp_mtu_bad(nla_get_u32 |
6511 |
+- (props[TIPC_NLA_PROP_MTU]))) |
6512 |
++ (props[TIPC_NLA_PROP_MTU]))) { |
6513 |
++ NL_SET_ERR_MSG(info->extack, |
6514 |
++ "MTU value is out-of-range"); |
6515 |
+ return -EINVAL; |
6516 |
++ } |
6517 |
+ m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]); |
6518 |
+ #endif |
6519 |
+ } |
6520 |
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c |
6521 |
+index d9cd229aa111b..9b1ea17f3b1dc 100644 |
6522 |
+--- a/net/tls/tls_device.c |
6523 |
++++ b/net/tls/tls_device.c |
6524 |
+@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work); |
6525 |
+ static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task); |
6526 |
+ static LIST_HEAD(tls_device_gc_list); |
6527 |
+ static LIST_HEAD(tls_device_list); |
6528 |
++static LIST_HEAD(tls_device_down_list); |
6529 |
+ static DEFINE_SPINLOCK(tls_device_lock); |
6530 |
+ |
6531 |
+ static void tls_device_free_ctx(struct tls_context *ctx) |
6532 |
+@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, |
6533 |
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); |
6534 |
+ struct net_device *netdev; |
6535 |
+ |
6536 |
+- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags))) |
6537 |
+- return; |
6538 |
+- |
6539 |
+ trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type); |
6540 |
++ rcu_read_lock(); |
6541 |
+ netdev = READ_ONCE(tls_ctx->netdev); |
6542 |
+ if (netdev) |
6543 |
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn, |
6544 |
+ TLS_OFFLOAD_CTX_DIR_RX); |
6545 |
+- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags); |
6546 |
++ rcu_read_unlock(); |
6547 |
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); |
6548 |
+ } |
6549 |
+ |
6550 |
+@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) |
6551 |
+ |
6552 |
+ if (tls_ctx->rx_conf != TLS_HW) |
6553 |
+ return; |
6554 |
++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) |
6555 |
++ return; |
6556 |
+ |
6557 |
+ prot = &tls_ctx->prot_info; |
6558 |
+ rx_ctx = tls_offload_ctx_rx(tls_ctx); |
6559 |
+@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, |
6560 |
+ |
6561 |
+ ctx->sw.decrypted |= is_decrypted; |
6562 |
+ |
6563 |
++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { |
6564 |
++ if (likely(is_encrypted || is_decrypted)) |
6565 |
++ return 0; |
6566 |
++ |
6567 |
++ /* After tls_device_down disables the offload, the next SKB will |
6568 |
++ * likely have initial fragments decrypted, and final ones not |
6569 |
++ * decrypted. We need to reencrypt that single SKB. |
6570 |
++ */ |
6571 |
++ return tls_device_reencrypt(sk, skb); |
6572 |
++ } |
6573 |
++ |
6574 |
+ /* Return immediately if the record is either entirely plaintext or |
6575 |
+ * entirely ciphertext. Otherwise handle reencrypt partially decrypted |
6576 |
+ * record. |
6577 |
+@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev) |
6578 |
+ spin_unlock_irqrestore(&tls_device_lock, flags); |
6579 |
+ |
6580 |
+ list_for_each_entry_safe(ctx, tmp, &list, list) { |
6581 |
++ /* Stop offloaded TX and switch to the fallback. |
6582 |
++ * tls_is_sk_tx_device_offloaded will return false. |
6583 |
++ */ |
6584 |
++ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); |
6585 |
++ |
6586 |
++ /* Stop the RX and TX resync. |
6587 |
++ * tls_dev_resync must not be called after tls_dev_del. |
6588 |
++ */ |
6589 |
++ WRITE_ONCE(ctx->netdev, NULL); |
6590 |
++ |
6591 |
++ /* Start skipping the RX resync logic completely. */ |
6592 |
++ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags); |
6593 |
++ |
6594 |
++ /* Sync with inflight packets. After this point: |
6595 |
++ * TX: no non-encrypted packets will be passed to the driver. |
6596 |
++ * RX: resync requests from the driver will be ignored. |
6597 |
++ */ |
6598 |
++ synchronize_net(); |
6599 |
++ |
6600 |
++ /* Release the offload context on the driver side. */ |
6601 |
+ if (ctx->tx_conf == TLS_HW) |
6602 |
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
6603 |
+ TLS_OFFLOAD_CTX_DIR_TX); |
6604 |
+@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev) |
6605 |
+ !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) |
6606 |
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
6607 |
+ TLS_OFFLOAD_CTX_DIR_RX); |
6608 |
+- WRITE_ONCE(ctx->netdev, NULL); |
6609 |
+- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */ |
6610 |
+- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags)) |
6611 |
+- usleep_range(10, 200); |
6612 |
++ |
6613 |
+ dev_put(netdev); |
6614 |
+- list_del_init(&ctx->list); |
6615 |
+ |
6616 |
+- if (refcount_dec_and_test(&ctx->refcount)) |
6617 |
+- tls_device_free_ctx(ctx); |
6618 |
++ /* Move the context to a separate list for two reasons: |
6619 |
++ * 1. When the context is deallocated, list_del is called. |
6620 |
++ * 2. It's no longer an offloaded context, so we don't want to |
6621 |
++ * run offload-specific code on this context. |
6622 |
++ */ |
6623 |
++ spin_lock_irqsave(&tls_device_lock, flags); |
6624 |
++ list_move_tail(&ctx->list, &tls_device_down_list); |
6625 |
++ spin_unlock_irqrestore(&tls_device_lock, flags); |
6626 |
++ |
6627 |
++ /* Device contexts for RX and TX will be freed in on sk_destruct |
6628 |
++ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW. |
6629 |
++ */ |
6630 |
+ } |
6631 |
+ |
6632 |
+ up_write(&device_offload_lock); |
6633 |
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c |
6634 |
+index cacf040872c74..e40bedd112b68 100644 |
6635 |
+--- a/net/tls/tls_device_fallback.c |
6636 |
++++ b/net/tls/tls_device_fallback.c |
6637 |
+@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, |
6638 |
+ } |
6639 |
+ EXPORT_SYMBOL_GPL(tls_validate_xmit_skb); |
6640 |
+ |
6641 |
++struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk, |
6642 |
++ struct net_device *dev, |
6643 |
++ struct sk_buff *skb) |
6644 |
++{ |
6645 |
++ return tls_sw_fallback(sk, skb); |
6646 |
++} |
6647 |
++ |
6648 |
+ struct sk_buff *tls_encrypt_skb(struct sk_buff *skb) |
6649 |
+ { |
6650 |
+ return tls_sw_fallback(skb->sk, skb); |
6651 |
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c |
6652 |
+index 47b7c5334c346..fde56ff491637 100644 |
6653 |
+--- a/net/tls/tls_main.c |
6654 |
++++ b/net/tls/tls_main.c |
6655 |
+@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk) |
6656 |
+ mutex_init(&ctx->tx_lock); |
6657 |
+ rcu_assign_pointer(icsk->icsk_ulp_data, ctx); |
6658 |
+ ctx->sk_proto = READ_ONCE(sk->sk_prot); |
6659 |
++ ctx->sk = sk; |
6660 |
+ return ctx; |
6661 |
+ } |
6662 |
+ |
6663 |
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c |
6664 |
+index 21dbf63d6e415..9ec93d90e8a5a 100644 |
6665 |
+--- a/samples/vfio-mdev/mdpy-fb.c |
6666 |
++++ b/samples/vfio-mdev/mdpy-fb.c |
6667 |
+@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev, |
6668 |
+ if (format != DRM_FORMAT_XRGB8888) { |
6669 |
+ pci_err(pdev, "format mismatch (0x%x != 0x%x)\n", |
6670 |
+ format, DRM_FORMAT_XRGB8888); |
6671 |
+- return -EINVAL; |
6672 |
++ ret = -EINVAL; |
6673 |
++ goto err_release_regions; |
6674 |
+ } |
6675 |
+ if (width < 100 || width > 10000) { |
6676 |
+ pci_err(pdev, "width (%d) out of range\n", width); |
6677 |
+- return -EINVAL; |
6678 |
++ ret = -EINVAL; |
6679 |
++ goto err_release_regions; |
6680 |
+ } |
6681 |
+ if (height < 100 || height > 10000) { |
6682 |
+ pci_err(pdev, "height (%d) out of range\n", height); |
6683 |
+- return -EINVAL; |
6684 |
++ ret = -EINVAL; |
6685 |
++ goto err_release_regions; |
6686 |
+ } |
6687 |
+ pci_info(pdev, "mdpy found: %dx%d framebuffer\n", |
6688 |
+ width, height); |
6689 |
+ |
6690 |
+ info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev); |
6691 |
+- if (!info) |
6692 |
++ if (!info) { |
6693 |
++ ret = -ENOMEM; |
6694 |
+ goto err_release_regions; |
6695 |
++ } |
6696 |
+ pci_set_drvdata(pdev, info); |
6697 |
+ par = info->par; |
6698 |
+ |
6699 |
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal |
6700 |
+index 735e11e9041b9..19468831fcc73 100644 |
6701 |
+--- a/scripts/Makefile.modfinal |
6702 |
++++ b/scripts/Makefile.modfinal |
6703 |
+@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M] $@ |
6704 |
+ quiet_cmd_btf_ko = BTF [M] $@ |
6705 |
+ cmd_btf_ko = \ |
6706 |
+ if [ -f vmlinux ]; then \ |
6707 |
+- LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \ |
6708 |
++ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \ |
6709 |
+ else \ |
6710 |
+ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ |
6711 |
+ fi; |
6712 |
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh |
6713 |
+index 3b261b0f74f0a..0a16928e495b9 100755 |
6714 |
+--- a/scripts/link-vmlinux.sh |
6715 |
++++ b/scripts/link-vmlinux.sh |
6716 |
+@@ -228,7 +228,7 @@ gen_btf() |
6717 |
+ vmlinux_link ${1} |
6718 |
+ |
6719 |
+ info "BTF" ${2} |
6720 |
+- LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} |
6721 |
++ LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${1} |
6722 |
+ |
6723 |
+ # Create ${2} which contains just .BTF section but no symbols. Add |
6724 |
+ # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all |
6725 |
+diff --git a/sound/core/timer.c b/sound/core/timer.c |
6726 |
+index 6898b1ac0d7f4..92b7008fcdb86 100644 |
6727 |
+--- a/sound/core/timer.c |
6728 |
++++ b/sound/core/timer.c |
6729 |
+@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) |
6730 |
+ return; |
6731 |
+ if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) |
6732 |
+ return; |
6733 |
++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */ |
6734 |
+ list_for_each_entry(ts, &ti->slave_active_head, active_list) |
6735 |
+ if (ts->ccallback) |
6736 |
+- ts->ccallback(ts, event + 100, &tstamp, resolution); |
6737 |
++ ts->ccallback(ts, event, &tstamp, resolution); |
6738 |
+ } |
6739 |
+ |
6740 |
+ /* start/continue a master timer */ |
6741 |
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c |
6742 |
+index 2026f1ccaf5a7..a220f7ac81263 100644 |
6743 |
+--- a/sound/pci/hda/hda_codec.c |
6744 |
++++ b/sound/pci/hda/hda_codec.c |
6745 |
+@@ -2973,6 +2973,7 @@ static int hda_codec_runtime_resume(struct device *dev) |
6746 |
+ #ifdef CONFIG_PM_SLEEP |
6747 |
+ static int hda_codec_pm_prepare(struct device *dev) |
6748 |
+ { |
6749 |
++ dev->power.power_state = PMSG_SUSPEND; |
6750 |
+ return pm_runtime_suspended(dev); |
6751 |
+ } |
6752 |
+ |
6753 |
+@@ -2980,6 +2981,10 @@ static void hda_codec_pm_complete(struct device *dev) |
6754 |
+ { |
6755 |
+ struct hda_codec *codec = dev_to_hda_codec(dev); |
6756 |
+ |
6757 |
++ /* If no other pm-functions are called between prepare() and complete() */ |
6758 |
++ if (dev->power.power_state.event == PM_EVENT_SUSPEND) |
6759 |
++ dev->power.power_state = PMSG_RESUME; |
6760 |
++ |
6761 |
+ if (pm_runtime_suspended(dev) && (codec->jackpoll_interval || |
6762 |
+ hda_codec_need_resume(codec) || codec->forced_resume)) |
6763 |
+ pm_request_resume(dev); |
6764 |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
6765 |
+index d8424d226714f..cc13a68197f3c 100644 |
6766 |
+--- a/sound/pci/hda/patch_realtek.c |
6767 |
++++ b/sound/pci/hda/patch_realtek.c |
6768 |
+@@ -8289,6 +8289,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6769 |
+ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), |
6770 |
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), |
6771 |
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), |
6772 |
++ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3), |
6773 |
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), |
6774 |
+ SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), |
6775 |
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), |
6776 |
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c |
6777 |
+index 7b2d471a6419d..4343356f3cf9a 100644 |
6778 |
+--- a/tools/perf/util/dwarf-aux.c |
6779 |
++++ b/tools/perf/util/dwarf-aux.c |
6780 |
+@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) |
6781 |
+ if ((tag == DW_TAG_formal_parameter || |
6782 |
+ tag == DW_TAG_variable) && |
6783 |
+ die_compare_name(die_mem, fvp->name) && |
6784 |
+- /* Does the DIE have location information or external instance? */ |
6785 |
++ /* |
6786 |
++ * Does the DIE have location information or const value |
6787 |
++ * or external instance? |
6788 |
++ */ |
6789 |
+ (dwarf_attr(die_mem, DW_AT_external, &attr) || |
6790 |
+- dwarf_attr(die_mem, DW_AT_location, &attr))) |
6791 |
++ dwarf_attr(die_mem, DW_AT_location, &attr) || |
6792 |
++ dwarf_attr(die_mem, DW_AT_const_value, &attr))) |
6793 |
+ return DIE_FIND_CB_END; |
6794 |
+ if (dwarf_haspc(die_mem, fvp->addr)) |
6795 |
+ return DIE_FIND_CB_CONTINUE; |
6796 |
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c |
6797 |
+index 1b118c9c86a69..bba61b95a37a8 100644 |
6798 |
+--- a/tools/perf/util/probe-finder.c |
6799 |
++++ b/tools/perf/util/probe-finder.c |
6800 |
+@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, |
6801 |
+ immediate_value_is_supported()) { |
6802 |
+ Dwarf_Sword snum; |
6803 |
+ |
6804 |
++ if (!tvar) |
6805 |
++ return 0; |
6806 |
++ |
6807 |
+ dwarf_formsdata(&attr, &snum); |
6808 |
+ ret = asprintf(&tvar->value, "\\%ld", (long)snum); |
6809 |
+ |
6810 |
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh |
6811 |
+index 7ed7cd95e58fe..ebc4ee0fe179f 100755 |
6812 |
+--- a/tools/testing/selftests/wireguard/netns.sh |
6813 |
++++ b/tools/testing/selftests/wireguard/netns.sh |
6814 |
+@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0 |
6815 |
+ ip1 -4 route add default dev wg0 table 51820 |
6816 |
+ ip1 -4 rule add not fwmark 51820 table 51820 |
6817 |
+ ip1 -4 rule add table main suppress_prefixlength 0 |
6818 |
++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter' |
6819 |
+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. |
6820 |
+ n1 ping -W 1 -c 100 -f 192.168.99.7 |
6821 |
+ n1 ping -W 1 -c 100 -f abab::1111 |
6822 |
+diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config |
6823 |
+index 4eecb432a66c1..74db83a0aedd8 100644 |
6824 |
+--- a/tools/testing/selftests/wireguard/qemu/kernel.config |
6825 |
++++ b/tools/testing/selftests/wireguard/qemu/kernel.config |
6826 |
+@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y |
6827 |
+ CONFIG_NETFILTER_XT_NAT=y |
6828 |
+ CONFIG_NETFILTER_XT_MATCH_LENGTH=y |
6829 |
+ CONFIG_NETFILTER_XT_MARK=y |
6830 |
+-CONFIG_NF_CONNTRACK_IPV4=y |
6831 |
+ CONFIG_NF_NAT_IPV4=y |
6832 |
+ CONFIG_IP_NF_IPTABLES=y |
6833 |
+ CONFIG_IP_NF_FILTER=y |