commit: caebc7cdc6f994cb1054f8f9fb224f6f3192d62a
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 15 10:30:00 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 15 10:30:00 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=caebc7cd

Linux patch 5.15.68

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1067_linux-5.15.68.patch | 4593 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4597 insertions(+)

diff --git a/0000_README b/0000_README
index 647f7781..6021975a 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-5.15.67.patch
 From: http://www.kernel.org
 Desc: Linux 5.15.67

+Patch: 1067_linux-5.15.68.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.68
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-5.15.68.patch b/1067_linux-5.15.68.patch
new file mode 100644
index 00000000..eca1443c
--- /dev/null
+++ b/1067_linux-5.15.68.patch
@@ -0,0 +1,4593 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 46644736e5835..663001f697733 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -94,6 +94,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #2441009        | ARM64_ERRATUM_2441009       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1349291        | N/A                         |
+diff --git a/Makefile b/Makefile
+index eca45b7be9c1e..d6b672375c079 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+@@ -1332,8 +1332,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+
+ PHONY += headers
+ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
+- $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
+- $(error Headers not exportable for the $(SRCARCH) architecture))
++ $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML))
+ $(Q)$(MAKE) $(hdr-inst)=include/uapi
+ $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
+
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+index 025a78310e3ab..a818e8ebd638f 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+@@ -68,8 +68,8 @@
+ regulators {
+ vdd_3v3: VDD_IO {
+ regulator-name = "VDD_IO";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -87,8 +87,8 @@
+
+ vddio_ddr: VDD_DDR {
+ regulator-name = "VDD_DDR";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -110,8 +110,8 @@
+
+ vdd_core: VDD_CORE {
+ regulator-name = "VDD_CORE";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1250000>;
++ regulator-max-microvolt = <1250000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -152,8 +152,8 @@
+
+ LDO1 {
+ regulator-name = "LDO1";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+@@ -167,9 +167,8 @@
+
+ LDO2 {
+ regulator-name = "LDO2";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
+- regulator-always-on;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index fd1a288f686bc..4ebbbe65c0cee 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -197,8 +197,8 @@
+ regulators {
+ vdd_io_reg: VDD_IO {
+ regulator-name = "VDD_IO";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -216,8 +216,8 @@
+
+ VDD_DDR {
+ regulator-name = "VDD_DDR";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1350000>;
++ regulator-max-microvolt = <1350000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -235,8 +235,8 @@
+
+ VDD_CORE {
+ regulator-name = "VDD_CORE";
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1850000>;
++ regulator-min-microvolt = <1250000>;
++ regulator-max-microvolt = <1250000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+@@ -258,7 +258,6 @@
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+- regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+@@ -273,8 +272,8 @@
+
+ LDO1 {
+ regulator-name = "LDO1";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+@@ -288,8 +287,8 @@
+
+ LDO2 {
+ regulator-name = "LDO2";
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <3700000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+index b167b33bd108d..9a3e5f7827152 100644
+--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+@@ -51,16 +51,6 @@
+ vin-supply = <&reg_3p3v_s5>;
+ };
+
+- reg_3p3v_s0: regulator-3p3v-s0 {
+- compatible = "regulator-fixed";
+- regulator-name = "V_3V3_S0";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- regulator-boot-on;
+- vin-supply = <&reg_3p3v_s5>;
+- };
+-
+ reg_3p3v_s5: regulator-3p3v-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "V_3V3_S5";
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index ed1050404ef0a..c8cc993ca8ca1 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -350,9 +350,41 @@ extern u32 at91_pm_suspend_in_sram_sz;
+
+ static int at91_suspend_finish(unsigned long val)
+ {
++ unsigned char modified_gray_code[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
++ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
++ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
++ 0x10, 0x11,
++ };
++ unsigned int tmp, index;
+ int i;
+
+ if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
++ /*
++ * Bootloader will perform DDR recalibration and will try to
++ * restore the ZQ0SR0 with the value saved here. But the
++ * calibration is buggy and restoring some values from ZQ0SR0
++ * is forbidden and risky thus we need to provide processed
++ * values for these (modified gray code values).
++ */
++ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
++
++ /* Store pull-down output impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
++
++ /* Store pull-up output impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
++ /* Store pull-down on-die termination impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
++ /* Store pull-up on-die termination impedance select. */
++ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
++ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
++
+ /*
+ * The 1st 8 words of memory might get corrupted in the process
+ * of DDR PHY recalibration; it is saved here in securam and it
+@@ -841,10 +873,6 @@ static int __init at91_pm_backup_init(void)
+ of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
+ if (!located)
+ goto securam_fail;
+-
+- /* DDR3PHY_ZQ0SR0 */
+- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy +
+- 0x188);
+ }
+
+ return 0;
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index fdb4f63ecde4b..65cfcc19a936c 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -172,9 +172,15 @@ sr_ena_2:
+ /* Put DDR PHY's DLL in bypass mode for non-backup modes. */
+ cmp r7, #AT91_PM_BACKUP
+ beq sr_ena_3
+- ldr tmp1, [r3, #DDR3PHY_PIR]
+- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
+- str tmp1, [r3, #DDR3PHY_PIR]
++
++ /* Disable DX DLLs. */
++ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
++ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
++
++ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
++ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
+
+ sr_ena_3:
+ /* Power down DDR PHY data receivers. */
+@@ -221,10 +227,14 @@ sr_ena_3:
+ bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
+ str tmp1, [r3, #DDR3PHY_DSGCR]
+
+- /* Take DDR PHY's DLL out of bypass mode. */
+- ldr tmp1, [r3, #DDR3PHY_PIR]
+- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
+- str tmp1, [r3, #DDR3PHY_PIR]
++ /* Enable DX DLLs. */
++ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
++ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
++
++ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
++ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
++ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
+
+ /* Enable quasi-dynamic programming. */
+ mov tmp1, #0
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 9d80c783142f9..24cce3b9ff1a7 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -683,6 +683,23 @@ config ARM64_ERRATUM_2441009
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_2457168
++ bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
++ depends on ARM64_AMU_EXTN
++ default y
++ help
++ This option adds the workaround for ARM Cortex-A510 erratum 2457168.
++
++ The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
++ as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
++ incorrectly giving a significantly higher output value.
++
++ Work around this problem by returning 0 when reading the affected counter in
++ key locations that results in disabling all users of this counter. This effect
++ is the same to firmware disabling affected counters.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+@@ -1626,6 +1643,8 @@ config ARM64_BTI_KERNEL
+ depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
+ depends on !CC_IS_GCC || GCC_VERSION >= 100100
++ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
++ depends on !CC_IS_GCC
+ # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
+index 587543c6c51cb..97c42be71338a 100644
+--- a/arch/arm64/kernel/cacheinfo.c
++++ b/arch/arm64/kernel/cacheinfo.c
+@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
+
+ int init_cache_level(unsigned int cpu)
+ {
+- unsigned int ctype, level, leaves, fw_level;
++ unsigned int ctype, level, leaves;
++ int fw_level;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
+ else
+ fw_level = acpi_find_last_cache_level(cpu);
+
++ if (fw_level < 0)
++ return fw_level;
++
+ if (level < fw_level) {
+ /*
+ * some external caches not specified in CLIDR_EL1
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 23c57e0a7fd14..25c495f58f67a 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -550,6 +550,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2457168
++ {
++ .desc = "ARM erratum 2457168",
++ .capability = ARM64_WORKAROUND_2457168,
++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++ /* Cortex-A510 r0p0-r1p1 */
++ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 474aa55c2f680..3e52a9e8b50be 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1736,7 +1736,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
+ pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
+ smp_processor_id());
+ cpumask_set_cpu(smp_processor_id(), &amu_cpus);
+- update_freq_counters_refs();
++
++ /* 0 reference values signal broken/disabled counters */
++ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
++ update_freq_counters_refs();
+ }
+ }
+
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index db93ce2b0113b..46a0b4d6e2519 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -326,6 +326,11 @@ static void swsusp_mte_restore_tags(void)
+ unsigned long pfn = xa_state.xa_index;
+ struct page *page = pfn_to_online_page(pfn);
+
++ /*
++ * It is not required to invoke page_kasan_tag_reset(page)
++ * at this point since the tags stored in page->flags are
++ * already restored.
++ */
+ mte_restore_page_tags(page_address(page), tags);
+
+ mte_free_tag_storage(tags);
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 10207e3e5ae20..7c1c82c8115cc 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -44,6 +44,15 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ if (!pte_is_tagged)
+ return;
+
++ page_kasan_tag_reset(page);
++ /*
++ * We need smp_wmb() in between setting the flags and clearing the
++ * tags because if another thread reads page->flags and builds a
++ * tagged address out of it, there is an actual dependency to the
++ * memory access, but on the current thread we do not guarantee that
++ * the new page->flags are visible before the tags were updated.
++ */
++ smp_wmb();
+ mte_clear_page_tags(page_address(page));
+ }
+
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 4dd14a6620c17..acf67ef4c505d 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -308,12 +308,25 @@ core_initcall(init_amu_fie);
+
+ static void cpu_read_corecnt(void *val)
+ {
++ /*
++ * A value of 0 can be returned if the current CPU does not support AMUs
++ * or if the counter is disabled for this CPU. A return value of 0 at
++ * counter read is properly handled as an error case by the users of the
++ * counter.
++ */
+ *(u64 *)val = read_corecnt();
+ }
+
+ static void cpu_read_constcnt(void *val)
+ {
+- *(u64 *)val = read_constcnt();
++ /*
++ * Return 0 if the current CPU is affected by erratum 2457168. A value
++ * of 0 is also returned if the current CPU does not support AMUs or if
++ * the counter is disabled. A return value of 0 at counter read is
++ * properly handled as an error case by the users of the counter.
++ */
++ *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
++ 0UL : read_constcnt();
+ }
+
+ static inline
+@@ -340,7 +353,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
+ */
+ bool cpc_ffh_supported(void)
+ {
+- return freq_counters_valid(get_cpu_with_amu_feat());
++ int cpu = get_cpu_with_amu_feat();
++
++ /*
++ * FFH is considered supported if there is at least one present CPU that
++ * supports AMUs. Using FFH to read core and reference counters for CPUs
++ * that do not support AMUs, have counters disabled or that are affected
++ * by errata, will result in a return value of 0.
++ *
++ * This is done to allow any enabled and valid counters to be read
++ * through FFH, knowing that potentially returning 0 as counter value is
++ * properly handled by the users of these counters.
++ */
++ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
++ return false;
++
++ return true;
+ }
+
+ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
+index 24913271e898c..0dea80bf6de46 100644
+--- a/arch/arm64/mm/copypage.c
++++ b/arch/arm64/mm/copypage.c
+@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
+
+ if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
+ set_bit(PG_mte_tagged, &to->flags);
++ page_kasan_tag_reset(to);
++ /*
++ * We need smp_wmb() in between setting the flags and clearing the
++ * tags because if another thread reads page->flags and builds a
++ * tagged address out of it, there is an actual dependency to the
++ * memory access, but on the current thread we do not guarantee that
++ * the new page->flags are visible before the tags were updated.
++ */
++ smp_wmb();
+ mte_copy_page_tags(kto, kfrom);
+ }
+ }
+diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
+index c52c1847079c1..7c4ef56265ee1 100644
+--- a/arch/arm64/mm/mteswap.c
++++ b/arch/arm64/mm/mteswap.c
+@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
+ if (!tags)
+ return false;
+
++ page_kasan_tag_reset(page);
++ /*
++ * We need smp_wmb() in between setting the flags and clearing the
++ * tags because if another thread reads page->flags and builds a
++ * tagged address out of it, there is an actual dependency to the
++ * memory access, but on the current thread we do not guarantee that
++ * the new page->flags are visible before the tags were updated.
++ */
++ smp_wmb();
+ mte_restore_page_tags(page_address(page), tags);
+
+ return true;
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index b71c6cbb23095..cfaffd3c82890 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -54,6 +54,7 @@ WORKAROUND_1418040
+ WORKAROUND_1463225
+ WORKAROUND_1508412
+ WORKAROUND_1542419
++WORKAROUND_2457168
+ WORKAROUND_CAVIUM_23154
+ WORKAROUND_CAVIUM_27456
+ WORKAROUND_CAVIUM_30115
+diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
+index e9de6da0ce51f..9dcfe9de55b0a 100644
+--- a/arch/mips/loongson32/ls1c/board.c
++++ b/arch/mips/loongson32/ls1c/board.c
+@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
+ static int __init ls1c_platform_init(void)
+ {
+ ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+- ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
+
+ return platform_add_devices(ls1c_platform_devices,
+ ARRAY_SIZE(ls1c_platform_devices));
+diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
+index 5779d463b341f..aa4e883431c1a 100644
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -12,14 +12,6 @@
+ #include <asm/barrier.h>
+ #include <linux/atomic.h>
+
+-/* compiler build environment sanity checks: */
+-#if !defined(CONFIG_64BIT) && defined(__LP64__)
+-#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+-#endif
+-#if defined(CONFIG_64BIT) && !defined(__LP64__)
+-#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+-#endif
+-
+ /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ * *_bit() want use of volatile.
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index aa93d775c34db..598d0938449da 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -22,7 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/pgtable.h>
+
+- .level PA_ASM_LEVEL
++ .level 1.1
+
+ __INITDATA
+ ENTRY(boot_args)
+@@ -69,6 +69,47 @@ $bss_loop:
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
++#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
++ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
++ * and halt kernel if we detect a PA1.x CPU. */
++ ldi 32,%r10
++ mtctl %r10,%cr11
++ .level 2.0
++ mfctl,w %cr11,%r10
++ .level 1.1
++ comib,<>,n 0,%r10,$cpu_ok
++
++ load32 PA(msg1),%arg0
++ ldi msg1_end-msg1,%arg1
++$iodc_panic:
++ copy %arg0, %r10
++ copy %arg1, %r11
++ load32 PA(init_stack),%sp
++#define MEM_CONS 0x3A0
++ ldw MEM_CONS+32(%r0),%arg0 // HPA
++ ldi ENTRY_IO_COUT,%arg1
++ ldw MEM_CONS+36(%r0),%arg2 // SPA
++ ldw MEM_CONS+8(%r0),%arg3 // layers
++ load32 PA(__bss_start),%r1
++ stw %r1,-52(%sp) // arg4
++ stw %r0,-56(%sp) // arg5
++ stw %r10,-60(%sp) // arg6 = ptr to text
++ stw %r11,-64(%sp) // arg7 = len
++ stw %r0,-68(%sp) // arg8
++ load32 PA(.iodc_panic_ret), %rp
++ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
++ bv,n (%r1)
++.iodc_panic_ret:
++ b . /* wait endless with ... */
++ or %r10,%r10,%r10 /* qemu idle sleep */
++msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
++msg1_end:
++
++$cpu_ok:
++#endif
++
++ .level PA_ASM_LEVEL
++
+ /* Initialize startup VM. Just map first 16/32 MB of memory */
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
+index a50f2ff1b00e8..383b4799b6dd3 100644
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -62,7 +62,7 @@ static inline unsigned long nmi_get_mcesa_size(void)
+ * The structure is required for machine check happening early in
+ * the boot process.
+ */
+-static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
++static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
+
+ void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+ {
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 6b1a8697fae8d..4dfe37b068898 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -484,6 +484,7 @@ static void __init setup_lowcore_dat_off(void)
+ put_abs_lowcore(restart_data, lc->restart_data);
+ put_abs_lowcore(restart_source, lc->restart_source);
+ put_abs_lowcore(restart_psw, lc->restart_psw);
++ put_abs_lowcore(mcesad, lc->mcesad);
+
+ lc->spinlock_lockval = arch_spin_lockval(0);
+ lc->spinlock_index = 0;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index cddf7e13c2322..799431d287ee8 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+- if (!cpufreq_driver->target_index)
++ if (!policy->freq_table)
+ return target_freq;
+
+ idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index 4dde8edd53b62..3e8d4b51a8140 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -242,29 +242,6 @@ failed:
+ return ret;
+ }
+
+-/**
+- * efi_capsule_flush - called by file close or file flush
+- * @file: file pointer
+- * @id: not used
+- *
+- * If a capsule is being partially uploaded then calling this function
+- * will be treated as upload termination and will free those completed
+- * buffer pages and -ECANCELED will be returned.
+- **/
+-static int efi_capsule_flush(struct file *file, fl_owner_t id)
+-{
+- int ret = 0;
+- struct capsule_info *cap_info = file->private_data;
+-
+- if (cap_info->index > 0) {
+- pr_err("capsule upload not complete\n");
+- efi_free_all_buff_pages(cap_info);
+- ret = -ECANCELED;
+- }
+-
+- return ret;
+-}
+-
+ /**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
+ {
+ struct capsule_info *cap_info = file->private_data;
+
++ if (cap_info->index > 0 &&
++ (cap_info->header.headersize == 0 ||
++ cap_info->count < cap_info->total_size)) {
++ pr_err("capsule upload not complete\n");
++ efi_free_all_buff_pages(cap_info);
++ }
++
+ kfree(cap_info->pages);
+ kfree(cap_info->phys);
+ kfree(file->private_data);
+@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
+ .owner = THIS_MODULE,
+ .open = efi_capsule_open,
+ .write = efi_capsule_write,
+- .flush = efi_capsule_flush,
+ .release = efi_capsule_release,
+ .llseek = no_llseek,
+ };
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index d0537573501e9..2c67f71f23753 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
+ $(call cc-option,-fno-addrsig) \
+ -D__DISABLE_EXPORTS
+
++#
++# struct randomization only makes sense for Linux internal types, which the EFI
++# stub code never touches, so let's turn off struct randomization for the stub
++# altogether
++#
++KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
++
+ # remove SCS flags from all objects in this directory
+ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+ # disable LTO
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 57e9932d8a04e..5b41c29f3ed50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2729,6 +2729,9 @@ static int psp_hw_fini(void *handle)
+ psp_rap_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
++
++ if (adev->gmc.xgmi.num_physical_nodes > 1)
++ psp_xgmi_terminate(psp);
+ }
+
+ psp_asd_unload(psp);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index a799e0b1ff736..ce0b9cb61f582 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -723,7 +723,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+- return psp_xgmi_terminate(&adev->psp);
++ return 0;
+ }
+
+ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index db27fcf87cd04..16cbae04078ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2624,7 +2624,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
+
+ gfx_v9_0_tiling_mode_table_init(adev);
+
+- gfx_v9_0_setup_rb(adev);
++ if (adev->gfx.num_gfx_rings)
++ gfx_v9_0_setup_rb(adev);
+ gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
+ adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index b3bede1dc41da..4259f623a9d7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+
++ tmp = mmVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 5c9f5214bc4e9..6d694cea24201 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -3007,7 +3007,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
+ &crc_win_y_end_fops);
+ debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
+ &crc_win_update_fops);
+-
++ dput(dir);
+ }
+ #endif
+ /*
+diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
+index 847a0dce7f1d3..d24f5b90feabf 100644
+--- a/drivers/gpu/drm/bridge/display-connector.c
++++ b/drivers/gpu/drm/bridge/display-connector.c
+@@ -13,6 +13,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/regulator/consumer.h>
+
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_bridge.h>
+ #include <drm/drm_edid.h>
+
+@@ -87,10 +88,95 @@ static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+ return drm_get_edid(connector, conn->bridge.ddc);
+ }
+
++/*
++ * Since this bridge is tied to the connector, it acts like a passthrough,
++ * so concerning the output bus formats, either pass the bus formats from the
++ * previous bridge or return fallback data like done in the bridge function:
++ * drm_atomic_bridge_chain_select_bus_fmts().
++ * This supports negotiation if the bridge chain has all bits in place.
++ */
++static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
++ struct drm_bridge_state *bridge_state,
++ struct drm_crtc_state *crtc_state,
++ struct drm_connector_state *conn_state,
++ unsigned int *num_output_fmts)
++{
++ struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
++ struct drm_bridge_state *prev_bridge_state;
++
++ if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
++ struct drm_connector *conn = conn_state->connector;
++ u32 *out_bus_fmts;
++
++ *num_output_fmts = 1;
++ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
++ if (!out_bus_fmts)
++ return NULL;
++
++ if (conn->display_info.num_bus_formats &&
++ conn->display_info.bus_formats)
++ out_bus_fmts[0] = conn->display_info.bus_formats[0];
++ else
++ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
++
++ return out_bus_fmts;
++ }
++
++ prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
++ prev_bridge);
++
++ return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge, prev_bridge_state,
++ crtc_state, conn_state,
++ num_output_fmts);
++}
++
++/*
++ * Since this bridge is tied to the connector, it acts like a passthrough,
++ * so concerning the input bus formats, either pass the bus formats from the
++ * previous bridge or MEDIA_BUS_FMT_FIXED (like select_bus_fmt_recursive())
++ * when atomic_get_input_bus_fmts is not supported.
++ * This supports negotiation if the bridge chain has all bits in place.
++ */
++static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
++ struct drm_bridge_state *bridge_state,
++ struct drm_crtc_state *crtc_state,
++ struct drm_connector_state *conn_state,
++ u32 output_fmt,
++ unsigned int *num_input_fmts)
++{
++ struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
++ struct drm_bridge_state *prev_bridge_state;
++
++ if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
++ u32 *in_bus_fmts;
++
++ *num_input_fmts = 1;
++ in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL);
++ if (!in_bus_fmts)
++ return NULL;
++
++ in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
++
++ return in_bus_fmts;
++ }
++
++ prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
++ prev_bridge);
++
++ return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge, prev_bridge_state,
++ crtc_state, conn_state, output_fmt,
++ num_input_fmts);
++}
++
+ static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+ .attach = display_connector_attach,
+ .detect = display_connector_detect,
+ .get_edid = display_connector_get_edid,
++ .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
++ .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++ .atomic_reset = drm_atomic_helper_bridge_reset,
+ };
+
+ static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 6410563a9cb6f..dbd19a34b517b 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -167,21 +167,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
+ }
+ EXPORT_SYMBOL(drm_gem_private_object_init);
+
+-static void
+-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+-{
+- /*
+- * Note: obj->dma_buf can't disappear as long as we still hold a
+- * handle reference in obj->handle_count.
+- */
+- mutex_lock(&filp->prime.lock);
+- if (obj->dma_buf) {
+- drm_prime_remove_buf_handle_locked(&filp->prime,
+- obj->dma_buf);
+- }
+- mutex_unlock(&filp->prime.lock);
+-}
+-
+ /**
+ * drm_gem_object_handle_free - release resources bound to userspace handles
+ * @obj: GEM object to clean up.
+@@ -252,7 +237,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ if (obj->funcs->close)
+ obj->funcs->close(obj, file_priv);
+
+- drm_gem_remove_prime_handles(obj, file_priv);
++ drm_prime_remove_buf_handle(&file_priv->prime, id);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
+
+ drm_gem_object_handle_put_unlocked(obj);
+diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
+index 17f3548c8ed25..d05e6a5b66873 100644
+--- a/drivers/gpu/drm/drm_internal.h
++++ b/drivers/gpu/drm/drm_internal.h
+@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+- struct dma_buf *dma_buf);
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
++ uint32_t handle);
+
+ /* drm_drv.c */
+ struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+index d6c7f4f9a7a29..a350310b65d89 100644
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
+ return -ENOENT;
+ }
+
+-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+- struct dma_buf *dma_buf)
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
++ uint32_t handle)
+ {
+ struct rb_node *rb;
+
+- rb = prime_fpriv->dmabufs.rb_node;
++ mutex_lock(&prime_fpriv->lock);
++
++ rb = prime_fpriv->handles.rb_node;
+ while (rb) {
+ struct drm_prime_member *member;
+
+- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
+- if (member->dma_buf == dma_buf) {
++ member = rb_entry(rb, struct drm_prime_member, handle_rb);
++ if (member->handle == handle) {
+ rb_erase(&member->handle_rb, &prime_fpriv->handles);
+ rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
+- dma_buf_put(dma_buf);
++ dma_buf_put(member->dma_buf);
+ kfree(member);
+- return;
+- } else if (member->dma_buf < dma_buf) {
++ break;
++ } else if (member->handle < handle) {
+ rb = rb->rb_right;
+ } else {
+ rb = rb->rb_left;
+ }
+ }
++
++ mutex_unlock(&prime_fpriv->lock);
+ }
+
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 508a514c5e37d..d77d91c0a03af 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -475,6 +475,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
+ intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
+ &link_bw, &rate_select);
+
++ /*
++ * WaEdpLinkRateDataReload
++ *
++ * Parade PS8461E MUX (used on varius TGL+ laptops) needs
++ * to snoop the link rates reported by the sink when we
++ * use LINK_RATE_SET in order to operate in jitter cleaning
++ * mode (as opposed to redriver mode). Unfortunately it
++ * loses track of the snooped link rates when powered down,
++ * so we need to make it re-snoop often. Without this high
++ * link rates are not stable.
++ */
++ if (!link_bw) {
++ struct intel_connector *connector = intel_dp->attached_connector;
++ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
++
++ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
++ connector->base.base.id, connector->base.name);
++
++ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
++ sink_rates, sizeof(sink_rates));
++ }
++
+ if (link_bw)
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_BW_SET value %02x\n", link_bw);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 4f0fbf6674316..92905ebb7b459 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1617,6 +1617,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+ if (r) {
+ /* delay GPU reset to resume */
+ radeon_fence_driver_force_completion(rdev, i);
++ } else {
++ /* finish executing delayed work */
++ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
+ }
+ }
+
+diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
+index 1ba1e31459690..05da83841536f 100644
+--- a/drivers/hwmon/mr75203.c
++++ b/drivers/hwmon/mr75203.c
+@@ -68,8 +68,9 @@
+
+ /* VM Individual Macro Register */
+ #define VM_COM_REG_SIZE 0x200
+-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
+-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
++#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
++#define VM_SDIF_DATA(vm, ch) \
++ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
+
+ /* SDA Slave Register */
+ #define IP_CTRL 0x00
+@@ -115,6 +116,7 @@ struct pvt_device {
+ u32 t_num;
+ u32 p_num;
+ u32 v_num;
++ u32 c_num;
+ u32 ip_freq;
+ u8 *vm_idx;
+ };
+@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+ {
+ struct pvt_device *pvt = dev_get_drvdata(dev);
+ struct regmap *v_map = pvt->v_map;
++ u8 vm_idx, ch_idx;
+ u32 n, stat;
+- u8 vm_idx;
+ int ret;
+
+- if (channel >= pvt->v_num)
++ if (channel >= pvt->v_num * pvt->c_num)
+ return -EINVAL;
+
+- vm_idx = pvt->vm_idx[channel];
++ vm_idx = pvt->vm_idx[channel / pvt->c_num];
++ ch_idx = channel % pvt->c_num;
+
+ switch (attr) {
+ case hwmon_in_input:
+@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+ if (ret)
+ return ret;
+
+- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
++ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
+ if(ret < 0)
+ return ret;
+
+ n &= SAMPLE_DATA_MSK;
+- /* Convert the N bitstream count into voltage */
+- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
++ /*
++ * Convert the N bitstream count into voltage.
++ * To support negative voltage calculation for 64bit machines
++ * n must be cast to long, since n and *val differ both in
++ * signedness and in size.
++ * Division is used instead of right shift, because for signed
++ * numbers, the sign bit is used to fill the vacated bit
++ * positions, and if the number is negative, 1 is used.
++ * BIT(x) may not be used instead of (1 << x) because it's
++ * unsigned.
++ */
++ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
+
+ return 0;
+ default:
+@@ -385,6 +398,19 @@ static int pvt_init(struct pvt_device *pvt)
+ if (ret)
+ return ret;
+
++ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
++ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
++ ret = regmap_write(v_map, SDIF_W, val);
++ if (ret < 0)
++ return ret;
++
++ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
++ val, !(val & SDIF_BUSY),
++ PVT_POLL_DELAY_US,
++ PVT_POLL_TIMEOUT_US);
++ if (ret)
++ return ret;
++
+ val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
+ CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+@@ -499,8 +525,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
+
+ static int mr75203_probe(struct platform_device *pdev)
+ {
++ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
+ const struct hwmon_channel_info **pvt_info;
+- u32 ts_num, vm_num, pd_num, val, index, i;
+ struct device *dev = &pdev->dev;
+ u32 *temp_config, *in_config;
+ struct device *hwmon_dev;
+@@ -541,9 +567,11 @@ static int mr75203_probe(struct platform_device *pdev)
+ ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
+ pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
+ vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
++ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
+ pvt->t_num = ts_num;
+ pvt->p_num = pd_num;
+ pvt->v_num = vm_num;
++ pvt->c_num = ch_num;
+ val = 0;
+ if (ts_num)
+ val++;
+@@ -580,7 +608,7 @@ static int mr75203_probe(struct platform_device *pdev)
+ }
+
+ if (vm_num) {
+- u32 num = vm_num;
++ u32 total_ch;
+
+ ret = pvt_get_regmap(pdev, "vm", pvt);
+ if (ret)
+@@ -594,30 +622,30 @@ static int mr75203_probe(struct platform_device *pdev)
+ ret = device_property_read_u8_array(dev, "intel,vm-map",
+ pvt->vm_idx, vm_num);
+ if (ret) {
+- num = 0;
++ /*
++ * Incase intel,vm-map property is not defined, we
++ * assume incremental channel numbers.
++ */
++ for (i = 0; i < vm_num; i++)
++ pvt->vm_idx[i] = i;
+ } else {
+ for (i = 0; i < vm_num; i++)
+ if (pvt->vm_idx[i] >= vm_num ||
+ pvt->vm_idx[i] == 0xff) {
+- num = i;
++ pvt->v_num = i;
++ vm_num = i;
+ break;
+ }
+ }
+
+- /*
+- * Incase intel,vm-map property is not defined, we assume
+- * incremental channel numbers.
+- */
+- for (i = num; i < vm_num; i++)
+- pvt->vm_idx[i] = i;
+-
+- in_config = devm_kcalloc(dev, num + 1,
++ total_ch = ch_num * vm_num;
++ in_config = devm_kcalloc(dev, total_ch + 1,
+ sizeof(*in_config), GFP_KERNEL);
+ if (!in_config)
+ return -ENOMEM;
+
+- memset32(in_config, HWMON_I_INPUT, num);
+- in_config[num] = 0;
++ memset32(in_config, HWMON_I_INPUT, total_ch);
++ in_config[total_ch] = 0;
+ pvt_in.config = in_config;
+
+ pvt_info[index++] = &pvt_in;
+diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
+index 8bd6435c13e82..2148fd543bb4b 100644
+--- a/drivers/hwmon/tps23861.c
++++ b/drivers/hwmon/tps23861.c
+@@ -489,18 +489,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
+
+ static int tps23861_port_resistance(struct tps23861_data *data, int port)
+ {
+- u16 regval;
++ unsigned int raw_val;
++ __le16 regval;
+
+ regmap_bulk_read(data->regmap,
+ PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
+ &regval,
+ 2);
+
+- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
++ raw_val = le16_to_cpu(regval);
++ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
+ case PORT_RESISTANCE_RSN_OTHER:
+- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
++ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
+ case PORT_RESISTANCE_RSN_LOW:
+- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
++ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
+ case PORT_RESISTANCE_RSN_SHORT:
+ case PORT_RESISTANCE_RSN_OPEN:
+ default:
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index a814dabcdff43..0da66dd40d6a8 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1718,8 +1718,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ }
+
+ if (!validate_net_dev(*net_dev,
+- (struct sockaddr *)&req->listen_addr_storage,
+- (struct sockaddr *)&req->src_addr_storage)) {
++ (struct sockaddr *)&req->src_addr_storage,
++ (struct sockaddr *)&req->listen_addr_storage)) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index 7a47343d11f9f..b052de1b9ccb9 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -463,7 +463,7 @@ retry:
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ out_put_mm:
+- mmput(owning_mm);
++ mmput_async(owning_mm);
+ out_put_task:
+ if (owning_process)
+ put_task_struct(owning_process);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index df4501e77fd17..d3d5b5f57052c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -98,7 +98,7 @@
+
+ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
+-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
++#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x0
+ #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 9af4509894e68..5d50d2d1deca9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -495,11 +495,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ hr_qp->rq.rsv_sge);
+
+- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+- else
+- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+- hr_qp->rq.max_gs);
++ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
++ hr_qp->rq.max_gs);
+
+ hr_qp->rq.wqe_cnt = cnt;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c |
1335 |
+index 9b544a3b12886..7e6c3ba8df6ab 100644 |
1336 |
+--- a/drivers/infiniband/hw/irdma/uk.c |
1337 |
++++ b/drivers/infiniband/hw/irdma/uk.c |
1338 |
+@@ -1068,6 +1068,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) |
1339 |
+ enum irdma_status_code ret_code; |
1340 |
+ bool move_cq_head = true; |
1341 |
+ u8 polarity; |
1342 |
++ u8 op_type; |
1343 |
+ bool ext_valid; |
1344 |
+ __le64 *ext_cqe; |
1345 |
+ |
1346 |
+@@ -1250,7 +1251,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) |
1347 |
+ do { |
1348 |
+ __le64 *sw_wqe; |
1349 |
+ u64 wqe_qword; |
1350 |
+- u8 op_type; |
1351 |
+ u32 tail; |
1352 |
+ |
1353 |
+ tail = qp->sq_ring.tail; |
1354 |
+@@ -1267,6 +1267,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) |
1355 |
+ break; |
1356 |
+ } |
1357 |
+ } while (1); |
1358 |
++ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) |
1359 |
++ info->minor_err = FLUSH_MW_BIND_ERR; |
1360 |
+ qp->sq_flush_seen = true; |
1361 |
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) |
1362 |
+ qp->sq_flush_complete = true; |
1363 |
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c |
1364 |
+index cac4fb228b9b0..5275616398d83 100644 |
1365 |
+--- a/drivers/infiniband/hw/irdma/verbs.c |
1366 |
++++ b/drivers/infiniband/hw/irdma/verbs.c |
1367 |
+@@ -36,15 +36,18 @@ static int irdma_query_device(struct ib_device *ibdev, |
1368 |
+ props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; |
1369 |
+ props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; |
1370 |
+ props->max_cq = rf->max_cq - rf->used_cqs; |
1371 |
+- props->max_cqe = rf->max_cqe; |
1372 |
++ props->max_cqe = rf->max_cqe - 1; |
1373 |
+ props->max_mr = rf->max_mr - rf->used_mrs; |
1374 |
+ props->max_mw = props->max_mr; |
1375 |
+ props->max_pd = rf->max_pd - rf->used_pds; |
1376 |
+ props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; |
1377 |
+ props->max_qp_rd_atom = hw_attrs->max_hw_ird; |
1378 |
+ props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; |
1379 |
+- if (rdma_protocol_roce(ibdev, 1)) |
1380 |
++ if (rdma_protocol_roce(ibdev, 1)) { |
1381 |
++ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; |
1382 |
+ props->max_pkeys = IRDMA_PKEY_TBL_SZ; |
1383 |
++ } |
1384 |
++ |
1385 |
+ props->max_ah = rf->max_ah; |
1386 |
+ props->max_mcast_grp = rf->max_mcg; |
1387 |
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; |
1388 |
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c |
1389 |
+index ec242a5a17a35..f6f2df855c2ed 100644 |
1390 |
+--- a/drivers/infiniband/hw/mlx5/mad.c |
1391 |
++++ b/drivers/infiniband/hw/mlx5/mad.c |
1392 |
+@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num, |
1393 |
+ mdev = dev->mdev; |
1394 |
+ mdev_port_num = 1; |
1395 |
+ } |
1396 |
++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { |
1397 |
++ /* set local port to one for Function-Per-Port HCA. */ |
1398 |
++ mdev = dev->mdev; |
1399 |
++ mdev_port_num = 1; |
1400 |
++ } |
1401 |
++ |
1402 |
+ /* Declaring support of extended counters */ |
1403 |
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { |
1404 |
+ struct ib_class_port_info cpi = {}; |
1405 |
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c |
1406 |
+index 1f4e60257700e..7d47b521070b1 100644 |
1407 |
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c |
1408 |
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c |
1409 |
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) |
1410 |
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); |
1411 |
+ |
1412 |
+ if (paddr) |
1413 |
+- return virt_to_page(paddr); |
1414 |
++ return virt_to_page((void *)paddr); |
1415 |
+ |
1416 |
+ return NULL; |
1417 |
+ } |
1418 |
+@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) |
1419 |
+ kunmap_local(kaddr); |
1420 |
+ } |
1421 |
+ } else { |
1422 |
+- u64 va = sge->laddr + sge_off; |
1423 |
++ /* |
1424 |
++ * Cast to an uintptr_t to preserve all 64 bits |
1425 |
++ * in sge->laddr. |
1426 |
++ */ |
1427 |
++ uintptr_t va = (uintptr_t)(sge->laddr + sge_off); |
1428 |
+ |
1429 |
+- page_array[seg] = virt_to_page(va & PAGE_MASK); |
1430 |
++ /* |
1431 |
++ * virt_to_page() takes a (void *) pointer |
1432 |
++ * so cast to a (void *) meaning it will be 64 |
1433 |
++ * bits on a 64 bit platform and 32 bits on a |
1434 |
++ * 32 bit platform. |
1435 |
++ */ |
1436 |
++ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); |
1437 |
+ if (do_crc) |
1438 |
+ crypto_shash_update( |
1439 |
+ c_tx->mpa_crc_hd, |
1440 |
+- (void *)(uintptr_t)va, |
1441 |
++ (void *)va, |
1442 |
+ plen); |
1443 |
+ } |
1444 |
+ |
1445 |
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c |
1446 |
+index 9edbb309b96c0..c644617725a88 100644 |
1447 |
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c |
1448 |
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c |
1449 |
+@@ -1011,7 +1011,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, |
1450 |
+ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, |
1451 |
+ struct rtrs_clt_io_req *req, |
1452 |
+ struct rtrs_rbuf *rbuf, bool fr_en, |
1453 |
+- u32 size, u32 imm, struct ib_send_wr *wr, |
1454 |
++ u32 count, u32 size, u32 imm, |
1455 |
++ struct ib_send_wr *wr, |
1456 |
+ struct ib_send_wr *tail) |
1457 |
+ { |
1458 |
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); |
1459 |
+@@ -1031,12 +1032,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, |
1460 |
+ num_sge = 2; |
1461 |
+ ptail = tail; |
1462 |
+ } else { |
1463 |
+- for_each_sg(req->sglist, sg, req->sg_cnt, i) { |
1464 |
++ for_each_sg(req->sglist, sg, count, i) { |
1465 |
+ sge[i].addr = sg_dma_address(sg); |
1466 |
+ sge[i].length = sg_dma_len(sg); |
1467 |
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; |
1468 |
+ } |
1469 |
+- num_sge = 1 + req->sg_cnt; |
1470 |
++ num_sge = 1 + count; |
1471 |
+ } |
1472 |
+ sge[i].addr = req->iu->dma_addr; |
1473 |
+ sge[i].length = size; |
1474 |
+@@ -1149,7 +1150,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) |
1475 |
+ */ |
1476 |
+ rtrs_clt_update_all_stats(req, WRITE); |
1477 |
+ |
1478 |
+- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, |
1479 |
++ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, |
1480 |
+ req->usr_len + sizeof(*msg), |
1481 |
+ imm, wr, &inv_wr); |
1482 |
+ if (ret) { |
1483 |
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c |
1484 |
+index 1ca31b919e987..733116554e0bc 100644 |
1485 |
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c |
1486 |
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c |
1487 |
+@@ -600,7 +600,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) |
1488 |
+ struct sg_table *sgt = &srv_mr->sgt; |
1489 |
+ struct scatterlist *s; |
1490 |
+ struct ib_mr *mr; |
1491 |
+- int nr, chunks; |
1492 |
++ int nr, nr_sgt, chunks; |
1493 |
+ |
1494 |
+ chunks = chunks_per_mr * mri; |
1495 |
+ if (!always_invalidate) |
1496 |
+@@ -615,19 +615,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) |
1497 |
+ sg_set_page(s, srv->chunks[chunks + i], |
1498 |
+ max_chunk_size, 0); |
1499 |
+ |
1500 |
+- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, |
1501 |
++ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, |
1502 |
+ sgt->nents, DMA_BIDIRECTIONAL); |
1503 |
+- if (nr < sgt->nents) { |
1504 |
+- err = nr < 0 ? nr : -EINVAL; |
1505 |
++ if (!nr_sgt) { |
1506 |
++ err = -EINVAL; |
1507 |
+ goto free_sg; |
1508 |
+ } |
1509 |
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, |
1510 |
+- sgt->nents); |
1511 |
++ nr_sgt); |
1512 |
+ if (IS_ERR(mr)) { |
1513 |
+ err = PTR_ERR(mr); |
1514 |
+ goto unmap_sg; |
1515 |
+ } |
1516 |
+- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents, |
1517 |
++ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, |
1518 |
+ NULL, max_chunk_size); |
1519 |
+ if (nr < 0 || nr < sgt->nents) { |
1520 |
+ err = nr < 0 ? nr : -EINVAL; |
1521 |
+@@ -646,7 +646,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) |
1522 |
+ } |
1523 |
+ } |
1524 |
+ /* Eventually dma addr for each chunk can be cached */ |
1525 |
+- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) |
1526 |
++ for_each_sg(sgt->sgl, s, nr_sgt, i) |
1527 |
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s); |
1528 |
+ |
1529 |
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); |
1530 |
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c |
1531 |
+index 5d416ec228717..473b3a08cf96d 100644 |
1532 |
+--- a/drivers/infiniband/ulp/srp/ib_srp.c |
1533 |
++++ b/drivers/infiniband/ulp/srp/ib_srp.c |
1534 |
+@@ -1955,7 +1955,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) |
1535 |
+ if (scmnd) { |
1536 |
+ req = scsi_cmd_priv(scmnd); |
1537 |
+ scmnd = srp_claim_req(ch, req, NULL, scmnd); |
1538 |
+- } else { |
1539 |
++ } |
1540 |
++ if (!scmnd) { |
1541 |
+ shost_printk(KERN_ERR, target->scsi_host, |
1542 |
+ "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", |
1543 |
+ rsp->tag, ch - target->ch, ch->qp->qp_num); |
1544 |
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c |
1545 |
+index e23e70af718f1..7154fb551ddc9 100644 |
1546 |
+--- a/drivers/iommu/amd/iommu.c |
1547 |
++++ b/drivers/iommu/amd/iommu.c |
1548 |
+@@ -852,7 +852,8 @@ static void build_completion_wait(struct iommu_cmd *cmd, |
1549 |
+ memset(cmd, 0, sizeof(*cmd)); |
1550 |
+ cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; |
1551 |
+ cmd->data[1] = upper_32_bits(paddr); |
1552 |
+- cmd->data[2] = data; |
1553 |
++ cmd->data[2] = lower_32_bits(data); |
1554 |
++ cmd->data[3] = upper_32_bits(data); |
1555 |
+ CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); |
1556 |
+ } |
1557 |
+ |
1558 |
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c |
1559 |
+index a1ffb3d6d9015..bc5444daca9b4 100644 |
1560 |
+--- a/drivers/iommu/intel/iommu.c |
1561 |
++++ b/drivers/iommu/intel/iommu.c |
1562 |
+@@ -542,14 +542,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain, |
1563 |
+ return !(addr_width < BITS_PER_LONG && pfn >> addr_width); |
1564 |
+ } |
1565 |
+ |
1566 |
++/* |
1567 |
++ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU. |
1568 |
++ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of |
1569 |
++ * the returned SAGAW. |
1570 |
++ */ |
1571 |
++static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) |
1572 |
++{ |
1573 |
++ unsigned long fl_sagaw, sl_sagaw; |
1574 |
++ |
1575 |
++ fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); |
1576 |
++ sl_sagaw = cap_sagaw(iommu->cap); |
1577 |
++ |
1578 |
++ /* Second level only. */ |
1579 |
++ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) |
1580 |
++ return sl_sagaw; |
1581 |
++ |
1582 |
++ /* First level only. */ |
1583 |
++ if (!ecap_slts(iommu->ecap)) |
1584 |
++ return fl_sagaw; |
1585 |
++ |
1586 |
++ return fl_sagaw & sl_sagaw; |
1587 |
++} |
1588 |
++ |
1589 |
+ static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) |
1590 |
+ { |
1591 |
+ unsigned long sagaw; |
1592 |
+ int agaw; |
1593 |
+ |
1594 |
+- sagaw = cap_sagaw(iommu->cap); |
1595 |
+- for (agaw = width_to_agaw(max_gaw); |
1596 |
+- agaw >= 0; agaw--) { |
1597 |
++ sagaw = __iommu_calculate_sagaw(iommu); |
1598 |
++ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { |
1599 |
+ if (test_bit(agaw, &sagaw)) |
1600 |
+ break; |
1601 |
+ } |
1602 |
+diff --git a/drivers/md/md.c b/drivers/md/md.c |
1603 |
+index c8f2e8524bfb7..04e1e294b4b1e 100644 |
1604 |
+--- a/drivers/md/md.c |
1605 |
++++ b/drivers/md/md.c |
1606 |
+@@ -5651,6 +5651,7 @@ static int md_alloc(dev_t dev, char *name) |
1607 |
+ * removed (mddev_delayed_delete). |
1608 |
+ */ |
1609 |
+ flush_workqueue(md_misc_wq); |
1610 |
++ flush_workqueue(md_rdev_misc_wq); |
1611 |
+ |
1612 |
+ mutex_lock(&disks_mutex); |
1613 |
+ mddev = mddev_alloc(dev); |
1614 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h |
1615 |
+index 210f09118edea..0f19c237cb587 100644 |
1616 |
+--- a/drivers/net/ethernet/intel/i40e/i40e.h |
1617 |
++++ b/drivers/net/ethernet/intel/i40e/i40e.h |
1618 |
+@@ -1286,4 +1286,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, |
1619 |
+ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, |
1620 |
+ struct i40e_cloud_filter *filter, |
1621 |
+ bool add); |
1622 |
++ |
1623 |
++/** |
1624 |
++ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF |
1625 |
++ * @pf: pointer to a pf. |
1626 |
++ * |
1627 |
++ * Check and return value of flag I40E_FLAG_TC_MQPRIO. |
1628 |
++ * |
1629 |
++ * Return: I40E_FLAG_TC_MQPRIO set state. |
1630 |
++ **/ |
1631 |
++static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) |
1632 |
++{ |
1633 |
++ return pf->flags & I40E_FLAG_TC_MQPRIO; |
1634 |
++} |
1635 |
++ |
1636 |
+ #endif /* _I40E_H_ */ |
1637 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c |
1638 |
+index ea2bb0140a6eb..10d7a982a5b9b 100644 |
1639 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c |
1640 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c |
1641 |
+@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) |
1642 |
+ "Cannot locate client instance close routine\n"); |
1643 |
+ return; |
1644 |
+ } |
1645 |
++ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { |
1646 |
++ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); |
1647 |
++ return; |
1648 |
++ } |
1649 |
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); |
1650 |
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); |
1651 |
+ i40e_client_release_qvlist(&cdev->lan_info); |
1652 |
+@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf) |
1653 |
+ /* Remove failed client instance */ |
1654 |
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, |
1655 |
+ &cdev->state); |
1656 |
+- i40e_client_del_instance(pf); |
1657 |
+ return; |
1658 |
+ } |
1659 |
+ } |
1660 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
1661 |
+index 669ae53f4c728..8e770c5e181ea 100644 |
1662 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
1663 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
1664 |
+@@ -4921,7 +4921,7 @@ static int i40e_set_channels(struct net_device *dev, |
1665 |
+ /* We do not support setting channels via ethtool when TCs are |
1666 |
+ * configured through mqprio |
1667 |
+ */ |
1668 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1669 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1670 |
+ return -EINVAL; |
1671 |
+ |
1672 |
+ /* verify they are not requesting separate vectors */ |
1673 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1674 |
+index 536f9198bd47a..ce6eea7a60027 100644 |
1675 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
1676 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
1677 |
+@@ -5320,7 +5320,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) |
1678 |
+ u8 num_tc = 0; |
1679 |
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; |
1680 |
+ |
1681 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1682 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1683 |
+ return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; |
1684 |
+ |
1685 |
+ /* If neither MQPRIO nor DCB is enabled, then always use single TC */ |
1686 |
+@@ -5352,7 +5352,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) |
1687 |
+ **/ |
1688 |
+ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) |
1689 |
+ { |
1690 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1691 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1692 |
+ return i40e_mqprio_get_enabled_tc(pf); |
1693 |
+ |
1694 |
+ /* If neither MQPRIO nor DCB is enabled for this PF then just return |
1695 |
+@@ -5449,7 +5449,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, |
1696 |
+ int i; |
1697 |
+ |
1698 |
+ /* There is no need to reset BW when mqprio mode is on. */ |
1699 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1700 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1701 |
+ return 0; |
1702 |
+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { |
1703 |
+ ret = i40e_set_bw_limit(vsi, vsi->seid, 0); |
1704 |
+@@ -5521,7 +5521,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) |
1705 |
+ vsi->tc_config.tc_info[i].qoffset); |
1706 |
+ } |
1707 |
+ |
1708 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1709 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1710 |
+ return; |
1711 |
+ |
1712 |
+ /* Assign UP2TC map for the VSI */ |
1713 |
+@@ -5682,7 +5682,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) |
1714 |
+ ctxt.vf_num = 0; |
1715 |
+ ctxt.uplink_seid = vsi->uplink_seid; |
1716 |
+ ctxt.info = vsi->info; |
1717 |
+- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { |
1718 |
++ if (i40e_is_tc_mqprio_enabled(pf)) { |
1719 |
+ ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); |
1720 |
+ if (ret) |
1721 |
+ goto out; |
1722 |
+@@ -6406,7 +6406,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, |
1723 |
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; |
1724 |
+ |
1725 |
+ if (vsi->type == I40E_VSI_MAIN) { |
1726 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) |
1727 |
++ if (i40e_is_tc_mqprio_enabled(pf)) |
1728 |
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); |
1729 |
+ else |
1730 |
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); |
1731 |
+@@ -6517,6 +6517,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) |
1732 |
+ vsi->tc_seid_map[i] = ch->seid; |
1733 |
+ } |
1734 |
+ } |
1735 |
++ |
1736 |
++ /* reset to reconfigure TX queue contexts */ |
1737 |
++ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); |
1738 |
+ return ret; |
1739 |
+ |
1740 |
+ err_free: |
1741 |
+@@ -7800,7 +7803,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) |
1742 |
+ netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); |
1743 |
+ return ERR_PTR(-EINVAL); |
1744 |
+ } |
1745 |
+- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { |
1746 |
++ if (i40e_is_tc_mqprio_enabled(pf)) { |
1747 |
+ netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); |
1748 |
+ return ERR_PTR(-EINVAL); |
1749 |
+ } |
1750 |
+@@ -8053,7 +8056,7 @@ config_tc: |
1751 |
+ /* Quiesce VSI queues */ |
1752 |
+ i40e_quiesce_vsi(vsi); |
1753 |
+ |
1754 |
+- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) |
1755 |
++ if (!hw && !i40e_is_tc_mqprio_enabled(pf)) |
1756 |
+ i40e_remove_queue_channels(vsi); |
1757 |
+ |
1758 |
+ /* Configure VSI for enabled TCs */ |
1759 |
+@@ -8077,7 +8080,7 @@ config_tc: |
1760 |
+ "Setup channel (id:%u) utilizing num_queues %d\n", |
1761 |
+ vsi->seid, vsi->tc_config.tc_info[0].qcount); |
1762 |
+ |
1763 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) { |
1764 |
++ if (i40e_is_tc_mqprio_enabled(pf)) { |
1765 |
+ if (vsi->mqprio_qopt.max_rate[0]) { |
1766 |
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; |
1767 |
+ |
1768 |
+@@ -10731,7 +10734,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
1769 |
+ * unless I40E_FLAG_TC_MQPRIO was enabled or DCB |
1770 |
+ * is not supported with new link speed |
1771 |
+ */ |
1772 |
+- if (pf->flags & I40E_FLAG_TC_MQPRIO) { |
1773 |
++ if (i40e_is_tc_mqprio_enabled(pf)) { |
1774 |
+ i40e_aq_set_dcb_parameters(hw, false, NULL); |
1775 |
+ } else { |
1776 |
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) && |
1777 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1778 |
+index d3a4a33977ee8..326fd25d055f8 100644 |
1779 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1780 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
1781 |
+@@ -3651,7 +3651,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev, |
1782 |
+ u8 prio; |
1783 |
+ |
1784 |
+ /* is DCB enabled at all? */ |
1785 |
+- if (vsi->tc_config.numtc == 1) |
1786 |
++ if (vsi->tc_config.numtc == 1 || |
1787 |
++ i40e_is_tc_mqprio_enabled(vsi->back)) |
1788 |
+ return netdev_pick_tx(netdev, skb, sb_dev); |
1789 |
+ |
1790 |
+ prio = skb->priority; |
1791 |
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1792 |
+index db95786c3419f..00b2ef01f4ea6 100644 |
1793 |
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c |
1794 |
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1795 |
+@@ -2222,6 +2222,11 @@ static void iavf_reset_task(struct work_struct *work) |
1796 |
+ int i = 0, err; |
1797 |
+ bool running; |
1798 |
+ |
1799 |
++ /* Detach interface to avoid subsequent NDO callbacks */ |
1800 |
++ rtnl_lock(); |
1801 |
++ netif_device_detach(netdev); |
1802 |
++ rtnl_unlock(); |
1803 |
++ |
1804 |
+ /* When device is being removed it doesn't make sense to run the reset |
1805 |
+ * task, just return in such a case. |
1806 |
+ */ |
1807 |
+@@ -2229,7 +2234,7 @@ static void iavf_reset_task(struct work_struct *work) |
1808 |
+ if (adapter->state != __IAVF_REMOVE) |
1809 |
+ queue_work(iavf_wq, &adapter->reset_task); |
1810 |
+ |
1811 |
+- return; |
1812 |
++ goto reset_finish; |
1813 |
+ } |
1814 |
+ |
1815 |
+ while (!mutex_trylock(&adapter->client_lock)) |
1816 |
+@@ -2299,7 +2304,6 @@ continue_reset: |
1817 |
+ |
1818 |
+ if (running) { |
1819 |
+ netif_carrier_off(netdev); |
1820 |
+- netif_tx_stop_all_queues(netdev); |
1821 |
+ adapter->link_up = false; |
1822 |
+ iavf_napi_disable_all(adapter); |
1823 |
+ } |
1824 |
+@@ -2412,7 +2416,7 @@ continue_reset: |
1825 |
+ mutex_unlock(&adapter->client_lock); |
1826 |
+ mutex_unlock(&adapter->crit_lock); |
1827 |
+ |
1828 |
+- return; |
1829 |
++ goto reset_finish; |
1830 |
+ reset_err: |
1831 |
+ if (running) { |
1832 |
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); |
1833 |
+@@ -2423,6 +2427,10 @@ reset_err: |
1834 |
+ mutex_unlock(&adapter->client_lock); |
1835 |
+ mutex_unlock(&adapter->crit_lock); |
1836 |
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); |
1837 |
++reset_finish: |
1838 |
++ rtnl_lock(); |
1839 |
++ netif_device_attach(netdev); |
1840 |
++ rtnl_unlock(); |
1841 |
+ } |
1842 |
+ |
1843 |
+ /** |
1844 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c |
1845 |
+index b9d45c7dbef18..63ae4674d2000 100644 |
1846 |
+--- a/drivers/net/ethernet/intel/ice/ice_main.c |
1847 |
++++ b/drivers/net/ethernet/intel/ice/ice_main.c |
1848 |
+@@ -3549,7 +3549,7 @@ static int ice_init_pf(struct ice_pf *pf) |
1849 |
+ |
1850 |
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); |
1851 |
+ if (!pf->avail_rxqs) { |
1852 |
+- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); |
1853 |
++ bitmap_free(pf->avail_txqs); |
1854 |
+ pf->avail_txqs = NULL; |
1855 |
+ return -ENOMEM; |
1856 |
+ } |
1857 |
+diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c |
1858 |
+index 73f7962a37d33..c49062ad72c6c 100644 |
1859 |
+--- a/drivers/net/phy/meson-gxl.c |
1860 |
++++ b/drivers/net/phy/meson-gxl.c |
1861 |
+@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) |
1862 |
+ irq_status == INTSRC_ENERGY_DETECT) |
1863 |
+ return IRQ_HANDLED; |
1864 |
+ |
1865 |
+- /* Give PHY some time before MAC starts sending data. This works |
1866 |
+- * around an issue where network doesn't come up properly. |
1867 |
+- */ |
1868 |
+- if (!(irq_status & INTSRC_LINK_DOWN)) |
1869 |
+- phy_queue_state_machine(phydev, msecs_to_jiffies(100)); |
1870 |
+- else |
1871 |
+- phy_trigger_machine(phydev); |
1872 |
++ phy_trigger_machine(phydev); |
1873 |
+ |
1874 |
+ return IRQ_HANDLED; |
1875 |
+ } |
1876 |
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c |
1877 |
+index 532e3b91777d9..150805aec4071 100644 |
1878 |
+--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c |
1879 |
++++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c |
1880 |
+@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, |
1881 |
+ /* Repeat initial/next rate. |
1882 |
+ * For legacy IL_NUMBER_TRY == 1, this loop will not execute. |
1883 |
+ * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */ |
1884 |
+- while (repeat_rate > 0) { |
1885 |
++ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) { |
1886 |
+ if (is_legacy(tbl_type.lq_type)) { |
1887 |
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) |
1888 |
+ ant_toggle_cnt++; |
1889 |
+@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, |
1890 |
+ cpu_to_le32(new_rate); |
1891 |
+ repeat_rate--; |
1892 |
+ idx++; |
1893 |
+- if (idx >= LINK_QUAL_MAX_RETRY_NUM) |
1894 |
+- goto out; |
1895 |
+ } |
1896 |
+ |
1897 |
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, |
1898 |
+@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, |
1899 |
+ repeat_rate--; |
1900 |
+ } |
1901 |
+ |
1902 |
+-out: |
1903 |
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; |
1904 |
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; |
1905 |
+ |
1906 |
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h |
1907 |
+index 86209b391a3d6..e6e23fc585ee8 100644 |
1908 |
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.h |
1909 |
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.h |
1910 |
+@@ -252,6 +252,7 @@ struct wilc { |
1911 |
+ u8 *rx_buffer; |
1912 |
+ u32 rx_buffer_offset; |
1913 |
+ u8 *tx_buffer; |
1914 |
++ u32 *vmm_table; |
1915 |
+ |
1916 |
+ struct txq_handle txq[NQUEUES]; |
1917 |
+ int txq_entries; |
1918 |
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c |
1919 |
+index 8b3b735231085..6c0727fc4abd9 100644 |
1920 |
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c |
1921 |
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c |
1922 |
+@@ -27,6 +27,7 @@ struct wilc_sdio { |
1923 |
+ bool irq_gpio; |
1924 |
+ u32 block_size; |
1925 |
+ int has_thrpt_enh3; |
1926 |
++ u8 *cmd53_buf; |
1927 |
+ }; |
1928 |
+ |
1929 |
+ struct sdio_cmd52 { |
1930 |
+@@ -46,6 +47,7 @@ struct sdio_cmd53 { |
1931 |
+ u32 count: 9; |
1932 |
+ u8 *buffer; |
1933 |
+ u32 block_size; |
1934 |
++ bool use_global_buf; |
1935 |
+ }; |
1936 |
+ |
1937 |
+ static const struct wilc_hif_func wilc_hif_sdio; |
1938 |
+@@ -90,6 +92,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) |
1939 |
+ { |
1940 |
+ struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); |
1941 |
+ int size, ret; |
1942 |
++ struct wilc_sdio *sdio_priv = wilc->bus_data; |
1943 |
++ u8 *buf = cmd->buffer; |
1944 |
+ |
1945 |
+ sdio_claim_host(func); |
1946 |
+ |
1947 |
+@@ -100,12 +104,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) |
1948 |
+ else |
1949 |
+ size = cmd->count; |
1950 |
+ |
1951 |
++ if (cmd->use_global_buf) { |
1952 |
++ if (size > sizeof(u32)) |
1953 |
++ return -EINVAL; |
1954 |
++ |
1955 |
++ buf = sdio_priv->cmd53_buf; |
1956 |
++ } |
1957 |
++ |
1958 |
+ if (cmd->read_write) { /* write */ |
1959 |
+- ret = sdio_memcpy_toio(func, cmd->address, |
1960 |
+- (void *)cmd->buffer, size); |
1961 |
++ if (cmd->use_global_buf) |
1962 |
++ memcpy(buf, cmd->buffer, size); |
1963 |
++ |
1964 |
++ ret = sdio_memcpy_toio(func, cmd->address, buf, size); |
1965 |
+ } else { /* read */ |
1966 |
+- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer, |
1967 |
+- cmd->address, size); |
1968 |
++ ret = sdio_memcpy_fromio(func, buf, cmd->address, size); |
1969 |
++ |
1970 |
++ if (cmd->use_global_buf) |
1971 |
++ memcpy(cmd->buffer, buf, size); |
1972 |
+ } |
1973 |
+ |
1974 |
+ sdio_release_host(func); |
1975 |
+@@ -127,6 +142,12 @@ static int wilc_sdio_probe(struct sdio_func *func, |
1976 |
+ if (!sdio_priv) |
1977 |
+ return -ENOMEM; |
1978 |
+ |
1979 |
++ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL); |
1980 |
++ if (!sdio_priv->cmd53_buf) { |
1981 |
++ ret = -ENOMEM; |
1982 |
++ goto free; |
1983 |
++ } |
1984 |
++ |
1985 |
+ ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO, |
1986 |
+ &wilc_hif_sdio); |
1987 |
+ if (ret) |
1988 |
+@@ -160,6 +181,7 @@ dispose_irq: |
1989 |
+ irq_dispose_mapping(wilc->dev_irq_num); |
1990 |
+ wilc_netdev_cleanup(wilc); |
1991 |
+ free: |
1992 |
++ kfree(sdio_priv->cmd53_buf); |
1993 |
+ kfree(sdio_priv); |
1994 |
+ return ret; |
1995 |
+ } |
1996 |
+@@ -171,6 +193,7 @@ static void wilc_sdio_remove(struct sdio_func *func) |
1997 |
+ |
1998 |
+ clk_disable_unprepare(wilc->rtc_clk); |
1999 |
+ wilc_netdev_cleanup(wilc); |
2000 |
++ kfree(sdio_priv->cmd53_buf); |
2001 |
+ kfree(sdio_priv); |
2002 |
+ } |
2003 |
+ |
2004 |
+@@ -367,8 +390,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) |
2005 |
+ cmd.address = WILC_SDIO_FBR_DATA_REG; |
2006 |
+ cmd.block_mode = 0; |
2007 |
+ cmd.increment = 1; |
2008 |
+- cmd.count = 4; |
2009 |
++ cmd.count = sizeof(u32); |
2010 |
+ cmd.buffer = (u8 *)&data; |
2011 |
++ cmd.use_global_buf = true; |
2012 |
+ cmd.block_size = sdio_priv->block_size; |
2013 |
+ ret = wilc_sdio_cmd53(wilc, &cmd); |
2014 |
+ if (ret) |
2015 |
+@@ -406,6 +430,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) |
2016 |
+ nblk = size / block_size; |
2017 |
+ nleft = size % block_size; |
2018 |
+ |
2019 |
++ cmd.use_global_buf = false; |
2020 |
+ if (nblk > 0) { |
2021 |
+ cmd.block_mode = 1; |
2022 |
+ cmd.increment = 1; |
2023 |
+@@ -484,8 +509,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) |
2024 |
+ cmd.address = WILC_SDIO_FBR_DATA_REG; |
2025 |
+ cmd.block_mode = 0; |
2026 |
+ cmd.increment = 1; |
2027 |
+- cmd.count = 4; |
2028 |
++ cmd.count = sizeof(u32); |
2029 |
+ cmd.buffer = (u8 *)data; |
2030 |
++ cmd.use_global_buf = true; |
2031 |
+ |
2032 |
+ cmd.block_size = sdio_priv->block_size; |
2033 |
+ ret = wilc_sdio_cmd53(wilc, &cmd); |
2034 |
+@@ -527,6 +553,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) |
2035 |
+ nblk = size / block_size; |
2036 |
+ nleft = size % block_size; |
2037 |
+ |
2038 |
++ cmd.use_global_buf = false; |
2039 |
+ if (nblk > 0) { |
2040 |
+ cmd.block_mode = 1; |
2041 |
+ cmd.increment = 1; |
2042 |
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c |
2043 |
+index 200a103a0a858..380699983a75b 100644 |
2044 |
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c |
2045 |
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c |
2046 |
+@@ -701,7 +701,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) |
2047 |
+ int ret = 0; |
2048 |
+ int counter; |
2049 |
+ int timeout; |
2050 |
+- u32 vmm_table[WILC_VMM_TBL_SIZE]; |
2051 |
++ u32 *vmm_table = wilc->vmm_table; |
2052 |
+ u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; |
2053 |
+ const struct wilc_hif_func *func; |
2054 |
+ int srcu_idx; |
2055 |
+@@ -1220,6 +1220,8 @@ void wilc_wlan_cleanup(struct net_device *dev) |
2056 |
+ while ((rqe = wilc_wlan_rxq_remove(wilc))) |
2057 |
+ kfree(rqe); |
2058 |
+ |
2059 |
++ kfree(wilc->vmm_table); |
2060 |
++ wilc->vmm_table = NULL; |
2061 |
+ kfree(wilc->rx_buffer); |
2062 |
+ wilc->rx_buffer = NULL; |
2063 |
+ kfree(wilc->tx_buffer); |
2064 |
+@@ -1455,6 +1457,14 @@ int wilc_wlan_init(struct net_device *dev) |
2065 |
+ goto fail; |
2066 |
+ } |
2067 |
+ |
2068 |
++ if (!wilc->vmm_table) |
2069 |
++ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL); |
2070 |
++ |
2071 |
++ if (!wilc->vmm_table) { |
2072 |
++ ret = -ENOBUFS; |
2073 |
++ goto fail; |
2074 |
++ } |
2075 |
++ |
2076 |
+ if (!wilc->tx_buffer) |
2077 |
+ wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL); |
2078 |
+ |
2079 |
+@@ -1479,7 +1489,8 @@ int wilc_wlan_init(struct net_device *dev) |
2080 |
+ return 0; |
2081 |
+ |
2082 |
+ fail: |
2083 |
+- |
2084 |
++ kfree(wilc->vmm_table); |
2085 |
++ wilc->vmm_table = NULL; |
2086 |
+ kfree(wilc->rx_buffer); |
2087 |
+ wilc->rx_buffer = NULL; |
2088 |
+ kfree(wilc->tx_buffer); |
2089 |
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c |
2090 |
+index c6b032f95d2e4..4627847c6daab 100644 |
2091 |
+--- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c |
2092 |
++++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c |
2093 |
+@@ -372,8 +372,6 @@ bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol, |
2094 |
+ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, |
2095 |
+ struct ipc_pipe *pipe) |
2096 |
+ { |
2097 |
+- u32 tail = |
2098 |
+- le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]); |
2099 |
+ struct ipc_protocol_td *p_td; |
2100 |
+ struct sk_buff *skb; |
2101 |
+ |
2102 |
+@@ -403,14 +401,6 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, |
2103 |
+ goto ret; |
2104 |
+ } |
2105 |
+ |
2106 |
+- if (!IPC_CB(skb)) { |
2107 |
+- dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL", |
2108 |
+- pipe->pipe_nr, tail); |
2109 |
+- ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); |
2110 |
+- skb = NULL; |
2111 |
+- goto ret; |
2112 |
+- } |
2113 |
+- |
2114 |
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) { |
2115 |
+ dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p", |
2116 |
+ (unsigned long long)p_td->buffer.address, skb->data); |
2117 |
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c |
2118 |
+index 990360d75cb64..e85b3c5d4acce 100644 |
2119 |
+--- a/drivers/net/xen-netback/xenbus.c |
2120 |
++++ b/drivers/net/xen-netback/xenbus.c |
2121 |
+@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be) |
2122 |
+ unsigned int queue_index; |
2123 |
+ |
2124 |
+ xen_unregister_watchers(vif); |
2125 |
+- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); |
2126 |
+ #ifdef CONFIG_DEBUG_FS |
2127 |
+ xenvif_debugfs_delif(vif); |
2128 |
+ #endif /* CONFIG_DEBUG_FS */ |
2129 |
+@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev) |
2130 |
+ struct backend_info *be = dev_get_drvdata(&dev->dev); |
2131 |
+ |
2132 |
+ unregister_hotplug_status_watch(be); |
2133 |
++ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); |
2134 |
+ if (be->vif) { |
2135 |
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); |
2136 |
+ backend_disconnect(be); |
2137 |
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c |
2138 |
+index 20138e132558c..96d8d7844e846 100644 |
2139 |
+--- a/drivers/nvme/host/tcp.c |
2140 |
++++ b/drivers/nvme/host/tcp.c |
2141 |
+@@ -119,7 +119,6 @@ struct nvme_tcp_queue { |
2142 |
+ struct mutex send_mutex; |
2143 |
+ struct llist_head req_list; |
2144 |
+ struct list_head send_list; |
2145 |
+- bool more_requests; |
2146 |
+ |
2147 |
+ /* recv state */ |
2148 |
+ void *pdu; |
2149 |
+@@ -315,7 +314,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) |
2150 |
+ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) |
2151 |
+ { |
2152 |
+ return !list_empty(&queue->send_list) || |
2153 |
+- !llist_empty(&queue->req_list) || queue->more_requests; |
2154 |
++ !llist_empty(&queue->req_list); |
2155 |
+ } |
2156 |
+ |
2157 |
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, |
2158 |
+@@ -334,9 +333,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, |
2159 |
+ */ |
2160 |
+ if (queue->io_cpu == raw_smp_processor_id() && |
2161 |
+ sync && empty && mutex_trylock(&queue->send_mutex)) { |
2162 |
+- queue->more_requests = !last; |
2163 |
+ nvme_tcp_send_all(queue); |
2164 |
+- queue->more_requests = false; |
2165 |
+ mutex_unlock(&queue->send_mutex); |
2166 |
+ } |
2167 |
+ |
2168 |
+@@ -1209,7 +1206,7 @@ static void nvme_tcp_io_work(struct work_struct *w) |
2169 |
+ else if (unlikely(result < 0)) |
2170 |
+ return; |
2171 |
+ |
2172 |
+- if (!pending) |
2173 |
++ if (!pending || !queue->rd_enabled) |
2174 |
+ return; |
2175 |
+ |
2176 |
+ } while (!time_after(jiffies, deadline)); /* quota is exhausted */ |
2177 |
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c |
2178 |
+index a8dafe8670f20..0a0c1d956c73a 100644 |
2179 |
+--- a/drivers/nvme/target/core.c |
2180 |
++++ b/drivers/nvme/target/core.c |
2181 |
+@@ -736,6 +736,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) |
2182 |
+ |
2183 |
+ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) |
2184 |
+ { |
2185 |
++ struct nvmet_ns *ns = req->ns; |
2186 |
++ |
2187 |
+ if (!req->sq->sqhd_disabled) |
2188 |
+ nvmet_update_sq_head(req); |
2189 |
+ req->cqe->sq_id = cpu_to_le16(req->sq->qid); |
2190 |
+@@ -746,9 +748,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) |
2191 |
+ |
2192 |
+ trace_nvmet_req_complete(req); |
2193 |
+ |
2194 |
+- if (req->ns) |
2195 |
+- nvmet_put_namespace(req->ns); |
2196 |
+ req->ops->queue_response(req); |
2197 |
++ if (ns) |
2198 |
++ nvmet_put_namespace(ns); |
2199 |
+ } |
2200 |
+ |
2201 |
+ void nvmet_req_complete(struct nvmet_req *req, u16 status) |
2202 |
+diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c |
2203 |
+index 235553337fb2d..1466698751c55 100644 |
2204 |
+--- a/drivers/nvme/target/zns.c |
2205 |
++++ b/drivers/nvme/target/zns.c |
2206 |
+@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) |
2207 |
+ struct nvme_id_ns_zns *id_zns; |
2208 |
+ u64 zsze; |
2209 |
+ u16 status; |
2210 |
++ u32 mar, mor; |
2211 |
+ |
2212 |
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { |
2213 |
+ req->error_loc = offsetof(struct nvme_identify, nsid); |
2214 |
+@@ -126,8 +127,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) |
2215 |
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> |
2216 |
+ req->ns->blksize_shift; |
2217 |
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze); |
2218 |
+- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev)); |
2219 |
+- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev)); |
2220 |
++ |
2221 |
++ mor = bdev_max_open_zones(req->ns->bdev); |
2222 |
++ if (!mor) |
2223 |
++ mor = U32_MAX; |
2224 |
++ else |
2225 |
++ mor--; |
2226 |
++ id_zns->mor = cpu_to_le32(mor); |
2227 |
++ |
2228 |
++ mar = bdev_max_active_zones(req->ns->bdev); |
2229 |
++ if (!mar) |
2230 |
++ mar = U32_MAX; |
2231 |
++ else |
2232 |
++ mar--; |
2233 |
++ id_zns->mar = cpu_to_le32(mar); |
2234 |
+ |
2235 |
+ done: |
2236 |
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); |
2237 |
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c |
2238 |
+index 9be007c9420f9..f69ab90b5e22d 100644 |
2239 |
+--- a/drivers/parisc/ccio-dma.c |
2240 |
++++ b/drivers/parisc/ccio-dma.c |
2241 |
+@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) |
2242 |
+ } |
2243 |
+ } |
2244 |
+ |
2245 |
+-static void __init ccio_init_resources(struct ioc *ioc) |
2246 |
++static int __init ccio_init_resources(struct ioc *ioc) |
2247 |
+ { |
2248 |
+ struct resource *res = ioc->mmio_region; |
2249 |
+ char *name = kmalloc(14, GFP_KERNEL); |
2250 |
+- |
2251 |
++ if (unlikely(!name)) |
2252 |
++ return -ENOMEM; |
2253 |
+ snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); |
2254 |
+ |
2255 |
+ ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); |
2256 |
+ ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); |
2257 |
++ return 0; |
2258 |
+ } |
2259 |
+ |
2260 |
+ static int new_ioc_area(struct resource *res, unsigned long size, |
2261 |
+@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev) |
2262 |
+ return -ENOMEM; |
2263 |
+ } |
2264 |
+ ccio_ioc_init(ioc); |
2265 |
+- ccio_init_resources(ioc); |
2266 |
++ if (ccio_init_resources(ioc)) { |
2267 |
++ kfree(ioc); |
2268 |
++ return -ENOMEM; |
2269 |
++ } |
2270 |
+ hppa_dma_ops = &ccio_ops; |
2271 |
+ |
2272 |
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL); |
2273 |
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c |
2274 |
+index f4f28e5888b1c..43613db7af754 100644 |
2275 |
+--- a/drivers/regulator/core.c |
2276 |
++++ b/drivers/regulator/core.c |
2277 |
+@@ -2688,13 +2688,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev) |
2278 |
+ */ |
2279 |
+ static int _regulator_handle_consumer_enable(struct regulator *regulator) |
2280 |
+ { |
2281 |
++ int ret; |
2282 |
+ struct regulator_dev *rdev = regulator->rdev; |
2283 |
+ |
2284 |
+ lockdep_assert_held_once(&rdev->mutex.base); |
2285 |
+ |
2286 |
+ regulator->enable_count++; |
2287 |
+- if (regulator->uA_load && regulator->enable_count == 1) |
2288 |
+- return drms_uA_update(rdev); |
2289 |
++ if (regulator->uA_load && regulator->enable_count == 1) { |
2290 |
++ ret = drms_uA_update(rdev); |
2291 |
++ if (ret) |
2292 |
++ regulator->enable_count--; |
2293 |
++ return ret; |
2294 |
++ } |
2295 |
+ |
2296 |
+ return 0; |
2297 |
+ } |
2298 |
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
2299 |
+index 6d04b3323eb7e..33e33fff89865 100644 |
2300 |
+--- a/drivers/scsi/lpfc/lpfc_init.c |
2301 |
++++ b/drivers/scsi/lpfc/lpfc_init.c |
2302 |
+@@ -7893,7 +7893,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) |
2303 |
+ /* Allocate device driver memory */ |
2304 |
+ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); |
2305 |
+ if (rc) |
2306 |
+- return -ENOMEM; |
2307 |
++ goto out_destroy_workqueue; |
2308 |
+ |
2309 |
+ /* IF Type 2 ports get initialized now. */ |
2310 |
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= |
2311 |
+@@ -8309,6 +8309,9 @@ out_free_bsmbx: |
2312 |
+ lpfc_destroy_bootstrap_mbox(phba); |
2313 |
+ out_free_mem: |
2314 |
+ lpfc_mem_free(phba); |
2315 |
++out_destroy_workqueue: |
2316 |
++ destroy_workqueue(phba->wq); |
2317 |
++ phba->wq = NULL; |
2318 |
+ return rc; |
2319 |
+ } |
2320 |
+ |
2321 |
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2322 |
+index eb5ceb75a15ec..056837849ead5 100644 |
2323 |
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2324 |
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2325 |
+@@ -5279,7 +5279,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) |
2326 |
+ if (!fusion->log_to_span) { |
2327 |
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n", |
2328 |
+ __func__, __LINE__); |
2329 |
+- kfree(instance->ctrl_context); |
2330 |
+ return -ENOMEM; |
2331 |
+ } |
2332 |
+ } |
2333 |
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2334 |
+index 5351959fbaba3..9eb3d0b4891dd 100644 |
2335 |
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2336 |
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
2337 |
+@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) |
2338 |
+ fw_event = list_first_entry(&ioc->fw_event_list, |
2339 |
+ struct fw_event_work, list); |
2340 |
+ list_del_init(&fw_event->list); |
2341 |
++ fw_event_work_put(fw_event); |
2342 |
+ } |
2343 |
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
2344 |
+ |
2345 |
+@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) |
2346 |
+ if (cancel_work_sync(&fw_event->work)) |
2347 |
+ fw_event_work_put(fw_event); |
2348 |
+ |
2349 |
+- fw_event_work_put(fw_event); |
2350 |
+ } |
2351 |
+ ioc->fw_events_cleanup = 0; |
2352 |
+ } |
2353 |
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
2354 |
+index 7ab3c9e4d4783..b86f6e1f21b5c 100644 |
2355 |
+--- a/drivers/scsi/qla2xxx/qla_target.c |
2356 |
++++ b/drivers/scsi/qla2xxx/qla_target.c |
2357 |
+@@ -6961,14 +6961,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) |
2358 |
+ |
2359 |
+ if (ha->flags.msix_enabled) { |
2360 |
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { |
2361 |
+- if (IS_QLA2071(ha)) { |
2362 |
+- /* 4 ports Baker: Enable Interrupt Handshake */ |
2363 |
+- icb->msix_atio = 0; |
2364 |
+- icb->firmware_options_2 |= cpu_to_le32(BIT_26); |
2365 |
+- } else { |
2366 |
+- icb->msix_atio = cpu_to_le16(msix->entry); |
2367 |
+- icb->firmware_options_2 &= cpu_to_le32(~BIT_26); |
2368 |
+- } |
2369 |
++ icb->msix_atio = cpu_to_le16(msix->entry); |
2370 |
++ icb->firmware_options_2 &= cpu_to_le32(~BIT_26); |
2371 |
+ ql_dbg(ql_dbg_init, vha, 0xf072, |
2372 |
+ "Registering ICB vector 0x%x for atio que.\n", |
2373 |
+ msix->entry); |
2374 |
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
2375 |
+index 2f6468f22b489..dae1a85f1512c 100644 |
2376 |
+--- a/drivers/scsi/ufs/ufshcd.c |
2377 |
++++ b/drivers/scsi/ufs/ufshcd.c |
2378 |
+@@ -8476,6 +8476,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, |
2379 |
+ struct scsi_device *sdp; |
2380 |
+ unsigned long flags; |
2381 |
+ int ret, retries; |
2382 |
++ unsigned long deadline; |
2383 |
++ int32_t remaining; |
2384 |
+ |
2385 |
+ spin_lock_irqsave(hba->host->host_lock, flags); |
2386 |
+ sdp = hba->sdev_ufs_device; |
2387 |
+@@ -8508,9 +8510,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, |
2388 |
+ * callbacks hence set the RQF_PM flag so that it doesn't resume the |
2389 |
+ * already suspended childs. |
2390 |
+ */ |
2391 |
++ deadline = jiffies + 10 * HZ; |
2392 |
+ for (retries = 3; retries > 0; --retries) { |
2393 |
++ ret = -ETIMEDOUT; |
2394 |
++ remaining = deadline - jiffies; |
2395 |
++ if (remaining <= 0) |
2396 |
++ break; |
2397 |
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, |
2398 |
+- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); |
2399 |
++ remaining / HZ, 0, 0, RQF_PM, NULL); |
2400 |
+ if (!scsi_status_is_check_condition(ret) || |
2401 |
+ !scsi_sense_valid(&sshdr) || |
2402 |
+ sshdr.sense_key != UNIT_ATTENTION) |
2403 |
+diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
2404 |
+index 70ad0f3dce283..286f5d57c0cab 100644 |
2405 |
+--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
2406 |
++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
2407 |
+@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2408 |
+ const struct of_device_id *of_id = NULL; |
2409 |
+ struct device_node *dn; |
2410 |
+ void __iomem *base; |
2411 |
+- int ret, i; |
2412 |
++ int ret, i, s; |
2413 |
+ |
2414 |
+ /* AON ctrl registers */ |
2415 |
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); |
2416 |
+ if (IS_ERR(base)) { |
2417 |
+ pr_err("error mapping AON_CTRL\n"); |
2418 |
+- return PTR_ERR(base); |
2419 |
++ ret = PTR_ERR(base); |
2420 |
++ goto aon_err; |
2421 |
+ } |
2422 |
+ ctrl.aon_ctrl_base = base; |
2423 |
+ |
2424 |
+@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2425 |
+ /* Assume standard offset */ |
2426 |
+ ctrl.aon_sram = ctrl.aon_ctrl_base + |
2427 |
+ AON_CTRL_SYSTEM_DATA_RAM_OFS; |
2428 |
++ s = 0; |
2429 |
+ } else { |
2430 |
+ ctrl.aon_sram = base; |
2431 |
++ s = 1; |
2432 |
+ } |
2433 |
+ |
2434 |
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); |
2435 |
+@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2436 |
+ (const void **)&ddr_phy_data); |
2437 |
+ if (IS_ERR(base)) { |
2438 |
+ pr_err("error mapping DDR PHY\n"); |
2439 |
+- return PTR_ERR(base); |
2440 |
++ ret = PTR_ERR(base); |
2441 |
++ goto ddr_phy_err; |
2442 |
+ } |
2443 |
+ ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; |
2444 |
+ ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; |
2445 |
+@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2446 |
+ for_each_matching_node(dn, ddr_shimphy_dt_ids) { |
2447 |
+ i = ctrl.num_memc; |
2448 |
+ if (i >= MAX_NUM_MEMC) { |
2449 |
++ of_node_put(dn); |
2450 |
+ pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); |
2451 |
+ break; |
2452 |
+ } |
2453 |
+ |
2454 |
+ base = of_io_request_and_map(dn, 0, dn->full_name); |
2455 |
+ if (IS_ERR(base)) { |
2456 |
++ of_node_put(dn); |
2457 |
+ if (!ctrl.support_warm_boot) |
2458 |
+ break; |
2459 |
+ |
2460 |
+ pr_err("error mapping DDR SHIMPHY %d\n", i); |
2461 |
+- return PTR_ERR(base); |
2462 |
++ ret = PTR_ERR(base); |
2463 |
++ goto ddr_shimphy_err; |
2464 |
+ } |
2465 |
+ ctrl.memcs[i].ddr_shimphy_base = base; |
2466 |
+ ctrl.num_memc++; |
2467 |
+@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2468 |
+ for_each_matching_node(dn, brcmstb_memc_of_match) { |
2469 |
+ base = of_iomap(dn, 0); |
2470 |
+ if (!base) { |
2471 |
++ of_node_put(dn); |
2472 |
+ pr_err("error mapping DDR Sequencer %d\n", i); |
2473 |
+- return -ENOMEM; |
2474 |
++ ret = -ENOMEM; |
2475 |
++ goto brcmstb_memc_err; |
2476 |
+ } |
2477 |
+ |
2478 |
+ of_id = of_match_node(brcmstb_memc_of_match, dn); |
2479 |
+ if (!of_id) { |
2480 |
+ iounmap(base); |
2481 |
+- return -EINVAL; |
2482 |
++ of_node_put(dn); |
2483 |
++ ret = -EINVAL; |
2484 |
++ goto brcmstb_memc_err; |
2485 |
+ } |
2486 |
+ |
2487 |
+ ddr_seq_data = of_id->data; |
2488 |
+@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2489 |
+ dn = of_find_matching_node(NULL, sram_dt_ids); |
2490 |
+ if (!dn) { |
2491 |
+ pr_err("SRAM not found\n"); |
2492 |
+- return -EINVAL; |
2493 |
++ ret = -EINVAL; |
2494 |
++ goto brcmstb_memc_err; |
2495 |
+ } |
2496 |
+ |
2497 |
+ ret = brcmstb_init_sram(dn); |
2498 |
+ of_node_put(dn); |
2499 |
+ if (ret) { |
2500 |
+ pr_err("error setting up SRAM for PM\n"); |
2501 |
+- return ret; |
2502 |
++ goto brcmstb_memc_err; |
2503 |
+ } |
2504 |
+ |
2505 |
+ ctrl.pdev = pdev; |
2506 |
+ |
2507 |
+ ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); |
2508 |
+- if (!ctrl.s3_params) |
2509 |
+- return -ENOMEM; |
2510 |
++ if (!ctrl.s3_params) { |
2511 |
++ ret = -ENOMEM; |
2512 |
++ goto s3_params_err; |
2513 |
++ } |
2514 |
+ ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, |
2515 |
+ sizeof(*ctrl.s3_params), |
2516 |
+ DMA_TO_DEVICE); |
2517 |
+@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
2518 |
+ |
2519 |
+ out: |
2520 |
+ kfree(ctrl.s3_params); |
2521 |
+- |
2522 |
++s3_params_err: |
2523 |
++ iounmap(ctrl.boot_sram); |
2524 |
++brcmstb_memc_err: |
2525 |
++ for (i--; i >= 0; i--) |
2526 |
++ iounmap(ctrl.memcs[i].ddr_ctrl); |
2527 |
++ddr_shimphy_err: |
2528 |
++ for (i = 0; i < ctrl.num_memc; i++) |
2529 |
++ iounmap(ctrl.memcs[i].ddr_shimphy_base); |
2530 |
++ |
2531 |
++ iounmap(ctrl.memcs[0].ddr_phy_base); |
2532 |
++ddr_phy_err: |
2533 |
++ iounmap(ctrl.aon_ctrl_base); |
2534 |
++ if (s) |
2535 |
++ iounmap(ctrl.aon_sram); |
2536 |
++aon_err: |
2537 |
+ pr_warn("PM: initialization failed with code %d\n", ret); |
2538 |
+ |
2539 |
+ return ret; |
2540 |
+diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c |
2541 |
+index b4aa28420f2a8..4dc3a3f73511e 100644 |
2542 |
+--- a/drivers/soc/imx/gpcv2.c |
2543 |
++++ b/drivers/soc/imx/gpcv2.c |
2544 |
+@@ -237,6 +237,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) |
2545 |
+ } |
2546 |
+ } |
2547 |
+ |
2548 |
++ reset_control_assert(domain->reset); |
2549 |
++ |
2550 |
+ /* Enable reset clocks for all devices in the domain */ |
2551 |
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks); |
2552 |
+ if (ret) { |
2553 |
+@@ -244,7 +246,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) |
2554 |
+ goto out_regulator_disable; |
2555 |
+ } |
2556 |
+ |
2557 |
+- reset_control_assert(domain->reset); |
2558 |
++ /* delays for reset to propagate */ |
2559 |
++ udelay(5); |
2560 |
+ |
2561 |
+ if (domain->bits.pxx) { |
2562 |
+ /* request the domain to power up */ |
2563 |
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c |
2564 |
+index 6e662fb131d55..bd96ebb82c8ec 100644 |
2565 |
+--- a/drivers/tee/tee_shm.c |
2566 |
++++ b/drivers/tee/tee_shm.c |
2567 |
+@@ -9,6 +9,7 @@ |
2568 |
+ #include <linux/sched.h> |
2569 |
+ #include <linux/slab.h> |
2570 |
+ #include <linux/tee_drv.h> |
2571 |
++#include <linux/uaccess.h> |
2572 |
+ #include <linux/uio.h> |
2573 |
+ #include "tee_private.h" |
2574 |
+ |
2575 |
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c |
2576 |
+index 0e9217687f5c3..852e6c5643e5d 100644 |
2577 |
+--- a/drivers/vfio/vfio_iommu_type1.c |
2578 |
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -561,6 +561,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
+ ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
+ pages, NULL, NULL);
+ if (ret > 0) {
++ int i;
++
++ /*
++ * The zero page is always resident, we don't need to pin it
++ * and it falls into our invalid/reserved test so we don't
++ * unpin in put_pfn(). Unpin all zero pages in the batch here.
++ */
++ for (i = 0 ; i < ret; i++) {
++ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
++ unpin_user_page(pages[i]);
++ }
++
+ *pfn = page_to_pfn(pages[0]);
+ goto done;
+ }
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index 393894af26f84..2b00a9d554fc0 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ err_release_fb:
+ framebuffer_release(p);
+ err_disable:
++ pci_disable_device(dp);
+ err_out:
+ return rc;
+ }
+diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
+index ce699396d6bad..09ee27e7fc25f 100644
+--- a/drivers/video/fbdev/core/fbsysfs.c
++++ b/drivers/video/fbdev/core/fbsysfs.c
+@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
+ if (WARN_ON(refcount_read(&info->count)))
+ return;
+
++#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
++ mutex_destroy(&info->bl_curve_mutex);
++#endif
++
+ kfree(info->apertures);
+ kfree(info);
+ }
+diff --git a/fs/afs/flock.c b/fs/afs/flock.c
+index c4210a3964d8b..bbcc5afd15760 100644
+--- a/fs/afs/flock.c
++++ b/fs/afs/flock.c
+@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
+ if (call->error == 0) {
+ spin_lock(&vnode->lock);
+ trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
+- vnode->locked_at = call->reply_time;
++ vnode->locked_at = call->issue_time;
+ afs_schedule_lock_extension(vnode);
+ spin_unlock(&vnode->lock);
+ }
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index 4943413d9c5f7..7d37f63ef0f09 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -131,7 +131,7 @@ bad:
+
+ static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
+ {
+- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
++ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
+ }
+
+ static void xdr_decode_AFSCallBack(const __be32 **_bp,
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 0ad97a8fc0d49..567e61b553f56 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -138,7 +138,6 @@ struct afs_call {
+ bool need_attention; /* T if RxRPC poked us */
+ bool async; /* T if asynchronous */
+ bool upgrade; /* T to request service upgrade */
+- bool have_reply_time; /* T if have got reply_time */
+ bool intr; /* T if interruptible */
+ bool unmarshalling_error; /* T if an unmarshalling error occurred */
+ u16 service_id; /* Actual service ID (after upgrade) */
+@@ -152,7 +151,7 @@ struct afs_call {
+ } __attribute__((packed));
+ __be64 tmp64;
+ };
+- ktime_t reply_time; /* Time of first reply packet */
++ ktime_t issue_time; /* Time of issue of operation */
+ };
+
+ struct afs_call_type {
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index a5434f3e57c68..e3de7fea36435 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -347,6 +347,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ if (call->max_lifespan)
+ rxrpc_kernel_set_max_life(call->net->socket, rxcall,
+ call->max_lifespan);
++ call->issue_time = ktime_get_real();
+
+ /* send the request */
+ iov[0].iov_base = call->request;
+@@ -497,12 +498,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+ return;
+ }
+
+- if (!call->have_reply_time &&
+- rxrpc_kernel_get_reply_time(call->net->socket,
+- call->rxcall,
+- &call->reply_time))
+- call->have_reply_time = true;
+-
+ ret = call->type->deliver(call);
+ state = READ_ONCE(call->state);
+ if (ret == 0 && call->unmarshalling_error)
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 2b35cba8ad62b..88ea20e79ae27 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -239,8 +239,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
+ struct afs_callback *cb = &scb->callback;
+ ktime_t cb_expiry;
+
+- cb_expiry = call->reply_time;
+- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
++ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
+ cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
+ scb->have_cb = true;
+ *_bp += xdr_size(x);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 7a127d3c521f9..96958ca474bd4 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -392,10 +392,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
+ * since btrfs adds the pages one by one to a bio, and btrfs cannot
+ * increase the metadata reservation even if it increases the number of
+ * extents, it is safe to stick with the limit.
++ *
++ * With the zoned emulation, we can have non-zoned device on the zoned
++ * mode. In this case, we don't have a valid max zone append size. So,
++ * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size.
+ */
+- zone_info->max_zone_append_size =
+- min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+- (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
++ if (bdev_is_zoned(bdev)) {
++ zone_info->max_zone_append_size = min_t(u64,
++ (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
++ (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
++ } else {
++ zone_info->max_zone_append_size =
++ (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
++ }
+ if (!IS_ALIGNED(nr_sectors, zone_sectors))
+ zone_info->nr_zones++;
+
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 2f117c57160dc..26f9cd3282918 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -734,6 +734,28 @@ void debugfs_remove(struct dentry *dentry)
+ }
+ EXPORT_SYMBOL_GPL(debugfs_remove);
+
++/**
++ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
++ * @name: a pointer to a string containing the name of the item to look up.
++ * @parent: a pointer to the parent dentry of the item.
++ *
++ * This is the equivalent of doing something like
++ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
++ * handled for the directory being looked up.
++ */
++void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
++{
++ struct dentry *dentry;
++
++ dentry = debugfs_lookup(name, parent);
++ if (!dentry)
++ return;
++
++ debugfs_remove(dentry);
++ dput(dentry);
++}
++EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
++
+ /**
+ * debugfs_rename - rename a file/directory in the debugfs filesystem
+ * @old_dir: a pointer to the parent dentry for the renamed object. This
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 9524e155b38fa..b77acf09726c6 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -143,7 +143,6 @@ struct erofs_workgroup {
+ atomic_t refcount;
+ };
+
+-#if defined(CONFIG_SMP)
+ static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+ {
+@@ -172,34 +171,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+ }
+-#else
+-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+- int val)
+-{
+- preempt_disable();
+- /* no need to spin on UP platforms, let's just disable preemption. */
+- if (val != atomic_read(&grp->refcount)) {
+- preempt_enable();
+- return false;
+- }
+- return true;
+-}
+-
+-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+- int orig_val)
+-{
+- preempt_enable();
+-}
+-
+-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+-{
+- int v = atomic_read(&grp->refcount);
+-
+- /* workgroup is never freezed on uniprocessor systems */
+- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+- return v;
+-}
+-#endif /* !CONFIG_SMP */
+ #endif /* !CONFIG_EROFS_FS_ZIP */
+
+ /* we strictly follow PAGE_SIZE and no buffer head yet */
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 78219396788b4..32c3d0c454b19 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -78,6 +78,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
+ ctx->attr_gencount = nfsi->attr_gencount;
+ ctx->dir_cookie = 0;
+ ctx->dup_cookie = 0;
++ ctx->page_index = 0;
+ spin_lock(&dir->i_lock);
+ if (list_empty(&nfsi->open_files) &&
+ (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
+@@ -85,6 +86,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
+ NFS_INO_INVALID_DATA |
+ NFS_INO_REVAL_FORCED);
+ list_add(&ctx->list, &nfsi->open_files);
++ clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags);
+ spin_unlock(&dir->i_lock);
+ return ctx;
+ }
+@@ -626,8 +628,7 @@ void nfs_force_use_readdirplus(struct inode *dir)
+ if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
+ !list_empty(&nfsi->open_files)) {
+ set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+- invalidate_mapping_pages(dir->i_mapping,
+- nfsi->page_index + 1, -1);
++ set_bit(NFS_INO_FORCE_READDIR, &nfsi->flags);
+ }
+ }
+
+@@ -938,10 +939,8 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
+ sizeof(nfsi->cookieverf));
+ }
+ res = nfs_readdir_search_array(desc);
+- if (res == 0) {
+- nfsi->page_index = desc->page_index;
++ if (res == 0)
+ return 0;
+- }
+ nfs_readdir_page_unlock_and_put_cached(desc);
+ return res;
+ }
+@@ -1081,6 +1080,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_open_dir_context *dir_ctx = file->private_data;
+ struct nfs_readdir_descriptor *desc;
++ pgoff_t page_index;
+ int res;
+
+ dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+@@ -1111,10 +1111,15 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ desc->dir_cookie = dir_ctx->dir_cookie;
+ desc->dup_cookie = dir_ctx->dup_cookie;
+ desc->duped = dir_ctx->duped;
++ page_index = dir_ctx->page_index;
+ desc->attr_gencount = dir_ctx->attr_gencount;
+ memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
+ spin_unlock(&file->f_lock);
+
++ if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
++ list_is_singular(&nfsi->open_files))
++ invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
++
+ do {
+ res = readdir_search_pagecache(desc);
+
+@@ -1151,6 +1156,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ dir_ctx->dup_cookie = desc->dup_cookie;
+ dir_ctx->duped = desc->duped;
+ dir_ctx->attr_gencount = desc->attr_gencount;
++ dir_ctx->page_index = desc->page_index;
+ memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
+ spin_unlock(&file->f_lock);
+
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index a8693cc50c7ca..ad5114e480097 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -223,8 +223,10 @@ nfs_file_fsync_commit(struct file *file, int datasync)
+ int
+ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+- struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct inode *inode = file_inode(file);
++ struct nfs_inode *nfsi = NFS_I(inode);
++ long save_nredirtied = atomic_long_read(&nfsi->redirtied_pages);
++ long nredirtied;
+ int ret;
+
+ trace_nfs_fsync_enter(inode);
+@@ -239,15 +241,10 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ ret = pnfs_sync_inode(inode, !!datasync);
+ if (ret != 0)
+ break;
+- if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
++ nredirtied = atomic_long_read(&nfsi->redirtied_pages);
++ if (nredirtied == save_nredirtied)
+ break;
+- /*
+- * If nfs_file_fsync_commit detected a server reboot, then
+- * resend all dirty pages that might have been covered by
+- * the NFS_CONTEXT_RESEND_WRITES flag
+- */
+- start = 0;
+- end = LLONG_MAX;
++ save_nredirtied = nredirtied;
+ }
+
+ trace_nfs_fsync_exit(inode, ret);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index cb407af9e9e92..e4524635a129a 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -431,6 +431,23 @@ nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
+ return inode;
+ }
+
++static void nfs_inode_init_regular(struct nfs_inode *nfsi)
++{
++ atomic_long_set(&nfsi->nrequests, 0);
++ atomic_long_set(&nfsi->redirtied_pages, 0);
++ INIT_LIST_HEAD(&nfsi->commit_info.list);
++ atomic_long_set(&nfsi->commit_info.ncommit, 0);
++ atomic_set(&nfsi->commit_info.rpcs_out, 0);
++ mutex_init(&nfsi->commit_mutex);
++}
++
++static void nfs_inode_init_dir(struct nfs_inode *nfsi)
++{
++ nfsi->cache_change_attribute = 0;
++ memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
++ init_rwsem(&nfsi->rmdir_sem);
++}
++
+ /*
+ * This is our front-end to iget that looks up inodes by file handle
+ * instead of inode number.
+@@ -485,10 +502,12 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
+ inode->i_data.a_ops = &nfs_file_aops;
++ nfs_inode_init_regular(nfsi);
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
+ inode->i_fop = &nfs_dir_operations;
+ inode->i_data.a_ops = &nfs_dir_aops;
++ nfs_inode_init_dir(nfsi);
+ /* Deal with crossing mountpoints */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
+ fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+@@ -514,7 +533,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
+ inode->i_uid = make_kuid(&init_user_ns, -2);
+ inode->i_gid = make_kgid(&init_user_ns, -2);
+ inode->i_blocks = 0;
+- memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ nfsi->write_io = 0;
+ nfsi->read_io = 0;
+
+@@ -2282,14 +2300,7 @@ static void init_once(void *foo)
+ INIT_LIST_HEAD(&nfsi->open_files);
+ INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+ INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
+- INIT_LIST_HEAD(&nfsi->commit_info.list);
+- atomic_long_set(&nfsi->nrequests, 0);
+- atomic_long_set(&nfsi->commit_info.ncommit, 0);
+- atomic_set(&nfsi->commit_info.rpcs_out, 0);
+- init_rwsem(&nfsi->rmdir_sem);
+- mutex_init(&nfsi->commit_mutex);
+ nfs4_init_once(nfsi);
+- nfsi->cache_change_attribute = 0;
+ }
+
+ static int __init nfs_init_inodecache(void)
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index cdb29fd235492..be70874bc3292 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1394,10 +1394,12 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
+ */
+ static void nfs_redirty_request(struct nfs_page *req)
+ {
++ struct nfs_inode *nfsi = NFS_I(page_file_mapping(req->wb_page)->host);
++
+ /* Bump the transmission count */
+ req->wb_nio++;
+ nfs_mark_request_dirty(req);
+- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
++ atomic_long_inc(&nfsi->redirtied_pages);
+ nfs_end_page_writeback(req);
+ nfs_release_request(req);
+ }
+@@ -1870,7 +1872,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
+ /* We have a mismatch. Write the page again */
+ dprintk_cont(" mismatch\n");
+ nfs_mark_request_dirty(req);
+- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
++ atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
+ next:
+ nfs_unlock_and_release_request(req);
+ /* Latency breaker */
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index c841367ff8c95..25b4263d66d70 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
+
+ static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+ {
++ /*
++ * If somebody else already set this uptodate, they will
++ * have done the memory barrier, and a reader will thus
++ * see *some* valid buffer state.
++ *
++ * Any other serialization (with IO errors or whatever that
++ * might clear the bit) has to come from other state (eg BH_Lock).
++ */
++ if (test_bit(BH_Uptodate, &bh->b_state))
++ return;
++
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index c869f1e73d755..f60674692d365 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
+ void debugfs_remove(struct dentry *dentry);
+ #define debugfs_remove_recursive debugfs_remove
+
++void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
++
+ const struct file_operations *debugfs_real_fops(const struct file *filp);
+
+ int debugfs_file_get(struct dentry *dentry);
+@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
+ static inline void debugfs_remove_recursive(struct dentry *dentry)
+ { }
+
++static inline void debugfs_lookup_and_remove(const char *name,
++ struct dentry *parent)
++{ }
++
+ const struct file_operations *debugfs_real_fops(const struct file *filp);
+
+ static inline int debugfs_file_get(struct dentry *dentry)
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 66b6cc24ab8c9..71467d661fb66 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -103,6 +103,7 @@ struct nfs_open_dir_context {
+ __be32 verf[NFS_DIR_VERIFIER_SIZE];
+ __u64 dir_cookie;
+ __u64 dup_cookie;
++ pgoff_t page_index;
+ signed char duped;
+ };
+
+@@ -154,36 +155,40 @@ struct nfs_inode {
+ unsigned long attrtimeo_timestamp;
+
+ unsigned long attr_gencount;
+- /* "Generation counter" for the attribute cache. This is
+- * bumped whenever we update the metadata on the
+- * server.
+- */
+- unsigned long cache_change_attribute;
+
+ struct rb_root access_cache;
+ struct list_head access_cache_entry_lru;
+ struct list_head access_cache_inode_lru;
+
+- /*
+- * This is the cookie verifier used for NFSv3 readdir
+- * operations
+- */
+- __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+-
+- atomic_long_t nrequests;
+- struct nfs_mds_commit_info commit_info;
++ union {
++ /* Directory */
++ struct {
++ /* "Generation counter" for the attribute cache.
++ * This is bumped whenever we update the metadata
++ * on the server.
++ */
++ unsigned long cache_change_attribute;
++ /*
++ * This is the cookie verifier used for NFSv3 readdir
++ * operations
++ */
++ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
++ /* Readers: in-flight sillydelete RPC calls */
++ /* Writers: rmdir */
++ struct rw_semaphore rmdir_sem;
++ };
++ /* Regular file */
++ struct {
++ atomic_long_t nrequests;
++ atomic_long_t redirtied_pages;
++ struct nfs_mds_commit_info commit_info;
++ struct mutex commit_mutex;
++ };
++ };
+
+ /* Open contexts for shared mmap writes */
+ struct list_head open_files;
+
+- /* Readers: in-flight sillydelete RPC calls */
+- /* Writers: rmdir */
+- struct rw_semaphore rmdir_sem;
+- struct mutex commit_mutex;
+-
+- /* track last access to cached pages */
+- unsigned long page_index;
+-
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ struct nfs4_cached_acl *nfs4_acl;
+ /* NFSv4 state */
+@@ -272,6 +277,7 @@ struct nfs4_copy_state {
+ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
+ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
+ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
++#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */
+ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
+ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
+ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ae598ed86b50b..cfb889f66c703 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2232,6 +2232,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+ return skb_headlen(skb) + __skb_pagelen(skb);
+ }
+
++static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
++ int i, struct page *page,
++ int off, int size)
++{
++ skb_frag_t *frag = &shinfo->frags[i];
++
++ /*
++ * Propagate page pfmemalloc to the skb if we can. The problem is
++ * that not all callers have unique ownership of the page but rely
++ * on page_is_pfmemalloc doing the right thing(tm).
++ */
++ frag->bv_page = page;
++ frag->bv_offset = off;
++ skb_frag_size_set(frag, size);
++}
++
+ /**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+@@ -2248,17 +2264,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+ {
+- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+-
+- /*
+- * Propagate page pfmemalloc to the skb if we can. The problem is
+- * that not all callers have unique ownership of the page but rely
+- * on page_is_pfmemalloc doing the right thing(tm).
+- */
+- frag->bv_page = page;
+- frag->bv_offset = off;
+- skb_frag_size_set(frag, size);
+-
++ __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
+ page = compound_head(page);
+ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+@@ -2285,6 +2291,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+ skb_shinfo(skb)->nr_frags = i + 1;
+ }
+
++/**
++ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
++ * @skb: buffer containing fragment to be initialised
++ * @i: paged fragment index to initialise
++ * @page: the page to use for this fragment
++ * @off: the offset to the data with @page
++ * @size: the length of the data
++ *
++ * Variant of skb_fill_page_desc() which does not deal with
++ * pfmemalloc, if page is not owned by us.
++ */
++static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
++ struct page *page, int off,
++ int size)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++
++ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
++ shinfo->nr_frags = i + 1;
++}
++
+ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size, unsigned int truesize);
+
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index ae66dadd85434..0727276e7538c 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -75,6 +75,7 @@ struct udp_sock {
+ * For encapsulation sockets.
+ */
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
++ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
+ int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_destroy)(struct sock *sk);
+
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index afc7ce713657b..72394f441dad8 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
+ typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+ typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
++typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
++ struct sk_buff *skb,
++ unsigned int udp_offset);
+ typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+ typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
+ __u8 encap_type;
+ udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
++ udp_tunnel_encap_err_rcv_t encap_err_rcv;
+ udp_tunnel_encap_destroy_t encap_destroy;
+ udp_tunnel_gro_receive_t gro_receive;
+ udp_tunnel_gro_complete_t gro_complete;
+diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
+index f6542584ca139..f203f34dba12e 100644
+--- a/include/soc/at91/sama7-ddr.h
++++ b/include/soc/at91/sama7-ddr.h
+@@ -11,8 +11,6 @@
+ #ifndef __SAMA7_DDR_H__
+ #define __SAMA7_DDR_H__
+
+-#ifdef CONFIG_SOC_SAMA7
+-
+ /* DDR3PHY */
+ #define DDR3PHY_PIR (0x04) /* DDR3PHY PHY Initialization Register */
+ #define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */
+@@ -40,6 +38,14 @@
+ #define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */
+
+ #define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
++#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
++#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
++#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
++#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
++
++#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
++#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
++#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */
+
+ /* UDDRC */
+ #define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
+@@ -75,6 +81,4 @@
+ #define UDDRC_PCTRL_3 (0x6A0) /* UDDRC Port 3 Control Register */
+ #define UDDRC_PCTRL_4 (0x750) /* UDDRC Port 4 Control Register */
+
+-#endif /* CONFIG_SOC_SAMA7 */
+-
+ #endif /* __SAMA7_DDR_H__ */
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 416dd7db3fb2c..75c3881af0784 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2345,6 +2345,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ }
+ EXPORT_SYMBOL_GPL(task_cgroup_path);
+
++/**
++ * cgroup_attach_lock - Lock for ->attach()
++ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
++ *
++ * cgroup migration sometimes needs to stabilize threadgroups against forks and
++ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
++ * implementations (e.g. cpuset), also need to disable CPU hotplug.
++ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
++ * lead to deadlocks.
++ *
++ * Bringing up a CPU may involve creating and destroying tasks which requires
++ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
++ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
++ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
++ * waiting for an on-going CPU hotplug operation which in turn is waiting for
++ * the threadgroup_rwsem to be released to create new tasks. For more details:
++ *
++ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
++ *
++ * Resolve the situation by always acquiring cpus_read_lock() before optionally
++ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
++ * CPU hotplug is disabled on entry.
++ */
++static void cgroup_attach_lock(bool lock_threadgroup)
++{
++ cpus_read_lock();
++ if (lock_threadgroup)
++ percpu_down_write(&cgroup_threadgroup_rwsem);
++}
++
++/**
++ * cgroup_attach_unlock - Undo cgroup_attach_lock()
++ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
++ */
++static void cgroup_attach_unlock(bool lock_threadgroup)
++{
++ if (lock_threadgroup)
++ percpu_up_write(&cgroup_threadgroup_rwsem);
++ cpus_read_unlock();
++}
++
+ /**
+ * cgroup_migrate_add_task - add a migration target task to a migration context
+ * @task: target task
+@@ -2821,8 +2862,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
+ }
+
+ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+- bool *locked)
+- __acquires(&cgroup_threadgroup_rwsem)
++ bool *threadgroup_locked)
+ {
+ struct task_struct *tsk;
+ pid_t pid;
+@@ -2839,12 +2879,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ * Therefore, we can skip the global lock.
+ */
+ lockdep_assert_held(&cgroup_mutex);
+- if (pid || threadgroup) {
+- percpu_down_write(&cgroup_threadgroup_rwsem);
+- *locked = true;
+- } else {
+- *locked = false;
+- }
++ *threadgroup_locked = pid || threadgroup;
++ cgroup_attach_lock(*threadgroup_locked);
+
+ rcu_read_lock();
+ if (pid) {
+@@ -2875,17 +2911,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ goto out_unlock_rcu;
+
+ out_unlock_threadgroup:
+- if (*locked) {
+- percpu_up_write(&cgroup_threadgroup_rwsem);
+- *locked = false;
+- }
++ cgroup_attach_unlock(*threadgroup_locked);
++ *threadgroup_locked = false;
+ out_unlock_rcu:
+ rcu_read_unlock();
+ return tsk;
+ }
+
+-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
+- __releases(&cgroup_threadgroup_rwsem)
++void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
+ {
+ struct cgroup_subsys *ss;
+ int ssid;
+@@ -2893,8 +2926,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
+ /* release reference from cgroup_procs_write_start() */
+ put_task_struct(task);
+
+- if (locked)
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock(threadgroup_locked);
++
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
+@@ -2949,12 +2982,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+ struct cgroup_subsys_state *d_css;
+ struct cgroup *dsct;
+ struct css_set *src_cset;
++ bool has_tasks;
+ int ret;
+
+ lockdep_assert_held(&cgroup_mutex);
+
+- percpu_down_write(&cgroup_threadgroup_rwsem);
+-
+ /* look up all csses currently attached to @cgrp's subtree */
+ spin_lock_irq(&css_set_lock);
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
+@@ -2965,6 +2997,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+ }
+ spin_unlock_irq(&css_set_lock);
+
++ /*
++ * We need to write-lock threadgroup_rwsem while migrating tasks.
++ * However, if there are no source csets for @cgrp, changing its
++ * controllers isn't gonna produce any task migrations and the
++ * write-locking can be skipped safely.
++ */
++ has_tasks = !list_empty(&mgctx.preloaded_src_csets);
++ cgroup_attach_lock(has_tasks);
++
+ /* NULL dst indicates self on default hierarchy */
+ ret = cgroup_migrate_prepare_dst(&mgctx);
+ if (ret)
+@@ -2984,7 +3025,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+ ret = cgroup_migrate_execute(&mgctx);
+ out_finish:
+ cgroup_migrate_finish(&mgctx);
+- percpu_up_write(&cgroup_threadgroup_rwsem);
++ cgroup_attach_unlock(has_tasks);
+ return ret;
+ }
+
+@@ -4932,13 +4973,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ struct task_struct *task;
+ const struct cred *saved_cred;
+ ssize_t ret;
+- bool locked;
++ bool threadgroup_locked;
+
+ dst_cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!dst_cgrp)
+ return -ENODEV;
+
+- task = cgroup_procs_write_start(buf, threadgroup, &locked);
++ task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
+ ret = PTR_ERR_OR_ZERO(task);
+ if (ret)
+ goto out_unlock;
+@@ -4964,7 +5005,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
+
+ out_finish:
+- cgroup_procs_write_finish(task, locked);
++ cgroup_procs_write_finish(task, threadgroup_locked);
+ out_unlock:
+ cgroup_kn_unlock(of->kn);
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 9c5b659db63f4..3213d3c8ea0a8 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2249,7 +2249,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
+- cpus_read_lock();
++ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
+ percpu_down_write(&cpuset_rwsem);
+
+ guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+@@ -2303,7 +2303,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ wake_up(&cpuset_attach_wq);
+
+ percpu_up_write(&cpuset_rwsem);
+- cpus_read_unlock();
+ }
+
+ /* The various types of files and directories in a cpuset file system */
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index e62fb7a4da694..018f140aaaf4e 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -435,7 +435,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
+ }
+ }
+
+-#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
++static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
++{
++ return start + (idx << IO_TLB_SHIFT);
++}
+
+ /*
+ * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 89475c994ca91..908ba3c93893f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1153,6 +1153,7 @@ void mmput_async(struct mm_struct *mm)
+ schedule_work(&mm->async_put_work);
+ }
+ }
++EXPORT_SYMBOL_GPL(mmput_async);
+ #endif
+
+ /**
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index ed3f24a81549c..9df585b9467e4 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1561,6 +1561,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ /* Ensure it is not in reserved area nor out of text */
+ if (!(core_kernel_text((unsigned long) p->addr) ||
+ is_module_text_address((unsigned long) p->addr)) ||
++ in_gate_area_no_mm((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+ jump_label_text_reserved(p->addr, p->addr) ||
+ static_call_text_reserved(p->addr, p->addr) ||
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 7a2d32d2025fe..34c5ff3a0669b 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -416,7 +416,7 @@ void update_sched_domain_debugfs(void)
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "cpu%d", cpu);
+- debugfs_remove(debugfs_lookup(buf, sd_dentry));
++ debugfs_lookup_and_remove(buf, sd_dentry);
+ d_cpu = debugfs_create_dir(buf, sd_dentry);
+
+ i = 0;
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 67c7979c40c0b..106f9813841a1 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -128,7 +128,8 @@ static bool check_user_trigger(struct trace_event_file *file)
+ {
+ struct event_trigger_data *data;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ list_for_each_entry_rcu(data, &file->triggers, list,
++ lockdep_is_held(&event_mutex)) {
+ if (data->flags & EVENT_TRIGGER_FL_PROBE)
+ continue;
+ return true;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 859303aae1809..b78861b8e0139 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1125,7 +1125,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
+ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_alloc(__va(phys), size, min_count, gfp);
+ }
+ EXPORT_SYMBOL(kmemleak_alloc_phys);
+@@ -1139,7 +1139,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
+ */
+ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_free_part(__va(phys), size);
+ }
+ EXPORT_SYMBOL(kmemleak_free_part_phys);
+@@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
+ */
+ void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_not_leak(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+@@ -1163,7 +1163,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+ */
+ void __ref kmemleak_ignore_phys(phys_addr_t phys)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_ignore(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_ignore_phys);
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 10a2c7bca7199..a718204c4bfdd 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ /* - Bridged-and-DNAT'ed traffic doesn't
+ * require ip_forwarding. */
+ if (rt->dst.dev == dev) {
++ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ goto bridged_dnat;
+ }
+@@ -413,6 +414,7 @@ bridged_dnat:
+ kfree_skb(skb);
+ return 0;
+ }
++ skb_dst_drop(skb);
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index e4e0c836c3f51..6b07f30675bb0 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ kfree_skb(skb);
+ return 0;
+ }
++ skb_dst_drop(skb);
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 15ab9ffb27fe9..28e5f921dcaf4 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+ page_ref_sub(last_head, refs);
+ refs = 0;
+ }
+- skb_fill_page_desc(skb, frag++, head, start, size);
++ skb_fill_page_desc_noacc(skb, frag++, head, start, size);
+ }
+ if (refs)
+ page_ref_sub(last_head, refs);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 563848242ad33..3c193e7d4bc67 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4188,9 +4188,8 @@ normal:
+ SKB_GSO_CB(nskb)->csum_start =
+ skb_headroom(nskb) + doffset;
+ } else {
+- skb_copy_bits(head_skb, offset,
+- skb_put(nskb, len),
+- len);
++ if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
++ goto err;
+ }
+ continue;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0ebef2a5950cd..4f6b897ccf23f 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1002,7 +1002,7 @@ new_segment:
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else {
+ get_page(page);
+- skb_fill_page_desc(skb, i, page, offset, copy);
++ skb_fill_page_desc_noacc(skb, i, page, offset, copy);
+ }
+
+ if (!(flags & MSG_NO_SHARED_FRAGS))
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 7fd7e7cba0c92..686e210d89c21 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2506,6 +2506,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
+ return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
+ }
+
++static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
++ /* Hold old state until something *above* high_seq
++ * is ACKed. For Reno it is MUST to prevent false
++ * fast retransmits (RFC2582). SACK TCP is safe. */
++ if (!tcp_any_retrans_done(sk))
++ tp->retrans_stamp = 0;
++ return true;
++ }
++ return false;
++}
++
+ /* People celebrate: "We love our President!" */
+ static bool tcp_try_undo_recovery(struct sock *sk)
+ {
+@@ -2528,14 +2543,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
+ } else if (tp->rack.reo_wnd_persist) {
+ tp->rack.reo_wnd_persist--;
+ }
+- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+- /* Hold old state until something *above* high_seq
+- * is ACKed. For Reno it is MUST to prevent false
+- * fast retransmits (RFC2582). SACK TCP is safe. */
+- if (!tcp_any_retrans_done(sk))
+- tp->retrans_stamp = 0;
++ if (tcp_is_non_sack_preventing_reopen(sk))
+ return true;
+- }
+ tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
+ return false;
+@@ -2571,6 +2580,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPSPURIOUSRTOS);
+ inet_csk(sk)->icsk_retransmits = 0;
++ if (tcp_is_non_sack_preventing_reopen(sk))
++ return true;
+ if (frto_undo || tcp_is_sack(tp)) {
+ tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index efef7ba44e1d6..75d1977ecc07e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -781,6 +781,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ */
+ if (tunnel) {
+ /* ...not for tunnels though: we don't have a sending socket */
++ if (udp_sk(sk)->encap_err_rcv)
++ udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
+ goto out;
+ }
+ if (!inet->recverr) {
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index b97e3635acf50..46101fd67a477 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -75,6 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+
+ udp_sk(sk)->encap_type = cfg->encap_type;
+ udp_sk(sk)->encap_rcv = cfg->encap_rcv;
++ udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
+ udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
+ udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+ udp_sk(sk)->gro_receive = cfg->gro_receive;
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index fa6b64c95d3ae..0c7c6fc16c3c3 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
+ goto out_unlock;
+ }
+
++ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
++
+ if (hinfo) {
+ err = seg6_hmac_info_del(net, hmackeyid);
+ if (err)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 4a9afdbd5f292..07726a51a3f09 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -614,8 +614,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+
+ /* Tunnels don't have an application socket: don't pass errors back */
+- if (tunnel)
++ if (tunnel) {
++ if (udp_sk(sk)->encap_err_rcv)
++ udp_sk(sk)->encap_err_rcv(sk, skb, offset);
+ goto out;
++ }
+
+ if (!np->recverr) {
+ if (!harderr || sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
+index 08ee4e760a3d2..18b90e334b5bd 100644
+--- a/net/netfilter/nf_conntrack_irc.c
++++ b/net/netfilter/nf_conntrack_irc.c
+@@ -188,8 +188,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+
+ /* dcc_ip can be the internal OR external (NAT'ed) IP */
+ tuple = &ct->tuplehash[dir].tuple;
+- if (tuple->src.u3.ip != dcc_ip &&
+- tuple->dst.u3.ip != dcc_ip) {
++ if ((tuple->src.u3.ip != dcc_ip &&
++ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
++ dcc_port == 0) {
+ net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+ &tuple->src.u3.ip,
+ &dcc_ip, dcc_port);
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 3cee5d8ee7027..1ecfdc4f23be8 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -671,6 +671,37 @@ static bool tcp_in_window(struct nf_conn *ct,
+ tn->tcp_be_liberal)
+ res = true;
+ if (!res) {
++ bool seq_ok = before(seq, sender->td_maxend + 1);
++
++ if (!seq_ok) {
++ u32 overshot = end - sender->td_maxend + 1;
++ bool ack_ok;
++
++ ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
++
++ if (in_recv_win &&
++ ack_ok &&
++ overshot <= receiver->td_maxwin &&
++ before(sack, receiver->td_end + 1)) {
++ /* Work around TCPs that send more bytes than allowed by
++ * the receive window.
++ *
++ * If the (marked as invalid) packet is allowed to pass by
++ * the ruleset and the peer acks this data, then it's possible
++ * all future packets will trigger 'ACK is over upper bound' check. |
3815 |
++ * |
3816 |
++ * Thus if only the sequence check fails then do update td_end so |
3817 |
++ * possible ACK for this data can update internal state. |
3818 |
++ */ |
3819 |
++ sender->td_end = end; |
3820 |
++ sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; |
3821 |
++ |
3822 |
++ nf_ct_l4proto_log_invalid(skb, ct, hook_state, |
3823 |
++ "%u bytes more than expected", overshot); |
3824 |
++ return res; |
3825 |
++ } |
3826 |
++ } |
3827 |
++ |
3828 |
+ nf_ct_l4proto_log_invalid(skb, ct, hook_state, |
3829 |
+ "%s", |
3830 |
+ before(seq, sender->td_maxend + 1) ? |
3831 |
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
3832 |
+index d8ca55d6be409..d35d09df83fee 100644 |
3833 |
+--- a/net/netfilter/nf_tables_api.c |
3834 |
++++ b/net/netfilter/nf_tables_api.c |
3835 |
+@@ -2072,8 +2072,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, |
3836 |
+ chain->flags |= NFT_CHAIN_BASE | flags; |
3837 |
+ basechain->policy = NF_ACCEPT; |
3838 |
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD && |
3839 |
+- !nft_chain_offload_support(basechain)) |
3840 |
++ !nft_chain_offload_support(basechain)) { |
3841 |
++ list_splice_init(&basechain->hook_list, &hook->list); |
3842 |
+ return -EOPNOTSUPP; |
3843 |
++ } |
3844 |
+ |
3845 |
+ flow_block_init(&basechain->flow_block); |
3846 |
+ |
3847 |
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h |
3848 |
+index f2d593e27b64f..f2e3fb77a02d3 100644 |
3849 |
+--- a/net/rxrpc/ar-internal.h |
3850 |
++++ b/net/rxrpc/ar-internal.h |
3851 |
+@@ -990,6 +990,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *); |
3852 |
+ /* |
3853 |
+ * peer_event.c |
3854 |
+ */ |
3855 |
++void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset); |
3856 |
+ void rxrpc_error_report(struct sock *); |
3857 |
+ void rxrpc_peer_keepalive_worker(struct work_struct *); |
3858 |
+ |
3859 |
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c |
3860 |
+index 6a1611b0e3037..ef43fe8bdd2ff 100644 |
3861 |
+--- a/net/rxrpc/local_object.c |
3862 |
++++ b/net/rxrpc/local_object.c |
3863 |
+@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) |
3864 |
+ |
3865 |
+ tuncfg.encap_type = UDP_ENCAP_RXRPC; |
3866 |
+ tuncfg.encap_rcv = rxrpc_input_packet; |
3867 |
++ tuncfg.encap_err_rcv = rxrpc_encap_err_rcv; |
3868 |
+ tuncfg.sk_user_data = local; |
3869 |
+ setup_udp_tunnel_sock(net, local->socket, &tuncfg); |
3870 |
+ |
3871 |
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index be032850ae8ca..32561e9567fe3 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -16,22 +16,105 @@
+ #include <net/sock.h>
+ #include <net/af_rxrpc.h>
+ #include <net/ip.h>
++#include <net/icmp.h>
+ #include "ar-internal.h"
+ 
++static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
+ static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+ static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+ enum rxrpc_call_completion);
+ 
+ /*
+- * Find the peer associated with an ICMP packet.
++ * Find the peer associated with an ICMPv4 packet.
+ */
+ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+- const struct sk_buff *skb,
++ struct sk_buff *skb,
++ unsigned int udp_offset,
++ unsigned int *info,
+ struct sockaddr_rxrpc *srx)
+ {
+- struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
++ struct iphdr *ip, *ip0 = ip_hdr(skb);
++ struct icmphdr *icmp = icmp_hdr(skb);
++ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
+ 
+- _enter("");
++ _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
++
++ switch (icmp->type) {
++ case ICMP_DEST_UNREACH:
++ *info = ntohs(icmp->un.frag.mtu);
++ fallthrough;
++ case ICMP_TIME_EXCEEDED:
++ case ICMP_PARAMETERPROB:
++ ip = (struct iphdr *)((void *)icmp + 8);
++ break;
++ default:
++ return NULL;
++ }
++
++ memset(srx, 0, sizeof(*srx));
++ srx->transport_type = local->srx.transport_type;
++ srx->transport_len = local->srx.transport_len;
++ srx->transport.family = local->srx.transport.family;
++
++ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
++ * versa?
++ */
++ switch (srx->transport.family) {
++ case AF_INET:
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
++ sizeof(struct in_addr));
++ break;
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case AF_INET6:
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
++ sizeof(struct in_addr));
++ break;
++#endif
++
++ default:
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ _net("ICMP {%pISp}", &srx->transport);
++ return rxrpc_lookup_peer_rcu(local, srx);
++}
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++/*
++ * Find the peer associated with an ICMPv6 packet.
++ */
++static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
++ struct sk_buff *skb,
++ unsigned int udp_offset,
++ unsigned int *info,
++ struct sockaddr_rxrpc *srx)
++{
++ struct icmp6hdr *icmp = icmp6_hdr(skb);
++ struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
++ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
++
++ _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
++
++ switch (icmp->icmp6_type) {
++ case ICMPV6_DEST_UNREACH:
++ *info = ntohl(icmp->icmp6_mtu);
++ fallthrough;
++ case ICMPV6_PKT_TOOBIG:
++ case ICMPV6_TIME_EXCEED:
++ case ICMPV6_PARAMPROB:
++ ip = (struct ipv6hdr *)((void *)icmp + 8);
++ break;
++ default:
++ return NULL;
++ }
+ 
+ memset(srx, 0, sizeof(*srx));
+ srx->transport_type = local->srx.transport_type;
+@@ -41,6 +124,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
+ * versa?
+ */
++ switch (srx->transport.family) {
++ case AF_INET:
++ _net("Rx ICMP6 on v4 sock");
++ srx->transport_len = sizeof(srx->transport.sin);
++ srx->transport.family = AF_INET;
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin.sin_addr,
++ &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
++ break;
++ case AF_INET6:
++ _net("Rx ICMP6");
++ srx->transport.sin.sin_port = udp->dest;
++ memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
++ sizeof(struct in6_addr));
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ _net("ICMP {%pISp}", &srx->transport);
++ return rxrpc_lookup_peer_rcu(local, srx);
++}
++#endif /* CONFIG_AF_RXRPC_IPV6 */
++
++/*
++ * Handle an error received on the local endpoint as a tunnel.
++ */
++void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
++ unsigned int udp_offset)
++{
++ struct sock_extended_err ee;
++ struct sockaddr_rxrpc srx;
++ struct rxrpc_local *local;
++ struct rxrpc_peer *peer;
++ unsigned int info = 0;
++ int err;
++ u8 version = ip_hdr(skb)->version;
++ u8 type = icmp_hdr(skb)->type;
++ u8 code = icmp_hdr(skb)->code;
++
++ rcu_read_lock();
++ local = rcu_dereference_sk_user_data(sk);
++ if (unlikely(!local)) {
++ rcu_read_unlock();
++ return;
++ }
++
++ rxrpc_new_skb(skb, rxrpc_skb_received);
++
++ switch (ip_hdr(skb)->version) {
++ case IPVERSION:
++ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
++ &info, &srx);
++ break;
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case 6:
++ peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
++ &info, &srx);
++ break;
++#endif
++ default:
++ rcu_read_unlock();
++ return;
++ }
++
++ if (peer && !rxrpc_get_peer_maybe(peer))
++ peer = NULL;
++ if (!peer) {
++ rcu_read_unlock();
++ return;
++ }
++
++ memset(&ee, 0, sizeof(ee));
++
++ switch (version) {
++ case IPVERSION:
++ switch (type) {
++ case ICMP_DEST_UNREACH:
++ switch (code) {
++ case ICMP_FRAG_NEEDED:
++ rxrpc_adjust_mtu(peer, info);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++ return;
++ default:
++ break;
++ }
++
++ err = EHOSTUNREACH;
++ if (code <= NR_ICMP_UNREACH) {
++ /* Might want to do something different with
++ * non-fatal errors
++ */
++ //harderr = icmp_err_convert[code].fatal;
++ err = icmp_err_convert[code].errno;
++ }
++ break;
++
++ case ICMP_TIME_EXCEEDED:
++ err = EHOSTUNREACH;
++ break;
++ default:
++ err = EPROTO;
++ break;
++ }
++
++ ee.ee_origin = SO_EE_ORIGIN_ICMP;
++ ee.ee_type = type;
++ ee.ee_code = code;
++ ee.ee_errno = err;
++ break;
++
++#ifdef CONFIG_AF_RXRPC_IPV6
++ case 6:
++ switch (type) {
++ case ICMPV6_PKT_TOOBIG:
++ rxrpc_adjust_mtu(peer, info);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++ return;
++ }
++
++ icmpv6_err_convert(type, code, &err);
++
++ if (err == EACCES)
++ err = EHOSTUNREACH;
++
++ ee.ee_origin = SO_EE_ORIGIN_ICMP6;
++ ee.ee_type = type;
++ ee.ee_code = code;
++ ee.ee_errno = err;
++ break;
++#endif
++ }
++
++ trace_rxrpc_rx_icmp(peer, &ee, &srx);
++
++ rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
++ rcu_read_unlock();
++ rxrpc_put_peer(peer);
++}
++
++/*
++ * Find the peer associated with a local error.
++ */
++static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
++ const struct sk_buff *skb,
++ struct sockaddr_rxrpc *srx)
++{
++ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
++
++ _enter("");
++
++ memset(srx, 0, sizeof(*srx));
++ srx->transport_type = local->srx.transport_type;
++ srx->transport_len = local->srx.transport_len;
++ srx->transport.family = local->srx.transport.family;
++
+ switch (srx->transport.family) {
+ case AF_INET:
+ srx->transport_len = sizeof(srx->transport.sin);
+@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
+ /*
+ * Handle an MTU/fragmentation problem.
+ */
+-static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
++static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
+ {
+- u32 mtu = serr->ee.ee_info;
+-
+ _net("Rx ICMP Fragmentation Needed (%d)", mtu);
+ 
+ /* wind down the local interface MTU */
+@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk)
+ struct sock_exterr_skb *serr;
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_local *local;
+- struct rxrpc_peer *peer;
++ struct rxrpc_peer *peer = NULL;
+ struct sk_buff *skb;
+ 
+ rcu_read_lock();
+@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk)
+ }
+ rxrpc_new_skb(skb, rxrpc_skb_received);
+ serr = SKB_EXT_ERR(skb);
+- if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
+- _leave("UDP empty message");
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- return;
+- }
+ 
+- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
+- if (peer && !rxrpc_get_peer_maybe(peer))
+- peer = NULL;
+- if (!peer) {
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- _leave(" [no peer]");
+- return;
+- }
+-
+- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+-
+- if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
+- serr->ee.ee_type == ICMP_DEST_UNREACH &&
+- serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
+- rxrpc_adjust_mtu(peer, serr);
+- rcu_read_unlock();
+- rxrpc_free_skb(skb, rxrpc_skb_freed);
+- rxrpc_put_peer(peer);
+- _leave(" [MTU update]");
+- return;
++ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
++ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
++ if (peer && !rxrpc_get_peer_maybe(peer))
++ peer = NULL;
++ if (peer) {
++ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
++ rxrpc_store_error(peer, serr);
++ }
+ }
+ 
+- rxrpc_store_error(peer, serr);
+ rcu_read_unlock();
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ rxrpc_put_peer(peer);
+-
+ _leave("");
+ }
+ 
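The lookup helpers added above depend on the layout of ICMP errors: the reporting router echoes the failing datagram's original IP header plus at least the first eight octets of its payload back after the 8-byte ICMP header, so the inner UDP ports are always reachable. That is why the code steps (void *)icmp + 8 to the embedded header and reads the UDP destination port to identify the peer. A compact restatement, under the same assumptions as the hunk:

    /* Sketch: finding the embedded headers inside an ICMPv4 error.
     * Per RFC 792 the offending datagram's IP header and first 8 payload
     * octets follow the ICMP header, so the UDP ports survive. */
    struct icmphdr *icmp      = icmp_hdr(skb);
    struct iphdr   *inner_ip  = (struct iphdr *)((void *)icmp + 8);
    struct udphdr  *inner_udp = (struct udphdr *)(skb->data + udp_offset);
    __be16 peer_port = inner_udp->dest;  /* port of the unreachable peer */

For PMTU events (ICMP_FRAG_NEEDED, ICMPV6_PKT_TOOBIG) the handler short-circuits into rxrpc_adjust_mtu(); every other type is converted to an errno and fanned out to the affected calls via rxrpc_distribute_error().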
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 08aab5c01437d..db47844f4ac99 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
+ * directly into the target buffer.
+ */
+ sg = _sg;
+- nsg = skb_shinfo(skb)->nr_frags;
++ nsg = skb_shinfo(skb)->nr_frags + 1;
+ if (nsg <= 4) {
+ nsg = 4;
+ } else {
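The rxkad one-liner fixes a scatterlist sizing bug: nr_frags counts only an skb's paged fragments, while the linear head area needs its own entry, so an array sized without the + 1 can be overrun by skb_to_sgvec(). A hedged sketch of the corrected sizing:

    /* Sketch: one scatterlist entry for the linear head area, plus one
     * per page fragment, before handing the skb to skb_to_sgvec(). */
    int offset = 0, len = skb->len;
    int nsg = skb_shinfo(skb)->nr_frags + 1;
    struct scatterlist *sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);

    if (sg) {
            sg_init_table(sg, nsg);
            skb_to_sgvec(skb, sg, offset, len);
    }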
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 3d061a13d7ed2..2829455211f8c 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
+ }
+ }
+ 
+-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
++static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
+ {
+ u32 sfbhash;
+ 
+- sfbhash = sfb_hash(skb, 0);
++ sfbhash = cb->hashes[0];
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 0, q);
+ 
+- sfbhash = sfb_hash(skb, 1);
++ sfbhash = cb->hashes[1];
+ if (sfbhash)
+ increment_one_qlen(sfbhash, 1, q);
+ }
+@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+ 
+ struct sfb_sched_data *q = qdisc_priv(sch);
++ unsigned int len = qdisc_pkt_len(skb);
+ struct Qdisc *child = q->qdisc;
+ struct tcf_proto *fl;
++ struct sfb_skb_cb cb;
+ int i;
+ u32 p_min = ~0;
+ u32 minqlen = ~0;
+@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+ 
+ enqueue:
++ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
+ ret = qdisc_enqueue(skb, child, to_free);
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+- qdisc_qstats_backlog_inc(sch, skb);
++ sch->qstats.backlog += len;
+ sch->q.qlen++;
+- increment_qlen(skb, q);
++ increment_qlen(&cb, q);
+ } else if (net_xmit_drop_count(ret)) {
+ q->stats.childdrop++;
+ qdisc_qstats_drop(sch);
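The sch_sfb change closes a use-after-free: qdisc_enqueue() may free the skb (for example when the child qdisc drops it), yet the old code afterwards read the packet length and the SFB hashes out of skb->cb. The fix snapshots both before the call and touches only the copies on the success path:

    /* Copy-before-enqueue pattern from the hunk: capture everything
     * needed after qdisc_enqueue(), which may consume the skb. */
    unsigned int len = qdisc_pkt_len(skb);
    struct sfb_skb_cb cb;

    memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
    ret = qdisc_enqueue(skb, child, to_free);   /* skb may be gone now */
    if (ret == NET_XMIT_SUCCESS) {
            sch->qstats.backlog += len;         /* not qdisc_pkt_len(skb) */
            increment_qlen(&cb, q);             /* uses the cached hashes */
    }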
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index 2f4d23238a7e3..9618e4429f0fe 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
+ 
+ static int map_get(u64 up_map, int i)
+ {
+- return (up_map & (1 << i)) >> i;
++ return (up_map & (1ULL << i)) >> i;
+ }
+ 
+ static struct tipc_peer *peer_prev(struct tipc_peer *peer)
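In the TIPC fix, 1 << i is evaluated as a 32-bit int, so shifts by 32 or more are undefined behaviour and bits in the upper half of the 64-bit map were never read back correctly; the 1ULL constant widens the shift to 64 bits. A standalone demonstration of the pitfall:

    /* Userspace demo of the 32- vs 64-bit shift pitfall fixed above. */
    #include <stdio.h>
    #include <stdint.h>

    static int map_get(uint64_t up_map, int i)
    {
            return (up_map & (1ULL << i)) >> i;  /* 1 << i is UB for i >= 32 */
    }

    int main(void)
    {
            uint64_t map = 1ULL << 40;
            printf("bit 40 = %d\n", map_get(map, 40));  /* prints 1 */
            return 0;
    }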
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index f158f0abd25d8..ca4a692fe1c36 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1664,14 +1664,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ runtime = substream->runtime;
+ if (atomic_read(&substream->mmap_count))
+ goto __direct;
+- err = snd_pcm_oss_make_ready(substream);
+- if (err < 0)
+- return err;
+ atomic_inc(&runtime->oss.rw_ref);
+ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ atomic_dec(&runtime->oss.rw_ref);
+ return -ERESTARTSYS;
+ }
++ err = snd_pcm_oss_make_ready_locked(substream);
++ if (err < 0)
++ goto unlock;
+ format = snd_pcm_oss_format_from(runtime->oss.format);
+ width = snd_pcm_format_physical_width(format);
+ if (runtime->oss.buffer_used > 0) {
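The pcm_oss reordering moves stream setup inside params_lock: snd_pcm_oss_make_ready() could race against concurrent parameter changes, so the call now happens after the mutex is taken, via the _locked variant, and its error path unwinds through the function's existing unlock label. The resulting shape, with names from the hunk (the unlock label is assumed to release the mutex and drop the reference, as elsewhere in this file):

    atomic_inc(&runtime->oss.rw_ref);
    if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
            atomic_dec(&runtime->oss.rw_ref);
            return -ERESTARTSYS;
    }
    err = snd_pcm_oss_make_ready_locked(substream);
    if (err < 0)
            goto unlock;  /* keeps rw_ref and params_lock balanced */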
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 9b4a7cdb103ad..12f12a294df5a 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -605,17 +605,18 @@ static unsigned int loopback_jiffies_timer_pos_update
+ cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
+ struct loopback_pcm *dpcm_capt =
+ cable->streams[SNDRV_PCM_STREAM_CAPTURE];
+- unsigned long delta_play = 0, delta_capt = 0;
++ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
+ unsigned int running, count1, count2;
+ 
++ cur_jiffies = jiffies;
+ running = cable->running ^ cable->pause;
+ if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
+- delta_play = jiffies - dpcm_play->last_jiffies;
++ delta_play = cur_jiffies - dpcm_play->last_jiffies;
+ dpcm_play->last_jiffies += delta_play;
+ }
+ 
+ if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
+- delta_capt = jiffies - dpcm_capt->last_jiffies;
++ delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
+ dpcm_capt->last_jiffies += delta_capt;
+ }
+ 
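The aloop change is a classic snapshot fix: jiffies can tick between the two reads, letting the playback and capture deltas disagree within a single update so the two positions drift apart. Reading the clock once makes both deltas consistent:

    /* General form of the fix: sample a moving time source once,
     * then derive every delta from that snapshot. */
    unsigned long cur_jiffies = jiffies;                /* single read */
    delta_play = cur_jiffies - dpcm_play->last_jiffies;
    delta_capt = cur_jiffies - dpcm_capt->last_jiffies;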
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index b2701a4452d86..48af77ae8020f 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -124,7 +124,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
+ epcm->voices[0]->epcm = epcm;
+ if (voices > 1) {
+ for (i = 1; i < voices; i++) {
+- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
++ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
+ epcm->voices[i]->epcm = epcm;
+ }
+ }
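For emu10k1, the voice array has a fixed size (NUM_G, 64 on this hardware), and a multi-voice allocation that starts near the end of it would index past the array; reducing the index modulo NUM_G wraps the block around instead:

    /* Sketch: treat emu->voices[] as a ring of NUM_G entries so a block
     * allocated near the end wraps instead of overrunning the array. */
    for (i = 1; i < voices; i++)
            epcm->voices[i] =
                    &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];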
+diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c
+index d243800464352..bcca1cf3cd7b6 100644
+--- a/sound/soc/atmel/mchp-spdiftx.c
++++ b/sound/soc/atmel/mchp-spdiftx.c
+@@ -196,8 +196,7 @@ struct mchp_spdiftx_dev {
+ struct clk *pclk;
+ struct clk *gclk;
+ unsigned int fmt;
+- const struct mchp_i2s_caps *caps;
+- int gclk_enabled:1;
++ unsigned int gclk_enabled:1;
+ };
+ 
+ static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
+@@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids);
+ static int mchp_spdiftx_probe(struct platform_device *pdev)
+ {
+- struct device_node *np = pdev->dev.of_node;
+- const struct of_device_id *match;
+ struct mchp_spdiftx_dev *dev;
+ struct resource *mem;
+ struct regmap *regmap;
+@@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev)
+ if (!dev)
+ return -ENOMEM;
+ 
+- /* Get hardware capabilities. */
+- match = of_match_node(mchp_spdiftx_dt_ids, np);
+- if (match)
+- dev->caps = match->data;
+-
+ /* Map I/O registers. */
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(base))
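The mchp-spdiftx diff bundles two cleanups: the never-used caps lookup in probe is deleted outright, and the flag becomes unsigned int gclk_enabled:1 because a signed one-bit bitfield can only hold 0 and -1, which silently breaks comparisons against 1. A standalone demonstration of the latter:

    /* Userspace demo: a signed 1-bit bitfield stores -1, not 1
     * (implementation-defined, but this is what gcc and clang do). */
    #include <stdio.h>

    struct flags {
            int          s:1;
            unsigned int u:1;
    };

    int main(void)
    {
            struct flags f = { .s = 1, .u = 1 };
            printf("signed=%d unsigned=%u\n", f.s, f.u);      /* -1, 1 */
            printf("s == 1? %s\n", f.s == 1 ? "yes" : "no");  /* no */
            return 0;
    }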
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index fe8fd7367e21b..e5190aa588c63 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -191,6 +191,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
+ if (!card)
+ return -ENOMEM;
+ 
++ card->owner = THIS_MODULE;
+ /* Allocate the private data */
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
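The sm8250 one-liner records the card's owning module so a module reference is held while userspace has the card open, preventing the driver from being unloaded under a running stream:

    /* Sketch: a snd_soc_card should pin its module; the neighbouring
     * card->dev assignment is typical probe code, assumed here. */
    card->owner = THIS_MODULE;
    card->dev = dev;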
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index ff5f8de1bc540..713b84d8d42f1 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -698,7 +698,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
+ if (delayed_register[i] &&
+ sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
+ id == chip->usb_id)
+- return inum != iface;
++ return iface < inum;
+ }
+ 
+ return false;
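In card.c, the delayed_register option (hex "usb_id:iface" pairs, as the sscanf format suggests) previously kept delaying whenever the probed interface differed from the configured one, so a value past the device's last interface left the card unregistered forever. Comparing with < delays only while earlier interfaces are still being probed:

    /* Sketch of the corrected predicate: delay registration only while
     * we have not yet reached the configured interface. */
    if (sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
        id == chip->usb_id)
            return iface < inum;   /* was: inum != iface */

The same iface < q->interface change is applied to the built-in registration_quirks table in quirks.c further down.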
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 743b8287cfcdd..11fa7745c017e 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -731,7 +731,8 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
+ * The endpoint needs to be closed via snd_usb_endpoint_close() later.
+ *
+ * Note that this function doesn't configure the endpoint. The substream
+- * needs to set it up later via snd_usb_endpoint_configure().
++ * needs to set it up later via snd_usb_endpoint_set_params() and
++ * snd_usb_endpoint_prepare().
+ */
+ struct snd_usb_endpoint *
+ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+@@ -1254,12 +1255,13 @@ out_of_memory:
+ /*
+ * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
+ *
++ * It's called either from hw_params callback.
+ * Determine the number of URBs to be used on this endpoint.
+ * An endpoint must be configured before it can be started.
+ * An endpoint that is already running can not be reconfigured.
+ */
+-static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep)
++int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep)
+ {
+ const struct audioformat *fmt = ep->cur_audiofmt;
+ int err;
+@@ -1315,18 +1317,18 @@ static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ }
+ 
+ /*
+- * snd_usb_endpoint_configure: Configure the endpoint
++ * snd_usb_endpoint_prepare: Prepare the endpoint
+ *
+ * This function sets up the EP to be fully usable state.
+- * It's called either from hw_params or prepare callback.
++ * It's called either from prepare callback.
+ * The function checks need_setup flag, and performs nothing unless needed,
+ * so it's safe to call this multiple times.
+ *
+ * This returns zero if unchanged, 1 if the configuration has changed,
+ * or a negative error code.
+ */
+-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep)
++int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep)
+ {
+ bool iface_first;
+ int err = 0;
+@@ -1348,9 +1350,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+ if (err < 0)
+ goto unlock;
+ }
+- err = snd_usb_endpoint_set_params(chip, ep);
+- if (err < 0)
+- goto unlock;
+ goto done;
+ }
+ 
+@@ -1378,10 +1377,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+ if (err < 0)
+ goto unlock;
+ 
+- err = snd_usb_endpoint_set_params(chip, ep);
+- if (err < 0)
+- goto unlock;
+-
+ err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
+ if (err < 0)
+ goto unlock;
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index 6a9af04cf175a..e67ea28faa54f 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -17,8 +17,10 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+ bool is_sync_ep);
+ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
+-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep);
++int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep);
++int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
+ 
+ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b6cd43c5ea3e6..2d60e6d1f8dff 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
+ if (stop_endpoints(subs, false))
+ sync_pending_stops(subs);
+ if (subs->sync_endpoint) {
+- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ if (err < 0)
+ return err;
+ }
+- err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
++ err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
+ if (err < 0)
+ return err;
+ snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
+ } else {
+ if (subs->sync_endpoint) {
+- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ if (err < 0)
+ return err;
+ }
+@@ -551,7 +551,13 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ subs->cur_audiofmt = fmt;
+ mutex_unlock(&chip->mutex);
+ 
+- ret = configure_endpoints(chip, subs);
++ if (subs->sync_endpoint) {
++ ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
++ if (ret < 0)
++ goto unlock;
++ }
++
++ ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
+ 
+ unlock:
+ if (ret < 0)
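Taken together, the endpoint.c, endpoint.h and pcm.c hunks split the old snd_usb_endpoint_configure() into two stages: hw_params now only sizes and allocates URBs through the newly exported snd_usb_endpoint_set_params(), while interface, rate and sync-endpoint setup happen at prepare time through snd_usb_endpoint_prepare() (called from configure_endpoints()). A sketch of the resulting division of labour; the wrapper name is hypothetical:

    /* hw_params stage: allocate and size URBs for both endpoints.
     * prepare stage: configure_endpoints() -> snd_usb_endpoint_prepare(). */
    static int hypothetical_hw_params_step(struct snd_usb_audio *chip,
                                           struct snd_usb_substream *subs)
    {
            int ret = 0;

            if (subs->sync_endpoint)
                    ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
            if (!ret)
                    ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
            return ret;
    }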
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9bfead5efc4c1..5b4d8f5eade20 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
+ 
+ for (q = registration_quirks; q->usb_id; q++)
+ if (chip->usb_id == q->usb_id)
+- return iface != q->interface;
++ return iface < q->interface;
+ 
+ /* Register as normal */
+ return false;
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index ceb93d798182c..f10f4e6d3fb85 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ return 0;
+ }
+ }
++
++ if (chip->card->registered)
++ chip->need_delayed_register = true;
++
+ /* look for an empty stream */
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ if (as->fmt_type != fp->fmt_type)
+@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ subs = &as->substream[stream];
+ if (subs->ep_num)
+ continue;
+- if (snd_device_get_state(chip->card, as->pcm) !=
+- SNDRV_DEV_BUILD)
+- chip->need_delayed_register = true;
+ err = snd_pcm_new_stream(as->pcm, stream, 1);
+ if (err < 0)
+ return err;
+@@ -1105,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ * Dallas DS4201 workaround: It presents 5 altsettings, but the last
+ * one misses syncpipe, and does not produce any sound.
+ */
+- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
++ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
+ num = 4;
+ 
+ for (i = 0; i < num; i++) {
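In stream.c, the per-PCM device-state probe is replaced by a single early check: once the card has been registered, any additional stream parsed from a later interface must go through delayed registration rather than racing a live card:

    /* Sketch: flag delayed registration up front for already-live cards. */
    if (chip->card->registered)
            chip->need_delayed_register = true;

The DS4201 workaround below is also narrowed with num >= 4 so it can only trim the altsetting count, never raise it.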
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index cb3d81adf5ca8..c6c40191933d4 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -435,6 +435,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
+ struct perf_event_attr *attr = &evsel->core.attr;
+ bool allow_user_set;
+ 
++ if (evsel__is_dummy_event(evsel))
++ return 0;
++
+ if (perf_header__has_feat(&session->header, HEADER_STAT))
+ return 0;
+ 
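The perf change exempts dummy events from attribute validation; they exist only to carry side-band data and have no samples whose sample_type could meaningfully be checked:

    /* Sketch: bail out of sample-attribute checks for side-band-only
     * events before any sample_type validation runs. */
    if (evsel__is_dummy_event(evsel))
            return 0;   /* dummy events never produce samples */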
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 44e40bad0e336..55a041329990c 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -16,6 +16,7 @@
+ #include "map_symbol.h"
+ #include "branch.h"
+ #include "mem-events.h"
++#include "path.h"
+ #include "srcline.h"
+ #include "symbol.h"
+ #include "sort.h"
+@@ -1407,7 +1408,7 @@ static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, i
+ struct stat st;
+ 
+ /*sshfs might return bad dent->d_type, so we have to stat*/
+- snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
++ path__join(path, sizeof(path), dir_name, dent->d_name);
+ if (stat(path, &st))
+ continue;
+ 
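Finally, machine.c swaps a bare snprintf("%s/%s", ...) for path__join() from perf's path.h, which avoids emitting a doubled slash when dir_name already ends in '/'. A hypothetical userspace equivalent of what the helper does:

    #include <stdio.h>
    #include <string.h>

    /* Join two path components without duplicating the separator
     * (illustrative only; the real helper lives in tools/perf/util). */
    static void join_path(char *buf, size_t sz, const char *a, const char *b)
    {
            size_t n = strlen(a);

            if (n && a[n - 1] == '/')
                    snprintf(buf, sz, "%s%s", a, b);
            else
                    snprintf(buf, sz, "%s/%s", a, b);
    }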