commit:     7646e418ef54f5f0f9d2dfa172c81d5b50674c07
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jun 29 11:07:00 2022 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jun 29 11:07:00 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7646e418

Linux patch 5.18.8

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1007_linux-5.18.8.patch | 6743 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6747 insertions(+)

diff --git a/0000_README b/0000_README
index 728697d0..b676cc58 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.18.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.18.7
 
+Patch:  1007_linux-5.18.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.18.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
32 |
diff --git a/1007_linux-5.18.8.patch b/1007_linux-5.18.8.patch |
33 |
new file mode 100644 |
34 |
index 00000000..5231e488 |
35 |
--- /dev/null |
36 |
+++ b/1007_linux-5.18.8.patch |
37 |
@@ -0,0 +1,6743 @@ |
38 |
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610 |
39 |
+index 308a6756d3bf3..491ead8044888 100644 |
40 |
+--- a/Documentation/ABI/testing/sysfs-bus-iio-vf610 |
41 |
++++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610 |
42 |
+@@ -1,4 +1,4 @@ |
43 |
+-What: /sys/bus/iio/devices/iio:deviceX/conversion_mode |
44 |
++What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode |
45 |
+ KernelVersion: 4.2 |
46 |
+ Contact: linux-iio@×××××××××××.org |
47 |
+ Description: |
48 |
+diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml |
49 |
+index 8913497624de2..cb5da1df8d405 100644 |
50 |
+--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml |
51 |
++++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml |
52 |
+@@ -135,7 +135,8 @@ properties: |
53 |
+ Phandle of a companion. |
54 |
+ |
55 |
+ phys: |
56 |
+- maxItems: 1 |
57 |
++ minItems: 1 |
58 |
++ maxItems: 3 |
59 |
+ |
60 |
+ phy-names: |
61 |
+ const: usb |
62 |
+diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml |
63 |
+index acbf94fa5f74a..d5fd3aa53ed29 100644 |
64 |
+--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml |
65 |
++++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml |
66 |
+@@ -102,7 +102,8 @@ properties: |
67 |
+ Overrides the detected port count |
68 |
+ |
69 |
+ phys: |
70 |
+- maxItems: 1 |
71 |
++ minItems: 1 |
72 |
++ maxItems: 3 |
73 |
+ |
74 |
+ phy-names: |
75 |
+ const: usb |
76 |
+diff --git a/Documentation/vm/hwpoison.rst b/Documentation/vm/hwpoison.rst |
77 |
+index c742de1769d18..b9d5253c13057 100644 |
78 |
+--- a/Documentation/vm/hwpoison.rst |
79 |
++++ b/Documentation/vm/hwpoison.rst |
80 |
+@@ -120,7 +120,8 @@ Testing |
81 |
+ unpoison-pfn |
82 |
+ Software-unpoison page at PFN echoed into this file. This way |
83 |
+ a page can be reused again. This only works for Linux |
84 |
+- injected failures, not for real memory failures. |
85 |
++ injected failures, not for real memory failures. Once any hardware |
86 |
++ memory failure happens, this feature is disabled. |
87 |
+ |
88 |
+ Note these injection interfaces are not stable and might change between |
89 |
+ kernel versions |
90 |
+diff --git a/MAINTAINERS b/MAINTAINERS |
91 |
+index f468864fd268c..8e6622ed6de69 100644 |
92 |
+--- a/MAINTAINERS |
93 |
++++ b/MAINTAINERS |
94 |
+@@ -427,6 +427,7 @@ ACPI VIOT DRIVER |
95 |
+ M: Jean-Philippe Brucker <jean-philippe@××××××.org> |
96 |
+ L: linux-acpi@×××××××××××.org |
97 |
+ L: iommu@××××××××××××××××××××××.org |
98 |
++L: iommu@×××××××××××.dev |
99 |
+ S: Maintained |
100 |
+ F: drivers/acpi/viot.c |
101 |
+ F: include/linux/acpi_viot.h |
102 |
+@@ -960,6 +961,7 @@ AMD IOMMU (AMD-VI) |
103 |
+ M: Joerg Roedel <joro@××××××.org> |
104 |
+ R: Suravee Suthikulpanit <suravee.suthikulpanit@×××.com> |
105 |
+ L: iommu@××××××××××××××××××××××.org |
106 |
++L: iommu@×××××××××××.dev |
107 |
+ S: Maintained |
108 |
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git |
109 |
+ F: drivers/iommu/amd/ |
110 |
+@@ -5898,6 +5900,7 @@ M: Christoph Hellwig <hch@×××.de> |
111 |
+ M: Marek Szyprowski <m.szyprowski@×××××××.com> |
112 |
+ R: Robin Murphy <robin.murphy@×××.com> |
113 |
+ L: iommu@××××××××××××××××××××××.org |
114 |
++L: iommu@×××××××××××.dev |
115 |
+ S: Supported |
116 |
+ W: http://git.infradead.org/users/hch/dma-mapping.git |
117 |
+ T: git git://git.infradead.org/users/hch/dma-mapping.git |
118 |
+@@ -5910,6 +5913,7 @@ F: kernel/dma/ |
119 |
+ DMA MAPPING BENCHMARK |
120 |
+ M: Xiang Chen <chenxiang66@×××××××××.com> |
121 |
+ L: iommu@××××××××××××××××××××××.org |
122 |
++L: iommu@×××××××××××.dev |
123 |
+ F: kernel/dma/map_benchmark.c |
124 |
+ F: tools/testing/selftests/dma/ |
125 |
+ |
126 |
+@@ -7476,6 +7480,7 @@ F: drivers/gpu/drm/exynos/exynos_dp* |
127 |
+ EXYNOS SYSMMU (IOMMU) driver |
128 |
+ M: Marek Szyprowski <m.szyprowski@×××××××.com> |
129 |
+ L: iommu@××××××××××××××××××××××.org |
130 |
++L: iommu@×××××××××××.dev |
131 |
+ S: Maintained |
132 |
+ F: drivers/iommu/exynos-iommu.c |
133 |
+ |
134 |
+@@ -9875,6 +9880,7 @@ INTEL IOMMU (VT-d) |
135 |
+ M: David Woodhouse <dwmw2@×××××××××.org> |
136 |
+ M: Lu Baolu <baolu.lu@×××××××××××.com> |
137 |
+ L: iommu@××××××××××××××××××××××.org |
138 |
++L: iommu@×××××××××××.dev |
139 |
+ S: Supported |
140 |
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git |
141 |
+ F: drivers/iommu/intel/ |
142 |
+@@ -10253,6 +10259,7 @@ IOMMU DRIVERS |
143 |
+ M: Joerg Roedel <joro@××××××.org> |
144 |
+ M: Will Deacon <will@××××××.org> |
145 |
+ L: iommu@××××××××××××××××××××××.org |
146 |
++L: iommu@×××××××××××.dev |
147 |
+ S: Maintained |
148 |
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git |
149 |
+ F: Documentation/devicetree/bindings/iommu/ |
150 |
+@@ -12369,6 +12376,7 @@ F: drivers/i2c/busses/i2c-mt65xx.c |
151 |
+ MEDIATEK IOMMU DRIVER |
152 |
+ M: Yong Wu <yong.wu@××××××××.com> |
153 |
+ L: iommu@××××××××××××××××××××××.org |
154 |
++L: iommu@×××××××××××.dev |
155 |
+ L: linux-mediatek@×××××××××××××××.org (moderated for non-subscribers) |
156 |
+ S: Supported |
157 |
+ F: Documentation/devicetree/bindings/iommu/mediatek* |
158 |
+@@ -16354,6 +16362,7 @@ F: drivers/i2c/busses/i2c-qcom-cci.c |
159 |
+ QUALCOMM IOMMU |
160 |
+ M: Rob Clark <robdclark@×××××.com> |
161 |
+ L: iommu@××××××××××××××××××××××.org |
162 |
++L: iommu@×××××××××××.dev |
163 |
+ L: linux-arm-msm@×××××××××××.org |
164 |
+ S: Maintained |
165 |
+ F: drivers/iommu/arm/arm-smmu/qcom_iommu.c |
166 |
+@@ -18939,6 +18948,7 @@ F: arch/x86/boot/video* |
167 |
+ SWIOTLB SUBSYSTEM |
168 |
+ M: Christoph Hellwig <hch@×××××××××.org> |
169 |
+ L: iommu@××××××××××××××××××××××.org |
170 |
++L: iommu@×××××××××××.dev |
171 |
+ S: Supported |
172 |
+ W: http://git.infradead.org/users/hch/dma-mapping.git |
173 |
+ T: git git://git.infradead.org/users/hch/dma-mapping.git |
174 |
+@@ -21609,6 +21619,7 @@ M: Juergen Gross <jgross@××××.com> |
175 |
+ M: Stefano Stabellini <sstabellini@××××××.org> |
176 |
+ L: xen-devel@××××××××××××××××.org (moderated for non-subscribers) |
177 |
+ L: iommu@××××××××××××××××××××××.org |
178 |
++L: iommu@×××××××××××.dev |
179 |
+ S: Supported |
180 |
+ F: arch/x86/xen/*swiotlb* |
181 |
+ F: drivers/xen/*swiotlb* |
182 |
+diff --git a/Makefile b/Makefile |
183 |
+index 61d63068553c8..6ac3335f65aff 100644 |
184 |
+--- a/Makefile |
185 |
++++ b/Makefile |
186 |
+@@ -1,7 +1,7 @@ |
187 |
+ # SPDX-License-Identifier: GPL-2.0 |
188 |
+ VERSION = 5 |
189 |
+ PATCHLEVEL = 18 |
190 |
+-SUBLEVEL = 7 |
191 |
++SUBLEVEL = 8 |
192 |
+ EXTRAVERSION = |
193 |
+ NAME = Superb Owl |
194 |
+ |
195 |
+@@ -1139,7 +1139,7 @@ KBUILD_MODULES := 1 |
196 |
+ |
197 |
+ autoksyms_recursive: descend modules.order |
198 |
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ |
199 |
+- "$(MAKE) -f $(srctree)/Makefile vmlinux" |
200 |
++ "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive" |
201 |
+ endif |
202 |
+ |
203 |
+ autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h) |
204 |
+diff --git a/arch/arm/boot/dts/bcm2711-rpi-400.dts b/arch/arm/boot/dts/bcm2711-rpi-400.dts |
205 |
+index f4d2fc20397c7..c53d9eb0b8027 100644 |
206 |
+--- a/arch/arm/boot/dts/bcm2711-rpi-400.dts |
207 |
++++ b/arch/arm/boot/dts/bcm2711-rpi-400.dts |
208 |
+@@ -28,12 +28,12 @@ |
209 |
+ &expgpio { |
210 |
+ gpio-line-names = "BT_ON", |
211 |
+ "WL_ON", |
212 |
+- "", |
213 |
++ "PWR_LED_OFF", |
214 |
+ "GLOBAL_RESET", |
215 |
+ "VDD_SD_IO_SEL", |
216 |
+- "CAM_GPIO", |
217 |
++ "GLOBAL_SHUTDOWN", |
218 |
+ "SD_PWR_ON", |
219 |
+- "SD_OC_N"; |
220 |
++ "SHUTDOWN_REQUEST"; |
221 |
+ }; |
222 |
+ |
223 |
+ &genet_mdio { |
224 |
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi |
225 |
+index d27beb47f9a3b..652feff334966 100644 |
226 |
+--- a/arch/arm/boot/dts/imx6qdl.dtsi |
227 |
++++ b/arch/arm/boot/dts/imx6qdl.dtsi |
228 |
+@@ -762,7 +762,7 @@ |
229 |
+ regulator-name = "vddpu"; |
230 |
+ regulator-min-microvolt = <725000>; |
231 |
+ regulator-max-microvolt = <1450000>; |
232 |
+- regulator-enable-ramp-delay = <150>; |
233 |
++ regulator-enable-ramp-delay = <380>; |
234 |
+ anatop-reg-offset = <0x140>; |
235 |
+ anatop-vol-bit-shift = <9>; |
236 |
+ anatop-vol-bit-width = <5>; |
237 |
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi |
238 |
+index 5af6d58666f42..9dd525871adf4 100644 |
239 |
+--- a/arch/arm/boot/dts/imx7s.dtsi |
240 |
++++ b/arch/arm/boot/dts/imx7s.dtsi |
241 |
+@@ -120,6 +120,7 @@ |
242 |
+ compatible = "usb-nop-xceiv"; |
243 |
+ clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>; |
244 |
+ clock-names = "main_clk"; |
245 |
++ power-domains = <&pgc_hsic_phy>; |
246 |
+ #phy-cells = <0>; |
247 |
+ }; |
248 |
+ |
249 |
+@@ -1153,7 +1154,6 @@ |
250 |
+ compatible = "fsl,imx7d-usb", "fsl,imx27-usb"; |
251 |
+ reg = <0x30b30000 0x200>; |
252 |
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; |
253 |
+- power-domains = <&pgc_hsic_phy>; |
254 |
+ clocks = <&clks IMX7D_USB_CTRL_CLK>; |
255 |
+ fsl,usbphy = <&usbphynop3>; |
256 |
+ fsl,usbmisc = <&usbmisc3 0>; |
257 |
+diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c |
258 |
+index 53cb924353920..938bd932df9a0 100644 |
259 |
+--- a/arch/arm/kernel/crash_dump.c |
260 |
++++ b/arch/arm/kernel/crash_dump.c |
261 |
+@@ -14,22 +14,10 @@ |
262 |
+ #include <linux/crash_dump.h> |
263 |
+ #include <linux/uaccess.h> |
264 |
+ #include <linux/io.h> |
265 |
++#include <linux/uio.h> |
266 |
+ |
267 |
+-/** |
268 |
+- * copy_oldmem_page() - copy one page from old kernel memory |
269 |
+- * @pfn: page frame number to be copied |
270 |
+- * @buf: buffer where the copied page is placed |
271 |
+- * @csize: number of bytes to copy |
272 |
+- * @offset: offset in bytes into the page |
273 |
+- * @userbuf: if set, @buf is int he user address space |
274 |
+- * |
275 |
+- * This function copies one page from old kernel memory into buffer pointed by |
276 |
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes |
277 |
+- * copied or negative error in case of failure. |
278 |
+- */ |
279 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
280 |
+- size_t csize, unsigned long offset, |
281 |
+- int userbuf) |
282 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
283 |
++ size_t csize, unsigned long offset) |
284 |
+ { |
285 |
+ void *vaddr; |
286 |
+ |
287 |
+@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
288 |
+ if (!vaddr) |
289 |
+ return -ENOMEM; |
290 |
+ |
291 |
+- if (userbuf) { |
292 |
+- if (copy_to_user(buf, vaddr + offset, csize)) { |
293 |
+- iounmap(vaddr); |
294 |
+- return -EFAULT; |
295 |
+- } |
296 |
+- } else { |
297 |
+- memcpy(buf, vaddr + offset, csize); |
298 |
+- } |
299 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
300 |
+ |
301 |
+ iounmap(vaddr); |
302 |
+ return csize; |
303 |
+diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c |
304 |
+index 512943eae30a5..2e203626eda52 100644 |
305 |
+--- a/arch/arm/mach-axxia/platsmp.c |
306 |
++++ b/arch/arm/mach-axxia/platsmp.c |
307 |
+@@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) |
308 |
+ return -ENOENT; |
309 |
+ |
310 |
+ syscon = of_iomap(syscon_np, 0); |
311 |
++ of_node_put(syscon_np); |
312 |
+ if (!syscon) |
313 |
+ return -ENOMEM; |
314 |
+ |
315 |
+diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c |
316 |
+index e4f4b20b83a2d..3fc4ec830e3a3 100644 |
317 |
+--- a/arch/arm/mach-cns3xxx/core.c |
318 |
++++ b/arch/arm/mach-cns3xxx/core.c |
319 |
+@@ -372,6 +372,7 @@ static void __init cns3xxx_init(void) |
320 |
+ /* De-Asscer SATA Reset */ |
321 |
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA)); |
322 |
+ } |
323 |
++ of_node_put(dn); |
324 |
+ |
325 |
+ dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci"); |
326 |
+ if (of_device_is_available(dn)) { |
327 |
+@@ -385,6 +386,7 @@ static void __init cns3xxx_init(void) |
328 |
+ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO)); |
329 |
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO)); |
330 |
+ } |
331 |
++ of_node_put(dn); |
332 |
+ |
333 |
+ pm_power_off = cns3xxx_power_off; |
334 |
+ |
335 |
+diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c |
336 |
+index 8b48326be9fd5..51a247ca4da8c 100644 |
337 |
+--- a/arch/arm/mach-exynos/exynos.c |
338 |
++++ b/arch/arm/mach-exynos/exynos.c |
339 |
+@@ -149,6 +149,7 @@ static void exynos_map_pmu(void) |
340 |
+ np = of_find_matching_node(NULL, exynos_dt_pmu_match); |
341 |
+ if (np) |
342 |
+ pmu_base_addr = of_iomap(np, 0); |
343 |
++ of_node_put(np); |
344 |
+ } |
345 |
+ |
346 |
+ static void __init exynos_init_irq(void) |
347 |
+diff --git a/arch/arm64/boot/dts/exynos/exynos7885.dtsi b/arch/arm64/boot/dts/exynos/exynos7885.dtsi |
348 |
+index 3170661f5b672..9c233c56558ce 100644 |
349 |
+--- a/arch/arm64/boot/dts/exynos/exynos7885.dtsi |
350 |
++++ b/arch/arm64/boot/dts/exynos/exynos7885.dtsi |
351 |
+@@ -280,8 +280,8 @@ |
352 |
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>; |
353 |
+ pinctrl-names = "default"; |
354 |
+ pinctrl-0 = <&uart0_bus>; |
355 |
+- clocks = <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>, |
356 |
+- <&cmu_peri CLK_GOUT_UART0_PCLK>; |
357 |
++ clocks = <&cmu_peri CLK_GOUT_UART0_PCLK>, |
358 |
++ <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>; |
359 |
+ clock-names = "uart", "clk_uart_baud0"; |
360 |
+ samsung,uart-fifosize = <64>; |
361 |
+ status = "disabled"; |
362 |
+@@ -293,8 +293,8 @@ |
363 |
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>; |
364 |
+ pinctrl-names = "default"; |
365 |
+ pinctrl-0 = <&uart1_bus>; |
366 |
+- clocks = <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>, |
367 |
+- <&cmu_peri CLK_GOUT_UART1_PCLK>; |
368 |
++ clocks = <&cmu_peri CLK_GOUT_UART1_PCLK>, |
369 |
++ <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>; |
370 |
+ clock-names = "uart", "clk_uart_baud0"; |
371 |
+ samsung,uart-fifosize = <256>; |
372 |
+ status = "disabled"; |
373 |
+@@ -306,8 +306,8 @@ |
374 |
+ interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>; |
375 |
+ pinctrl-names = "default"; |
376 |
+ pinctrl-0 = <&uart2_bus>; |
377 |
+- clocks = <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>, |
378 |
+- <&cmu_peri CLK_GOUT_UART2_PCLK>; |
379 |
++ clocks = <&cmu_peri CLK_GOUT_UART2_PCLK>, |
380 |
++ <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>; |
381 |
+ clock-names = "uart", "clk_uart_baud0"; |
382 |
+ samsung,uart-fifosize = <256>; |
383 |
+ status = "disabled"; |
384 |
+diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi |
385 |
+index f64b368c6c371..cdb530597c5eb 100644 |
386 |
+--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi |
387 |
++++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi |
388 |
+@@ -456,13 +456,11 @@ |
389 |
+ clock-names = "clk_ahb", "clk_xin"; |
390 |
+ mmc-ddr-1_8v; |
391 |
+ mmc-hs200-1_8v; |
392 |
+- mmc-hs400-1_8v; |
393 |
+ ti,trm-icp = <0x2>; |
394 |
+ ti,otap-del-sel-legacy = <0x0>; |
395 |
+ ti,otap-del-sel-mmc-hs = <0x0>; |
396 |
+ ti,otap-del-sel-ddr52 = <0x6>; |
397 |
+ ti,otap-del-sel-hs200 = <0x7>; |
398 |
+- ti,otap-del-sel-hs400 = <0x4>; |
399 |
+ }; |
400 |
+ |
401 |
+ sdhci1: mmc@fa00000 { |
402 |
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi |
403 |
+index be7f39299894e..19966f72c5b38 100644 |
404 |
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi |
405 |
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi |
406 |
+@@ -33,7 +33,7 @@ |
407 |
+ ranges; |
408 |
+ #interrupt-cells = <3>; |
409 |
+ interrupt-controller; |
410 |
+- reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */ |
411 |
++ reg = <0x00 0x01800000 0x00 0x100000>, /* GICD */ |
412 |
+ <0x00 0x01900000 0x00 0x100000>, /* GICR */ |
413 |
+ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ |
414 |
+ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ |
415 |
+diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c |
416 |
+index 58303a9ec32c4..670e4ce818223 100644 |
417 |
+--- a/arch/arm64/kernel/crash_dump.c |
418 |
++++ b/arch/arm64/kernel/crash_dump.c |
419 |
+@@ -9,25 +9,11 @@ |
420 |
+ #include <linux/crash_dump.h> |
421 |
+ #include <linux/errno.h> |
422 |
+ #include <linux/io.h> |
423 |
+-#include <linux/memblock.h> |
424 |
+-#include <linux/uaccess.h> |
425 |
++#include <linux/uio.h> |
426 |
+ #include <asm/memory.h> |
427 |
+ |
428 |
+-/** |
429 |
+- * copy_oldmem_page() - copy one page from old kernel memory |
430 |
+- * @pfn: page frame number to be copied |
431 |
+- * @buf: buffer where the copied page is placed |
432 |
+- * @csize: number of bytes to copy |
433 |
+- * @offset: offset in bytes into the page |
434 |
+- * @userbuf: if set, @buf is in a user address space |
435 |
+- * |
436 |
+- * This function copies one page from old kernel memory into buffer pointed by |
437 |
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes |
438 |
+- * copied or negative error in case of failure. |
439 |
+- */ |
440 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
441 |
+- size_t csize, unsigned long offset, |
442 |
+- int userbuf) |
443 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
444 |
++ size_t csize, unsigned long offset) |
445 |
+ { |
446 |
+ void *vaddr; |
447 |
+ |
448 |
+@@ -38,14 +24,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
449 |
+ if (!vaddr) |
450 |
+ return -ENOMEM; |
451 |
+ |
452 |
+- if (userbuf) { |
453 |
+- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { |
454 |
+- memunmap(vaddr); |
455 |
+- return -EFAULT; |
456 |
+- } |
457 |
+- } else { |
458 |
+- memcpy(buf, vaddr + offset, csize); |
459 |
+- } |
460 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
461 |
+ |
462 |
+ memunmap(vaddr); |
463 |
+ |
464 |
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c |
465 |
+index a66d83540c15a..f88919a793adf 100644 |
466 |
+--- a/arch/arm64/kvm/arm.c |
467 |
++++ b/arch/arm64/kvm/arm.c |
468 |
+@@ -2011,11 +2011,11 @@ static int finalize_hyp_mode(void) |
469 |
+ return 0; |
470 |
+ |
471 |
+ /* |
472 |
+- * Exclude HYP BSS from kmemleak so that it doesn't get peeked |
473 |
+- * at, which would end badly once the section is inaccessible. |
474 |
+- * None of other sections should ever be introspected. |
475 |
++ * Exclude HYP sections from kmemleak so that they don't get peeked |
476 |
++ * at, which would end badly once inaccessible. |
477 |
+ */ |
478 |
+ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); |
479 |
++ kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size); |
480 |
+ return pkvm_drop_host_privileges(); |
481 |
+ } |
482 |
+ |
483 |
+diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c |
484 |
+index 0ed3c3dee4cde..4ef68e2aa7571 100644 |
485 |
+--- a/arch/ia64/kernel/crash_dump.c |
486 |
++++ b/arch/ia64/kernel/crash_dump.c |
487 |
+@@ -10,42 +10,18 @@ |
488 |
+ #include <linux/errno.h> |
489 |
+ #include <linux/types.h> |
490 |
+ #include <linux/crash_dump.h> |
491 |
+- |
492 |
++#include <linux/uio.h> |
493 |
+ #include <asm/page.h> |
494 |
+-#include <linux/uaccess.h> |
495 |
+ |
496 |
+-/** |
497 |
+- * copy_oldmem_page - copy one page from "oldmem" |
498 |
+- * @pfn: page frame number to be copied |
499 |
+- * @buf: target memory address for the copy; this can be in kernel address |
500 |
+- * space or user address space (see @userbuf) |
501 |
+- * @csize: number of bytes to copy |
502 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
503 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
504 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
505 |
+- * |
506 |
+- * Copy a page from "oldmem". For this page, there is no pte mapped |
507 |
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
508 |
+- * |
509 |
+- * Calling copy_to_user() in atomic context is not desirable. Hence first |
510 |
+- * copying the data to a pre-allocated kernel page and then copying to user |
511 |
+- * space in non-atomic context. |
512 |
+- */ |
513 |
+-ssize_t |
514 |
+-copy_oldmem_page(unsigned long pfn, char *buf, |
515 |
+- size_t csize, unsigned long offset, int userbuf) |
516 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
517 |
++ size_t csize, unsigned long offset) |
518 |
+ { |
519 |
+ void *vaddr; |
520 |
+ |
521 |
+ if (!csize) |
522 |
+ return 0; |
523 |
+ vaddr = __va(pfn<<PAGE_SHIFT); |
524 |
+- if (userbuf) { |
525 |
+- if (copy_to_user(buf, (vaddr + offset), csize)) { |
526 |
+- return -EFAULT; |
527 |
+- } |
528 |
+- } else |
529 |
+- memcpy(buf, (vaddr + offset), csize); |
530 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
531 |
+ return csize; |
532 |
+ } |
533 |
+ |
534 |
+diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c |
535 |
+index 2e50f55185a65..6e50f49024094 100644 |
536 |
+--- a/arch/mips/kernel/crash_dump.c |
537 |
++++ b/arch/mips/kernel/crash_dump.c |
538 |
+@@ -1,22 +1,10 @@ |
539 |
+ // SPDX-License-Identifier: GPL-2.0 |
540 |
+ #include <linux/highmem.h> |
541 |
+ #include <linux/crash_dump.h> |
542 |
++#include <linux/uio.h> |
543 |
+ |
544 |
+-/** |
545 |
+- * copy_oldmem_page - copy one page from "oldmem" |
546 |
+- * @pfn: page frame number to be copied |
547 |
+- * @buf: target memory address for the copy; this can be in kernel address |
548 |
+- * space or user address space (see @userbuf) |
549 |
+- * @csize: number of bytes to copy |
550 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
551 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
552 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
553 |
+- * |
554 |
+- * Copy a page from "oldmem". For this page, there is no pte mapped |
555 |
+- * in the current kernel. |
556 |
+- */ |
557 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
558 |
+- size_t csize, unsigned long offset, int userbuf) |
559 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
560 |
++ size_t csize, unsigned long offset) |
561 |
+ { |
562 |
+ void *vaddr; |
563 |
+ |
564 |
+@@ -24,14 +12,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
565 |
+ return 0; |
566 |
+ |
567 |
+ vaddr = kmap_local_pfn(pfn); |
568 |
+- |
569 |
+- if (!userbuf) { |
570 |
+- memcpy(buf, vaddr + offset, csize); |
571 |
+- } else { |
572 |
+- if (copy_to_user(buf, vaddr + offset, csize)) |
573 |
+- csize = -EFAULT; |
574 |
+- } |
575 |
+- |
576 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
577 |
+ kunmap_local(vaddr); |
578 |
+ |
579 |
+ return csize; |
580 |
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c |
581 |
+index 7b7f25b4b057e..9240bcdbe74e4 100644 |
582 |
+--- a/arch/mips/vr41xx/common/icu.c |
583 |
++++ b/arch/mips/vr41xx/common/icu.c |
584 |
+@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq) |
585 |
+ |
586 |
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); |
587 |
+ |
588 |
+- atomic_inc(&irq_err_count); |
589 |
+- |
590 |
+ return -1; |
591 |
+ } |
592 |
+ |
593 |
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig |
594 |
+index bd22578859d00..f3a2044ee4020 100644 |
595 |
+--- a/arch/parisc/Kconfig |
596 |
++++ b/arch/parisc/Kconfig |
597 |
+@@ -10,6 +10,7 @@ config PARISC |
598 |
+ select ARCH_WANT_FRAME_POINTERS |
599 |
+ select ARCH_HAS_ELF_RANDOMIZE |
600 |
+ select ARCH_HAS_STRICT_KERNEL_RWX |
601 |
++ select ARCH_HAS_STRICT_MODULE_RWX |
602 |
+ select ARCH_HAS_UBSAN_SANITIZE_ALL |
603 |
+ select ARCH_HAS_PTE_SPECIAL |
604 |
+ select ARCH_NO_SG_CHAIN |
605 |
+diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h |
606 |
+index d63a2acb91f2b..55d29c4f716e6 100644 |
607 |
+--- a/arch/parisc/include/asm/fb.h |
608 |
++++ b/arch/parisc/include/asm/fb.h |
609 |
+@@ -12,7 +12,7 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, |
610 |
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; |
611 |
+ } |
612 |
+ |
613 |
+-#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI) |
614 |
++#if defined(CONFIG_FB_STI) |
615 |
+ int fb_is_primary_device(struct fb_info *info); |
616 |
+ #else |
617 |
+ static inline int fb_is_primary_device(struct fb_info *info) |
618 |
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
619 |
+index 0fd04073d4b68..a20c1c47b7808 100644 |
620 |
+--- a/arch/parisc/kernel/cache.c |
621 |
++++ b/arch/parisc/kernel/cache.c |
622 |
+@@ -722,7 +722,10 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon |
623 |
+ return; |
624 |
+ |
625 |
+ if (parisc_requires_coherency()) { |
626 |
+- flush_user_cache_page(vma, vmaddr); |
627 |
++ if (vma->vm_flags & VM_SHARED) |
628 |
++ flush_data_cache(); |
629 |
++ else |
630 |
++ flush_user_cache_page(vma, vmaddr); |
631 |
+ return; |
632 |
+ } |
633 |
+ |
634 |
+diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c |
635 |
+index 5693e1c67c2b4..32b4a97f1b79b 100644 |
636 |
+--- a/arch/powerpc/kernel/crash_dump.c |
637 |
++++ b/arch/powerpc/kernel/crash_dump.c |
638 |
+@@ -16,7 +16,7 @@ |
639 |
+ #include <asm/kdump.h> |
640 |
+ #include <asm/prom.h> |
641 |
+ #include <asm/firmware.h> |
642 |
+-#include <linux/uaccess.h> |
643 |
++#include <linux/uio.h> |
644 |
+ #include <asm/rtas.h> |
645 |
+ #include <asm/inst.h> |
646 |
+ |
647 |
+@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void) |
648 |
+ } |
649 |
+ #endif /* CONFIG_NONSTATIC_KERNEL */ |
650 |
+ |
651 |
+-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, |
652 |
+- unsigned long offset, int userbuf) |
653 |
+-{ |
654 |
+- if (userbuf) { |
655 |
+- if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) |
656 |
+- return -EFAULT; |
657 |
+- } else |
658 |
+- memcpy(buf, (vaddr + offset), csize); |
659 |
+- |
660 |
+- return csize; |
661 |
+-} |
662 |
+- |
663 |
+-/** |
664 |
+- * copy_oldmem_page - copy one page from "oldmem" |
665 |
+- * @pfn: page frame number to be copied |
666 |
+- * @buf: target memory address for the copy; this can be in kernel address |
667 |
+- * space or user address space (see @userbuf) |
668 |
+- * @csize: number of bytes to copy |
669 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
670 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
671 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
672 |
+- * |
673 |
+- * Copy a page from "oldmem". For this page, there is no pte mapped |
674 |
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
675 |
+- */ |
676 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
677 |
+- size_t csize, unsigned long offset, int userbuf) |
678 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
679 |
++ size_t csize, unsigned long offset) |
680 |
+ { |
681 |
+ void *vaddr; |
682 |
+ phys_addr_t paddr; |
683 |
+@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
684 |
+ |
685 |
+ if (memblock_is_region_memory(paddr, csize)) { |
686 |
+ vaddr = __va(paddr); |
687 |
+- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
688 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
689 |
+ } else { |
690 |
+ vaddr = ioremap_cache(paddr, PAGE_SIZE); |
691 |
+- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
692 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
693 |
+ iounmap(vaddr); |
694 |
+ } |
695 |
+ |
696 |
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
697 |
+index a75d20f23dac8..9be279469a851 100644 |
698 |
+--- a/arch/powerpc/kernel/process.c |
699 |
++++ b/arch/powerpc/kernel/process.c |
700 |
+@@ -1857,7 +1857,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) |
701 |
+ tm_reclaim_current(0); |
702 |
+ #endif |
703 |
+ |
704 |
+- memset(regs->gpr, 0, sizeof(regs->gpr)); |
705 |
++ memset(®s->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0])); |
706 |
+ regs->ctr = 0; |
707 |
+ regs->link = 0; |
708 |
+ regs->xer = 0; |
709 |
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c |
710 |
+index 6bc89d9ccf635..276b4eb1435b5 100644 |
711 |
+--- a/arch/powerpc/kernel/rtas.c |
712 |
++++ b/arch/powerpc/kernel/rtas.c |
713 |
+@@ -1061,7 +1061,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { |
714 |
+ { "get-time-of-day", -1, -1, -1, -1, -1 }, |
715 |
+ { "ibm,get-vpd", -1, 0, -1, 1, 2 }, |
716 |
+ { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, |
717 |
+- { "ibm,platform-dump", -1, 4, 5, -1, -1 }, |
718 |
++ { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */ |
719 |
+ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, |
720 |
+ { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, |
721 |
+ { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, |
722 |
+@@ -1110,6 +1110,15 @@ static bool block_rtas_call(int token, int nargs, |
723 |
+ size = 1; |
724 |
+ |
725 |
+ end = base + size - 1; |
726 |
++ |
727 |
++ /* |
728 |
++ * Special case for ibm,platform-dump - NULL buffer |
729 |
++ * address is used to indicate end of dump processing |
730 |
++ */ |
731 |
++ if (!strcmp(f->name, "ibm,platform-dump") && |
732 |
++ base == 0) |
733 |
++ return false; |
734 |
++ |
735 |
+ if (!in_rmo_buf(base, end)) |
736 |
+ goto err; |
737 |
+ } |
738 |
+diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h |
739 |
+new file mode 100644 |
740 |
+index 0000000000000..335417e95e66f |
741 |
+--- /dev/null |
742 |
++++ b/arch/powerpc/platforms/microwatt/microwatt.h |
743 |
+@@ -0,0 +1,7 @@ |
744 |
++/* SPDX-License-Identifier: GPL-2.0 */ |
745 |
++#ifndef _MICROWATT_H |
746 |
++#define _MICROWATT_H |
747 |
++ |
748 |
++void microwatt_rng_init(void); |
749 |
++ |
750 |
++#endif /* _MICROWATT_H */ |
751 |
+diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c |
752 |
+index 7bc4d1cbfaf04..8ece87d005c86 100644 |
753 |
+--- a/arch/powerpc/platforms/microwatt/rng.c |
754 |
++++ b/arch/powerpc/platforms/microwatt/rng.c |
755 |
+@@ -11,6 +11,7 @@ |
756 |
+ #include <asm/archrandom.h> |
757 |
+ #include <asm/cputable.h> |
758 |
+ #include <asm/machdep.h> |
759 |
++#include "microwatt.h" |
760 |
+ |
761 |
+ #define DARN_ERR 0xFFFFFFFFFFFFFFFFul |
762 |
+ |
763 |
+@@ -29,7 +30,7 @@ static int microwatt_get_random_darn(unsigned long *v) |
764 |
+ return 1; |
765 |
+ } |
766 |
+ |
767 |
+-static __init int rng_init(void) |
768 |
++void __init microwatt_rng_init(void) |
769 |
+ { |
770 |
+ unsigned long val; |
771 |
+ int i; |
772 |
+@@ -37,12 +38,7 @@ static __init int rng_init(void) |
773 |
+ for (i = 0; i < 10; i++) { |
774 |
+ if (microwatt_get_random_darn(&val)) { |
775 |
+ ppc_md.get_random_seed = microwatt_get_random_darn; |
776 |
+- return 0; |
777 |
++ return; |
778 |
+ } |
779 |
+ } |
780 |
+- |
781 |
+- pr_warn("Unable to use DARN for get_random_seed()\n"); |
782 |
+- |
783 |
+- return -EIO; |
784 |
+ } |
785 |
+-machine_subsys_initcall(, rng_init); |
786 |
+diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c |
787 |
+index 0b02603bdb747..6b32539395a48 100644 |
788 |
+--- a/arch/powerpc/platforms/microwatt/setup.c |
789 |
++++ b/arch/powerpc/platforms/microwatt/setup.c |
790 |
+@@ -16,6 +16,8 @@ |
791 |
+ #include <asm/xics.h> |
792 |
+ #include <asm/udbg.h> |
793 |
+ |
794 |
++#include "microwatt.h" |
795 |
++ |
796 |
+ static void __init microwatt_init_IRQ(void) |
797 |
+ { |
798 |
+ xics_init(); |
799 |
+@@ -32,10 +34,16 @@ static int __init microwatt_populate(void) |
800 |
+ } |
801 |
+ machine_arch_initcall(microwatt, microwatt_populate); |
802 |
+ |
803 |
++static void __init microwatt_setup_arch(void) |
804 |
++{ |
805 |
++ microwatt_rng_init(); |
806 |
++} |
807 |
++ |
808 |
+ define_machine(microwatt) { |
809 |
+ .name = "microwatt", |
810 |
+ .probe = microwatt_probe, |
811 |
+ .init_IRQ = microwatt_init_IRQ, |
812 |
++ .setup_arch = microwatt_setup_arch, |
813 |
+ .progress = udbg_progress, |
814 |
+ .calibrate_decr = generic_calibrate_decr, |
815 |
+ }; |
816 |
+diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h |
817 |
+index e297bf4abfcb8..866efdc103fdd 100644 |
818 |
+--- a/arch/powerpc/platforms/powernv/powernv.h |
819 |
++++ b/arch/powerpc/platforms/powernv/powernv.h |
820 |
+@@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count); |
821 |
+ u32 __init memcons_get_size(struct memcons *mc); |
822 |
+ struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name); |
823 |
+ |
824 |
++void pnv_rng_init(void); |
825 |
++ |
826 |
+ #endif /* _POWERNV_H */ |
827 |
+diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c |
828 |
+index e3d44b36ae98f..463c78c52cc5d 100644 |
829 |
+--- a/arch/powerpc/platforms/powernv/rng.c |
830 |
++++ b/arch/powerpc/platforms/powernv/rng.c |
831 |
+@@ -17,6 +17,7 @@ |
832 |
+ #include <asm/prom.h> |
833 |
+ #include <asm/machdep.h> |
834 |
+ #include <asm/smp.h> |
835 |
++#include "powernv.h" |
836 |
+ |
837 |
+ #define DARN_ERR 0xFFFFFFFFFFFFFFFFul |
838 |
+ |
839 |
+@@ -28,7 +29,6 @@ struct powernv_rng { |
840 |
+ |
841 |
+ static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); |
842 |
+ |
843 |
+- |
844 |
+ int powernv_hwrng_present(void) |
845 |
+ { |
846 |
+ struct powernv_rng *rng; |
847 |
+@@ -98,9 +98,6 @@ static int __init initialise_darn(void) |
848 |
+ return 0; |
849 |
+ } |
850 |
+ } |
851 |
+- |
852 |
+- pr_warn("Unable to use DARN for get_random_seed()\n"); |
853 |
+- |
854 |
+ return -EIO; |
855 |
+ } |
856 |
+ |
857 |
+@@ -163,32 +160,55 @@ static __init int rng_create(struct device_node *dn) |
858 |
+ |
859 |
+ rng_init_per_cpu(rng, dn); |
860 |
+ |
861 |
+- pr_info_once("Registering arch random hook.\n"); |
862 |
+- |
863 |
+ ppc_md.get_random_seed = powernv_get_random_long; |
864 |
+ |
865 |
+ return 0; |
866 |
+ } |
867 |
+ |
868 |
+-static __init int rng_init(void) |
869 |
++static int __init pnv_get_random_long_early(unsigned long *v) |
870 |
+ { |
871 |
+ struct device_node *dn; |
872 |
+- int rc; |
873 |
++ |
874 |
++ if (!slab_is_available()) |
875 |
++ return 0; |
876 |
++ |
877 |
++ if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, |
878 |
++ NULL) != pnv_get_random_long_early) |
879 |
++ return 0; |
880 |
+ |
881 |
+ for_each_compatible_node(dn, NULL, "ibm,power-rng") { |
882 |
+- rc = rng_create(dn); |
883 |
+- if (rc) { |
884 |
+- pr_err("Failed creating rng for %pOF (%d).\n", |
885 |
+- dn, rc); |
886 |
++ if (rng_create(dn)) |
887 |
+ continue; |
888 |
+- } |
889 |
+- |
890 |
+ /* Create devices for hwrng driver */ |
891 |
+ of_platform_device_create(dn, NULL, NULL); |
892 |
+ } |
893 |
+ |
894 |
+- initialise_darn(); |
895 |
++ if (!ppc_md.get_random_seed) |
896 |
++ return 0; |
897 |
++ return ppc_md.get_random_seed(v); |
898 |
++} |
899 |
++ |
900 |
++void __init pnv_rng_init(void) |
901 |
++{ |
902 |
++ struct device_node *dn; |
903 |
+ |
904 |
++ /* Prefer darn over the rest. */ |
905 |
++ if (!initialise_darn()) |
906 |
++ return; |
907 |
++ |
908 |
++ dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); |
909 |
++ if (dn) |
910 |
++ ppc_md.get_random_seed = pnv_get_random_long_early; |
911 |
++ |
912 |
++ of_node_put(dn); |
913 |
++} |
914 |
++ |
915 |
++static int __init pnv_rng_late_init(void) |
916 |
++{ |
917 |
++ unsigned long v; |
918 |
++ /* In case it wasn't called during init for some other reason. */ |
919 |
++ if (ppc_md.get_random_seed == pnv_get_random_long_early) |
920 |
++ pnv_get_random_long_early(&v); |
921 |
+ return 0; |
922 |
+ } |
923 |
+-machine_subsys_initcall(powernv, rng_init); |
924 |
++machine_subsys_initcall(powernv, pnv_rng_late_init); |
925 |
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c |
926 |
+index 824c3ad7a0faf..dac545aa03082 100644 |
927 |
+--- a/arch/powerpc/platforms/powernv/setup.c |
928 |
++++ b/arch/powerpc/platforms/powernv/setup.c |
929 |
+@@ -203,6 +203,8 @@ static void __init pnv_setup_arch(void) |
930 |
+ pnv_check_guarded_cores(); |
931 |
+ |
932 |
+ /* XXX PMCS */ |
933 |
++ |
934 |
++ pnv_rng_init(); |
935 |
+ } |
936 |
+ |
937 |
+ static void __init pnv_init(void) |
938 |
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h |
939 |
+index af162aeeae86d..3f9b51298aa34 100644 |
940 |
+--- a/arch/powerpc/platforms/pseries/pseries.h |
941 |
++++ b/arch/powerpc/platforms/pseries/pseries.h |
942 |
+@@ -121,4 +121,6 @@ void pseries_lpar_read_hblkrm_characteristics(void); |
943 |
+ static inline void pseries_lpar_read_hblkrm_characteristics(void) { } |
944 |
+ #endif |
945 |
+ |
946 |
++void pseries_rng_init(void); |
947 |
++ |
948 |
+ #endif /* _PSERIES_PSERIES_H */ |
949 |
+diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c |
950 |
+index 6268545947b83..6ddfdeaace9ef 100644 |
951 |
+--- a/arch/powerpc/platforms/pseries/rng.c |
952 |
++++ b/arch/powerpc/platforms/pseries/rng.c |
953 |
+@@ -10,6 +10,7 @@ |
954 |
+ #include <asm/archrandom.h> |
955 |
+ #include <asm/machdep.h> |
956 |
+ #include <asm/plpar_wrappers.h> |
957 |
++#include "pseries.h" |
958 |
+ |
959 |
+ |
960 |
+ static int pseries_get_random_long(unsigned long *v) |
961 |
+@@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v) |
962 |
+ return 0; |
963 |
+ } |
964 |
+ |
965 |
+-static __init int rng_init(void) |
966 |
++void __init pseries_rng_init(void) |
967 |
+ { |
968 |
+ struct device_node *dn; |
969 |
+ |
970 |
+ dn = of_find_compatible_node(NULL, NULL, "ibm,random"); |
971 |
+ if (!dn) |
972 |
+- return -ENODEV; |
973 |
+- |
974 |
+- pr_info("Registering arch random hook.\n"); |
975 |
+- |
976 |
++ return; |
977 |
+ ppc_md.get_random_seed = pseries_get_random_long; |
978 |
+- |
979 |
+ of_node_put(dn); |
980 |
+- return 0; |
981 |
+ } |
982 |
+-machine_subsys_initcall(pseries, rng_init); |
983 |
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c |
984 |
+index 955ff8aa1644d..f27735f623bae 100644 |
985 |
+--- a/arch/powerpc/platforms/pseries/setup.c |
986 |
++++ b/arch/powerpc/platforms/pseries/setup.c |
987 |
+@@ -852,6 +852,8 @@ static void __init pSeries_setup_arch(void) |
988 |
+ |
989 |
+ if (swiotlb_force == SWIOTLB_FORCE) |
990 |
+ ppc_swiotlb_enable = 1; |
991 |
++ |
992 |
++ pseries_rng_init(); |
993 |
+ } |
994 |
+ |
995 |
+ static void pseries_panic(char *str) |
996 |
+diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c |
997 |
+index 86cc0ada57522..ea2158cee97b3 100644 |
998 |
+--- a/arch/riscv/kernel/crash_dump.c |
999 |
++++ b/arch/riscv/kernel/crash_dump.c |
1000 |
+@@ -7,22 +7,10 @@ |
1001 |
+ |
1002 |
+ #include <linux/crash_dump.h> |
1003 |
+ #include <linux/io.h> |
1004 |
++#include <linux/uio.h> |
1005 |
+ |
1006 |
+-/** |
1007 |
+- * copy_oldmem_page() - copy one page from old kernel memory |
1008 |
+- * @pfn: page frame number to be copied |
1009 |
+- * @buf: buffer where the copied page is placed |
1010 |
+- * @csize: number of bytes to copy |
1011 |
+- * @offset: offset in bytes into the page |
1012 |
+- * @userbuf: if set, @buf is in a user address space |
1013 |
+- * |
1014 |
+- * This function copies one page from old kernel memory into buffer pointed by |
1015 |
+- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes |
1016 |
+- * copied or negative error in case of failure. |
1017 |
+- */ |
1018 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
1019 |
+- size_t csize, unsigned long offset, |
1020 |
+- int userbuf) |
1021 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
1022 |
++ size_t csize, unsigned long offset) |
1023 |
+ { |
1024 |
+ void *vaddr; |
1025 |
+ |
1026 |
+@@ -33,13 +21,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
1027 |
+ if (!vaddr) |
1028 |
+ return -ENOMEM; |
1029 |
+ |
1030 |
+- if (userbuf) { |
1031 |
+- if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { |
1032 |
+- memunmap(vaddr); |
1033 |
+- return -EFAULT; |
1034 |
+- } |
1035 |
+- } else |
1036 |
+- memcpy(buf, vaddr + offset, csize); |
1037 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
1038 |
+ |
1039 |
+ memunmap(vaddr); |
1040 |
+ return csize; |
1041 |
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c |
1042 |
+index 69819b7652504..28124d0fa1d5e 100644 |
1043 |
+--- a/arch/s390/kernel/crash_dump.c |
1044 |
++++ b/arch/s390/kernel/crash_dump.c |
1045 |
+@@ -15,6 +15,7 @@ |
1046 |
+ #include <linux/slab.h> |
1047 |
+ #include <linux/memblock.h> |
1048 |
+ #include <linux/elf.h> |
1049 |
++#include <linux/uio.h> |
1050 |
+ #include <asm/asm-offsets.h> |
1051 |
+ #include <asm/os_info.h> |
1052 |
+ #include <asm/elf.h> |
1053 |
+@@ -212,20 +213,30 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count) |
1054 |
+ /* |
1055 |
+ * Copy one page from "oldmem" |
1056 |
+ */ |
1057 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1058 |
+- unsigned long offset, int userbuf) |
1059 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, |
1060 |
++ unsigned long offset) |
1061 |
+ { |
1062 |
+ unsigned long src; |
1063 |
+ int rc; |
1064 |
+ |
1065 |
++ if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter))) |
1066 |
++ return -EINVAL; |
1067 |
++ /* Multi-segment iterators are not supported */ |
1068 |
++ if (iter->nr_segs > 1) |
1069 |
++ return -EINVAL; |
1070 |
+ if (!csize) |
1071 |
+ return 0; |
1072 |
+ src = pfn_to_phys(pfn) + offset; |
1073 |
+- if (userbuf) |
1074 |
+- rc = copy_oldmem_user((void __force __user *) buf, src, csize); |
1075 |
++ |
1076 |
++ /* XXX: pass the iov_iter down to a common function */ |
1077 |
++ if (iter_is_iovec(iter)) |
1078 |
++ rc = copy_oldmem_user(iter->iov->iov_base, src, csize); |
1079 |
+ else |
1080 |
+- rc = copy_oldmem_kernel((void *) buf, src, csize); |
1081 |
+- return rc; |
1082 |
++ rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize); |
1083 |
++ if (rc < 0) |
1084 |
++ return rc; |
1085 |
++ iov_iter_advance(iter, csize); |
1086 |
++ return csize; |
1087 |
+ } |
1088 |
+ |
1089 |
+ /* |
1090 |
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c |
1091 |
+index 483ab5e10164d..f7dd3c849e68c 100644 |
1092 |
+--- a/arch/s390/kernel/perf_cpum_cf.c |
1093 |
++++ b/arch/s390/kernel/perf_cpum_cf.c |
1094 |
+@@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) |
1095 |
+ return err; |
1096 |
+ } |
1097 |
+ |
1098 |
++/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different |
1099 |
++ * attribute::type values: |
1100 |
++ * - PERF_TYPE_HARDWARE: |
1101 |
++ * - pmu->type: |
1102 |
++ * Handle both type of invocations identical. They address the same hardware. |
1103 |
++ * The result is different when event modifiers exclude_kernel and/or |
1104 |
++ * exclude_user are also set. |
1105 |
++ */ |
1106 |
++static int cpumf_pmu_event_type(struct perf_event *event) |
1107 |
++{ |
1108 |
++ u64 ev = event->attr.config; |
1109 |
++ |
1110 |
++ if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev || |
1111 |
++ cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev || |
1112 |
++ cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev || |
1113 |
++ cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev) |
1114 |
++ return PERF_TYPE_HARDWARE; |
1115 |
++ return PERF_TYPE_RAW; |
1116 |
++} |
1117 |
++ |
1118 |
+ static int cpumf_pmu_event_init(struct perf_event *event) |
1119 |
+ { |
1120 |
+ unsigned int type = event->attr.type; |
1121 |
+@@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event) |
1122 |
+ err = __hw_perf_event_init(event, type); |
1123 |
+ else if (event->pmu->type == type) |
1124 |
+ /* Registered as unknown PMU */ |
1125 |
+- err = __hw_perf_event_init(event, PERF_TYPE_RAW); |
1126 |
++ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event)); |
1127 |
+ else |
1128 |
+ return -ENOENT; |
1129 |
+ |
1130 |
+diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c |
1131 |
+index 5b41b59698c1e..19ce6a950aaca 100644 |
1132 |
+--- a/arch/sh/kernel/crash_dump.c |
1133 |
++++ b/arch/sh/kernel/crash_dump.c |
1134 |
+@@ -8,23 +8,11 @@ |
1135 |
+ #include <linux/errno.h> |
1136 |
+ #include <linux/crash_dump.h> |
1137 |
+ #include <linux/io.h> |
1138 |
++#include <linux/uio.h> |
1139 |
+ #include <linux/uaccess.h> |
1140 |
+ |
1141 |
+-/** |
1142 |
+- * copy_oldmem_page - copy one page from "oldmem" |
1143 |
+- * @pfn: page frame number to be copied |
1144 |
+- * @buf: target memory address for the copy; this can be in kernel address |
1145 |
+- * space or user address space (see @userbuf) |
1146 |
+- * @csize: number of bytes to copy |
1147 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
1148 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
1149 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
1150 |
+- * |
1151 |
+- * Copy a page from "oldmem". For this page, there is no pte mapped |
1152 |
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
1153 |
+- */ |
1154 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
1155 |
+- size_t csize, unsigned long offset, int userbuf) |
1156 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
1157 |
++ size_t csize, unsigned long offset) |
1158 |
+ { |
1159 |
+ void __iomem *vaddr; |
1160 |
+ |
1161 |
+@@ -32,15 +20,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
1162 |
+ return 0; |
1163 |
+ |
1164 |
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); |
1165 |
+- |
1166 |
+- if (userbuf) { |
1167 |
+- if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) { |
1168 |
+- iounmap(vaddr); |
1169 |
+- return -EFAULT; |
1170 |
+- } |
1171 |
+- } else |
1172 |
+- memcpy(buf, (vaddr + offset), csize); |
1173 |
+- |
1174 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
1175 |
+ iounmap(vaddr); |
1176 |
++ |
1177 |
+ return csize; |
1178 |
+ } |
1179 |
+diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c |
1180 |
+index 5fcac46aaf6b1..5f4ae5476e193 100644 |
1181 |
+--- a/arch/x86/kernel/crash_dump_32.c |
1182 |
++++ b/arch/x86/kernel/crash_dump_32.c |
1183 |
+@@ -10,8 +10,7 @@ |
1184 |
+ #include <linux/errno.h> |
1185 |
+ #include <linux/highmem.h> |
1186 |
+ #include <linux/crash_dump.h> |
1187 |
+- |
1188 |
+-#include <linux/uaccess.h> |
1189 |
++#include <linux/uio.h> |
1190 |
+ |
1191 |
+ static inline bool is_crashed_pfn_valid(unsigned long pfn) |
1192 |
+ { |
1193 |
+@@ -29,21 +28,8 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn) |
1194 |
+ #endif |
1195 |
+ } |
1196 |
+ |
1197 |
+-/** |
1198 |
+- * copy_oldmem_page - copy one page from "oldmem" |
1199 |
+- * @pfn: page frame number to be copied |
1200 |
+- * @buf: target memory address for the copy; this can be in kernel address |
1201 |
+- * space or user address space (see @userbuf) |
1202 |
+- * @csize: number of bytes to copy |
1203 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
1204 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
1205 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
1206 |
+- * |
1207 |
+- * Copy a page from "oldmem". For this page, there might be no pte mapped |
1208 |
+- * in the current kernel. |
1209 |
+- */ |
1210 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1211 |
+- unsigned long offset, int userbuf) |
1212 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, |
1213 |
++ unsigned long offset) |
1214 |
+ { |
1215 |
+ void *vaddr; |
1216 |
+ |
1217 |
+@@ -54,14 +40,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1218 |
+ return -EFAULT; |
1219 |
+ |
1220 |
+ vaddr = kmap_local_pfn(pfn); |
1221 |
+- |
1222 |
+- if (!userbuf) { |
1223 |
+- memcpy(buf, vaddr + offset, csize); |
1224 |
+- } else { |
1225 |
+- if (copy_to_user(buf, vaddr + offset, csize)) |
1226 |
+- csize = -EFAULT; |
1227 |
+- } |
1228 |
+- |
1229 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
1230 |
+ kunmap_local(vaddr); |
1231 |
+ |
1232 |
+ return csize; |
1233 |
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c |
1234 |
+index 97529552dd249..94fe4aff9694b 100644 |
1235 |
+--- a/arch/x86/kernel/crash_dump_64.c |
1236 |
++++ b/arch/x86/kernel/crash_dump_64.c |
1237 |
+@@ -8,12 +8,12 @@ |
1238 |
+ |
1239 |
+ #include <linux/errno.h> |
1240 |
+ #include <linux/crash_dump.h> |
1241 |
+-#include <linux/uaccess.h> |
1242 |
++#include <linux/uio.h> |
1243 |
+ #include <linux/io.h> |
1244 |
+ #include <linux/cc_platform.h> |
1245 |
+ |
1246 |
+-static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1247 |
+- unsigned long offset, int userbuf, |
1248 |
++static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, |
1249 |
++ size_t csize, unsigned long offset, |
1250 |
+ bool encrypted) |
1251 |
+ { |
1252 |
+ void *vaddr; |
1253 |
+@@ -29,46 +29,27 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1254 |
+ if (!vaddr) |
1255 |
+ return -ENOMEM; |
1256 |
+ |
1257 |
+- if (userbuf) { |
1258 |
+- if (copy_to_user((void __user *)buf, vaddr + offset, csize)) { |
1259 |
+- iounmap((void __iomem *)vaddr); |
1260 |
+- return -EFAULT; |
1261 |
+- } |
1262 |
+- } else |
1263 |
+- memcpy(buf, vaddr + offset, csize); |
1264 |
++ csize = copy_to_iter(vaddr + offset, csize, iter); |
1265 |
+ |
1266 |
+ iounmap((void __iomem *)vaddr); |
1267 |
+ return csize; |
1268 |
+ } |
1269 |
+ |
1270 |
+-/** |
1271 |
+- * copy_oldmem_page - copy one page of memory |
1272 |
+- * @pfn: page frame number to be copied |
1273 |
+- * @buf: target memory address for the copy; this can be in kernel address |
1274 |
+- * space or user address space (see @userbuf) |
1275 |
+- * @csize: number of bytes to copy |
1276 |
+- * @offset: offset in bytes into the page (based on pfn) to begin the copy |
1277 |
+- * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
1278 |
+- * otherwise @buf is in kernel address space, use memcpy(). |
1279 |
+- * |
1280 |
+- * Copy a page from the old kernel's memory. For this page, there is no pte |
1281 |
+- * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic. |
1282 |
+- */ |
1283 |
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
1284 |
+- unsigned long offset, int userbuf) |
1285 |
++ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, |
1286 |
++ unsigned long offset) |
1287 |
+ { |
1288 |
+- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false); |
1289 |
++ return __copy_oldmem_page(iter, pfn, csize, offset, false); |
1290 |
+ } |
1291 |
+ |
1292 |
+-/** |
1293 |
++/* |
1294 |
+ * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the |
1295 |
+ * memory with the encryption mask set to accommodate kdump on SME-enabled |
1296 |
+ * machines. |
1297 |
+ */ |
1298 |
+-ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, |
1299 |
+- unsigned long offset, int userbuf) |
1300 |
++ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn, |
1301 |
++ size_t csize, unsigned long offset) |
1302 |
+ { |
1303 |
+- return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true); |
1304 |
++ return __copy_oldmem_page(iter, pfn, csize, offset, true); |
1305 |
+ } |
1306 |
+ |
1307 |
+ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) |
1308 |
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c |
1309 |
+index 4b7d490c0b639..76e9e6eb71d63 100644 |
1310 |
+--- a/arch/x86/kvm/svm/sev.c |
1311 |
++++ b/arch/x86/kvm/svm/sev.c |
1312 |
+@@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) |
1313 |
+ { |
1314 |
+ struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info; |
1315 |
+ struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info; |
1316 |
++ struct kvm_vcpu *dst_vcpu, *src_vcpu; |
1317 |
++ struct vcpu_svm *dst_svm, *src_svm; |
1318 |
+ struct kvm_sev_info *mirror; |
1319 |
++ unsigned long i; |
1320 |
+ |
1321 |
+ dst->active = true; |
1322 |
+ dst->asid = src->asid; |
1323 |
+ dst->handle = src->handle; |
1324 |
+ dst->pages_locked = src->pages_locked; |
1325 |
+ dst->enc_context_owner = src->enc_context_owner; |
1326 |
++ dst->es_active = src->es_active; |
1327 |
+ |
1328 |
+ src->asid = 0; |
1329 |
+ src->active = false; |
1330 |
+ src->handle = 0; |
1331 |
+ src->pages_locked = 0; |
1332 |
+ src->enc_context_owner = NULL; |
1333 |
++ src->es_active = false; |
1334 |
+ |
1335 |
+ list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); |
1336 |
+ |
1337 |
+@@ -1704,26 +1709,21 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) |
1338 |
+ list_del(&src->mirror_entry); |
1339 |
+ list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms); |
1340 |
+ } |
1341 |
+-} |
1342 |
+ |
1343 |
+-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) |
1344 |
+-{ |
1345 |
+- unsigned long i; |
1346 |
+- struct kvm_vcpu *dst_vcpu, *src_vcpu; |
1347 |
+- struct vcpu_svm *dst_svm, *src_svm; |
1348 |
++ kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) { |
1349 |
++ dst_svm = to_svm(dst_vcpu); |
1350 |
+ |
1351 |
+- if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) |
1352 |
+- return -EINVAL; |
1353 |
++ sev_init_vmcb(dst_svm); |
1354 |
+ |
1355 |
+- kvm_for_each_vcpu(i, src_vcpu, src) { |
1356 |
+- if (!src_vcpu->arch.guest_state_protected) |
1357 |
+- return -EINVAL; |
1358 |
+- } |
1359 |
++ if (!dst->es_active) |
1360 |
++ continue; |
1361 |
+ |
1362 |
+- kvm_for_each_vcpu(i, src_vcpu, src) { |
1363 |
++ /* |
1364 |
++ * Note, the source is not required to have the same number of |
1365 |
++ * vCPUs as the destination when migrating a vanilla SEV VM. |
1366 |
++ */ |
1367 |
++ src_vcpu = kvm_get_vcpu(dst_kvm, i); |
1368 |
+ src_svm = to_svm(src_vcpu); |
1369 |
+- dst_vcpu = kvm_get_vcpu(dst, i); |
1370 |
+- dst_svm = to_svm(dst_vcpu); |
1371 |
+ |
1372 |
+ /* |
1373 |
+ * Transfer VMSA and GHCB state to the destination. Nullify and |
1374 |
+@@ -1740,8 +1740,23 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) |
1375 |
+ src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; |
1376 |
+ src_vcpu->arch.guest_state_protected = false; |
1377 |
+ } |
1378 |
+- to_kvm_svm(src)->sev_info.es_active = false; |
1379 |
+- to_kvm_svm(dst)->sev_info.es_active = true; |
1380 |
++} |
1381 |
++ |
1382 |
++static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src) |
1383 |
++{ |
1384 |
++ struct kvm_vcpu *src_vcpu; |
1385 |
++ unsigned long i; |
1386 |
++ |
1387 |
++ if (!sev_es_guest(src)) |
1388 |
++ return 0; |
1389 |
++ |
1390 |
++ if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) |
1391 |
++ return -EINVAL; |
1392 |
++ |
1393 |
++ kvm_for_each_vcpu(i, src_vcpu, src) { |
1394 |
++ if (!src_vcpu->arch.guest_state_protected) |
1395 |
++ return -EINVAL; |
1396 |
++ } |
1397 |
+ |
1398 |
+ return 0; |
1399 |
+ } |
1400 |
+@@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) |
1401 |
+ if (ret) |
1402 |
+ goto out_dst_vcpu; |
1403 |
+ |
1404 |
+- if (sev_es_guest(source_kvm)) { |
1405 |
+- ret = sev_es_migrate_from(kvm, source_kvm); |
1406 |
+- if (ret) |
1407 |
+- goto out_source_vcpu; |
1408 |
+- } |
1409 |
++ ret = sev_check_source_vcpus(kvm, source_kvm); |
1410 |
++ if (ret) |
1411 |
++ goto out_source_vcpu; |
1412 |
+ |
1413 |
+ sev_migrate_from(kvm, source_kvm); |
1414 |
+ kvm_vm_dead(source_kvm); |
1415 |
+@@ -2910,7 +2923,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) |
1416 |
+ count, in); |
1417 |
+ } |
1418 |
+ |
1419 |
+-void sev_es_init_vmcb(struct vcpu_svm *svm) |
1420 |
++static void sev_es_init_vmcb(struct vcpu_svm *svm) |
1421 |
+ { |
1422 |
+ struct kvm_vcpu *vcpu = &svm->vcpu; |
1423 |
+ |
1424 |
+@@ -2955,6 +2968,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm) |
1425 |
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); |
1426 |
+ } |
1427 |
+ |
1428 |
++void sev_init_vmcb(struct vcpu_svm *svm) |
1429 |
++{ |
1430 |
++ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
1431 |
++ clr_exception_intercept(svm, UD_VECTOR); |
1432 |
++ |
1433 |
++ if (sev_es_guest(svm->vcpu.kvm)) |
1434 |
++ sev_es_init_vmcb(svm); |
1435 |
++} |
1436 |
++ |
1437 |
+ void sev_es_vcpu_reset(struct vcpu_svm *svm) |
1438 |
+ { |
1439 |
+ /* |
1440 |
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c |
1441 |
+index 0c0a09b43b105..6bfb0b0e66bd3 100644 |
1442 |
+--- a/arch/x86/kvm/svm/svm.c |
1443 |
++++ b/arch/x86/kvm/svm/svm.c |
1444 |
+@@ -1125,15 +1125,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu) |
1445 |
+ svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
1446 |
+ } |
1447 |
+ |
1448 |
+- if (sev_guest(vcpu->kvm)) { |
1449 |
+- svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
1450 |
+- clr_exception_intercept(svm, UD_VECTOR); |
1451 |
+- |
1452 |
+- if (sev_es_guest(vcpu->kvm)) { |
1453 |
+- /* Perform SEV-ES specific VMCB updates */ |
1454 |
+- sev_es_init_vmcb(svm); |
1455 |
+- } |
1456 |
+- } |
1457 |
++ if (sev_guest(vcpu->kvm)) |
1458 |
++ sev_init_vmcb(svm); |
1459 |
+ |
1460 |
+ svm_hv_init_vmcb(svm->vmcb); |
1461 |
+ init_vmcb_after_set_cpuid(vcpu); |
1462 |
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h |
1463 |
+index 34babf9185fe5..8ec8fb58b924a 100644 |
1464 |
+--- a/arch/x86/kvm/svm/svm.h |
1465 |
++++ b/arch/x86/kvm/svm/svm.h |
1466 |
+@@ -616,10 +616,10 @@ void __init sev_set_cpu_caps(void); |
1467 |
+ void __init sev_hardware_setup(void); |
1468 |
+ void sev_hardware_unsetup(void); |
1469 |
+ int sev_cpu_init(struct svm_cpu_data *sd); |
1470 |
++void sev_init_vmcb(struct vcpu_svm *svm); |
1471 |
+ void sev_free_vcpu(struct kvm_vcpu *vcpu); |
1472 |
+ int sev_handle_vmgexit(struct kvm_vcpu *vcpu); |
1473 |
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in); |
1474 |
+-void sev_es_init_vmcb(struct vcpu_svm *svm); |
1475 |
+ void sev_es_vcpu_reset(struct vcpu_svm *svm); |
1476 |
+ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); |
1477 |
+ void sev_es_prepare_switch_to_guest(struct vmcb_save_area *hostsa); |
1478 |
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c |
1479 |
+index 16b6efacf7c67..4c71fa04e784c 100644 |
1480 |
+--- a/arch/x86/net/bpf_jit_comp.c |
1481 |
++++ b/arch/x86/net/bpf_jit_comp.c |
1482 |
+@@ -1415,8 +1415,9 @@ st: if (is_imm8(insn->off)) |
1483 |
+ case BPF_JMP | BPF_CALL: |
1484 |
+ func = (u8 *) __bpf_call_base + imm32; |
1485 |
+ if (tail_call_reachable) { |
1486 |
++ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ |
1487 |
+ EMIT3_off32(0x48, 0x8B, 0x85, |
1488 |
+- -(bpf_prog->aux->stack_depth + 8)); |
1489 |
++ -round_up(bpf_prog->aux->stack_depth, 8) - 8); |
1490 |
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) |
1491 |
+ return -EINVAL; |
1492 |
+ } else { |
1493 |
+diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c |
1494 |
+index e8ceb15286081..16b8a6273772c 100644 |
1495 |
+--- a/arch/xtensa/kernel/time.c |
1496 |
++++ b/arch/xtensa/kernel/time.c |
1497 |
+@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void) |
1498 |
+ cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); |
1499 |
+ if (cpu) { |
1500 |
+ clk = of_clk_get(cpu, 0); |
1501 |
++ of_node_put(cpu); |
1502 |
+ if (!IS_ERR(clk)) { |
1503 |
+ ccount_freq = clk_get_rate(clk); |
1504 |
+ return; |
1505 |
+diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c |
1506 |
+index 538e6748e85a7..c79c1d09ea863 100644 |
1507 |
+--- a/arch/xtensa/platforms/xtfpga/setup.c |
1508 |
++++ b/arch/xtensa/platforms/xtfpga/setup.c |
1509 |
+@@ -133,6 +133,7 @@ static int __init machine_setup(void) |
1510 |
+ |
1511 |
+ if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) |
1512 |
+ update_local_mac(eth); |
1513 |
++ of_node_put(eth); |
1514 |
+ return 0; |
1515 |
+ } |
1516 |
+ arch_initcall(machine_setup); |
1517 |
+diff --git a/block/blk-core.c b/block/blk-core.c |
1518 |
+index 84f7b7884d072..a7329475aba25 100644 |
1519 |
+--- a/block/blk-core.c |
1520 |
++++ b/block/blk-core.c |
1521 |
+@@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q) |
1522 |
+ blk_mq_exit_queue(q); |
1523 |
+ } |
1524 |
+ |
1525 |
+- /* |
1526 |
+- * In theory, request pool of sched_tags belongs to request queue. |
1527 |
+- * However, the current implementation requires tag_set for freeing |
1528 |
+- * requests, so free the pool now. |
1529 |
+- * |
1530 |
+- * Queue has become frozen, there can't be any in-queue requests, so |
1531 |
+- * it is safe to free requests now. |
1532 |
+- */ |
1533 |
+- mutex_lock(&q->sysfs_lock); |
1534 |
+- if (q->elevator) |
1535 |
+- blk_mq_sched_free_rqs(q); |
1536 |
+- mutex_unlock(&q->sysfs_lock); |
1537 |
+- |
1538 |
+ /* @q is and will stay empty, shutdown and put */ |
1539 |
+ blk_put_queue(q); |
1540 |
+ } |
1541 |
+diff --git a/block/blk-mq.c b/block/blk-mq.c |
1542 |
+index 631fb87b4976f..37caa73bff893 100644 |
1543 |
+--- a/block/blk-mq.c |
1544 |
++++ b/block/blk-mq.c |
1545 |
+@@ -2777,15 +2777,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q, |
1546 |
+ return NULL; |
1547 |
+ } |
1548 |
+ |
1549 |
+- rq_qos_throttle(q, *bio); |
1550 |
+- |
1551 |
+ if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) |
1552 |
+ return NULL; |
1553 |
+ if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) |
1554 |
+ return NULL; |
1555 |
+ |
1556 |
+- rq->cmd_flags = (*bio)->bi_opf; |
1557 |
++ /* |
1558 |
++ * If any qos ->throttle() end up blocking, we will have flushed the |
1559 |
++ * plug and hence killed the cached_rq list as well. Pop this entry |
1560 |
++ * before we throttle. |
1561 |
++ */ |
1562 |
+ plug->cached_rq = rq_list_next(rq); |
1563 |
++ rq_qos_throttle(q, *bio); |
1564 |
++ |
1565 |
++ rq->cmd_flags = (*bio)->bi_opf; |
1566 |
+ INIT_LIST_HEAD(&rq->queuelist); |
1567 |
+ return rq; |
1568 |
+ } |
1569 |
+diff --git a/block/genhd.c b/block/genhd.c |
1570 |
+index 3008ec2136543..13daac1a9aefa 100644 |
1571 |
+--- a/block/genhd.c |
1572 |
++++ b/block/genhd.c |
1573 |
+@@ -652,6 +652,17 @@ void del_gendisk(struct gendisk *disk) |
1574 |
+ |
1575 |
+ blk_sync_queue(q); |
1576 |
+ blk_flush_integrity(); |
1577 |
++ blk_mq_cancel_work_sync(q); |
1578 |
++ |
1579 |
++ blk_mq_quiesce_queue(q); |
1580 |
++ if (q->elevator) { |
1581 |
++ mutex_lock(&q->sysfs_lock); |
1582 |
++ elevator_exit(q); |
1583 |
++ mutex_unlock(&q->sysfs_lock); |
1584 |
++ } |
1585 |
++ rq_qos_exit(q); |
1586 |
++ blk_mq_unquiesce_queue(q); |
1587 |
++ |
1588 |
+ /* |
1589 |
+ * Allow using passthrough request again after the queue is torn down. |
1590 |
+ */ |
1591 |
+@@ -1120,31 +1131,6 @@ static const struct attribute_group *disk_attr_groups[] = { |
1592 |
+ NULL |
1593 |
+ }; |
1594 |
+ |
1595 |
+-static void disk_release_mq(struct request_queue *q) |
1596 |
+-{ |
1597 |
+- blk_mq_cancel_work_sync(q); |
1598 |
+- |
1599 |
+- /* |
1600 |
+- * There can't be any non non-passthrough bios in flight here, but |
1601 |
+- * requests stay around longer, including passthrough ones so we |
1602 |
+- * still need to freeze the queue here. |
1603 |
+- */ |
1604 |
+- blk_mq_freeze_queue(q); |
1605 |
+- |
1606 |
+- /* |
1607 |
+- * Since the I/O scheduler exit code may access cgroup information, |
1608 |
+- * perform I/O scheduler exit before disassociating from the block |
1609 |
+- * cgroup controller. |
1610 |
+- */ |
1611 |
+- if (q->elevator) { |
1612 |
+- mutex_lock(&q->sysfs_lock); |
1613 |
+- elevator_exit(q); |
1614 |
+- mutex_unlock(&q->sysfs_lock); |
1615 |
+- } |
1616 |
+- rq_qos_exit(q); |
1617 |
+- __blk_mq_unfreeze_queue(q, true); |
1618 |
+-} |
1619 |
+- |
1620 |
+ /** |
1621 |
+ * disk_release - releases all allocated resources of the gendisk |
1622 |
+ * @dev: the device representing this disk |
1623 |
+@@ -1166,9 +1152,6 @@ static void disk_release(struct device *dev) |
1624 |
+ might_sleep(); |
1625 |
+ WARN_ON_ONCE(disk_live(disk)); |
1626 |
+ |
1627 |
+- if (queue_is_mq(disk->queue)) |
1628 |
+- disk_release_mq(disk->queue); |
1629 |
+- |
1630 |
+ blkcg_exit_queue(disk->queue); |
1631 |
+ |
1632 |
+ disk_release_events(disk); |
1633 |
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c |
1634 |
+index 084d67fd55cc8..bc60c9cd32308 100644 |
1635 |
+--- a/drivers/base/memory.c |
1636 |
++++ b/drivers/base/memory.c |
1637 |
+@@ -558,7 +558,7 @@ static ssize_t hard_offline_page_store(struct device *dev, |
1638 |
+ if (kstrtoull(buf, 0, &pfn) < 0) |
1639 |
+ return -EINVAL; |
1640 |
+ pfn >>= PAGE_SHIFT; |
1641 |
+- ret = memory_failure(pfn, 0); |
1642 |
++ ret = memory_failure(pfn, MF_SW_SIMULATED); |
1643 |
+ if (ret == -EOPNOTSUPP) |
1644 |
+ ret = 0; |
1645 |
+ return ret ? ret : count; |
1646 |
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c |
1647 |
+index 400c7412a7dcf..a6db605707b00 100644 |
1648 |
+--- a/drivers/base/regmap/regmap-irq.c |
1649 |
++++ b/drivers/base/regmap/regmap-irq.c |
1650 |
+@@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data) |
1651 |
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); |
1652 |
+ struct regmap *map = d->map; |
1653 |
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); |
1654 |
++ unsigned int reg = irq_data->reg_offset / map->reg_stride; |
1655 |
+ unsigned int mask, type; |
1656 |
+ |
1657 |
+ type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; |
1658 |
+@@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data) |
1659 |
+ * at the corresponding offset in regmap_irq_set_type(). |
1660 |
+ */ |
1661 |
+ if (d->chip->type_in_mask && type) |
1662 |
+- mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; |
1663 |
++ mask = d->type_buf[reg] & irq_data->mask; |
1664 |
+ else |
1665 |
+ mask = irq_data->mask; |
1666 |
+ |
1667 |
+ if (d->chip->clear_on_unmask) |
1668 |
+ d->clear_status = true; |
1669 |
+ |
1670 |
+- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; |
1671 |
++ d->mask_buf[reg] &= ~mask; |
1672 |
+ } |
1673 |
+ |
1674 |
+ static void regmap_irq_disable(struct irq_data *data) |
1675 |
+@@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, |
1676 |
+ subreg = &chip->sub_reg_offsets[b]; |
1677 |
+ for (i = 0; i < subreg->num_regs; i++) { |
1678 |
+ unsigned int offset = subreg->offset[i]; |
1679 |
++ unsigned int index = offset / map->reg_stride; |
1680 |
+ |
1681 |
+ if (chip->not_fixed_stride) |
1682 |
+ ret = regmap_read(map, |
1683 |
+@@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, |
1684 |
+ else |
1685 |
+ ret = regmap_read(map, |
1686 |
+ chip->status_base + offset, |
1687 |
+- &data->status_buf[offset]); |
1688 |
++ &data->status_buf[index]); |
1689 |
+ |
1690 |
+ if (ret) |
1691 |
+ break; |
1692 |
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c |
1693 |
+index 003056d4f7f5f..966a6bf4c1627 100644 |
1694 |
+--- a/drivers/block/xen-blkfront.c |
1695 |
++++ b/drivers/block/xen-blkfront.c |
1696 |
+@@ -2137,9 +2137,11 @@ static void blkfront_closing(struct blkfront_info *info) |
1697 |
+ return; |
1698 |
+ |
1699 |
+ /* No more blkif_request(). */ |
1700 |
+- blk_mq_stop_hw_queues(info->rq); |
1701 |
+- blk_mark_disk_dead(info->gd); |
1702 |
+- set_capacity(info->gd, 0); |
1703 |
++ if (info->rq && info->gd) { |
1704 |
++ blk_mq_stop_hw_queues(info->rq); |
1705 |
++ blk_mark_disk_dead(info->gd); |
1706 |
++ set_capacity(info->gd, 0); |
1707 |
++ } |
1708 |
+ |
1709 |
+ for_each_rinfo(info, rinfo, i) { |
1710 |
+ /* No more gnttab callback work. */ |
1711 |
+@@ -2480,16 +2482,19 @@ static int blkfront_remove(struct xenbus_device *xbdev) |
1712 |
+ |
1713 |
+ dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); |
1714 |
+ |
1715 |
+- del_gendisk(info->gd); |
1716 |
++ if (info->gd) |
1717 |
++ del_gendisk(info->gd); |
1718 |
+ |
1719 |
+ mutex_lock(&blkfront_mutex); |
1720 |
+ list_del(&info->info_list); |
1721 |
+ mutex_unlock(&blkfront_mutex); |
1722 |
+ |
1723 |
+ blkif_free(info, 0); |
1724 |
+- xlbd_release_minors(info->gd->first_minor, info->gd->minors); |
1725 |
+- blk_cleanup_disk(info->gd); |
1726 |
+- blk_mq_free_tag_set(&info->tag_set); |
1727 |
++ if (info->gd) { |
1728 |
++ xlbd_release_minors(info->gd->first_minor, info->gd->minors); |
1729 |
++ blk_cleanup_disk(info->gd); |
1730 |
++ blk_mq_free_tag_set(&info->tag_set); |
1731 |
++ } |
1732 |
+ |
1733 |
+ kfree(info); |
1734 |
+ return 0; |
1735 |
+diff --git a/drivers/char/random.c b/drivers/char/random.c |
1736 |
+index 1f3072ee6b7cd..dd52e948a9a48 100644 |
1737 |
+--- a/drivers/char/random.c |
1738 |
++++ b/drivers/char/random.c |
1739 |
+@@ -87,7 +87,7 @@ static RAW_NOTIFIER_HEAD(random_ready_chain); |
1740 |
+ |
1741 |
+ /* Control how we warn userspace. */ |
1742 |
+ static struct ratelimit_state urandom_warning = |
1743 |
+- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); |
1744 |
++ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); |
1745 |
+ static int ratelimit_disable __read_mostly = |
1746 |
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); |
1747 |
+ module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); |
1748 |
+@@ -451,7 +451,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) |
1749 |
+ |
1750 |
+ /* |
1751 |
+ * Immediately overwrite the ChaCha key at index 4 with random |
1752 |
+- * bytes, in case userspace causes copy_to_user() below to sleep |
1753 |
++ * bytes, in case userspace causes copy_to_iter() below to sleep |
1754 |
+ * forever, so that we still retain forward secrecy in that case. |
1755 |
+ */ |
1756 |
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); |
1757 |
+@@ -1038,7 +1038,7 @@ void add_interrupt_randomness(int irq) |
1758 |
+ if (new_count & MIX_INFLIGHT) |
1759 |
+ return; |
1760 |
+ |
1761 |
+- if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) |
1762 |
++ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) |
1763 |
+ return; |
1764 |
+ |
1765 |
+ if (unlikely(!fast_pool->mix.func)) |
1766 |
+diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c |
1767 |
+index e7330684d3b82..9631f2fd2faf7 100644 |
1768 |
+--- a/drivers/dma-buf/udmabuf.c |
1769 |
++++ b/drivers/dma-buf/udmabuf.c |
1770 |
+@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) |
1771 |
+ { |
1772 |
+ struct vm_area_struct *vma = vmf->vma; |
1773 |
+ struct udmabuf *ubuf = vma->vm_private_data; |
1774 |
++ pgoff_t pgoff = vmf->pgoff; |
1775 |
+ |
1776 |
+- vmf->page = ubuf->pages[vmf->pgoff]; |
1777 |
++ if (pgoff >= ubuf->pagecount) |
1778 |
++ return VM_FAULT_SIGBUS; |
1779 |
++ vmf->page = ubuf->pages[pgoff]; |
1780 |
+ get_page(vmf->page); |
1781 |
+ return 0; |
1782 |
+ } |
1783 |
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c |
1784 |
+index 98cd715ccc33c..8d09b619c1669 100644 |
1785 |
+--- a/drivers/gpio/gpio-vr41xx.c |
1786 |
++++ b/drivers/gpio/gpio-vr41xx.c |
1787 |
+@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq) |
1788 |
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", |
1789 |
+ maskl, pendl, maskh, pendh); |
1790 |
+ |
1791 |
+- atomic_inc(&irq_err_count); |
1792 |
+- |
1793 |
+ return -EINVAL; |
1794 |
+ } |
1795 |
+ |
1796 |
+diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c |
1797 |
+index 7f8f5b02e31d5..4b61d975cc0ec 100644 |
1798 |
+--- a/drivers/gpio/gpio-winbond.c |
1799 |
++++ b/drivers/gpio/gpio-winbond.c |
1800 |
+@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset) |
1801 |
+ unsigned long *base = gpiochip_get_data(gc); |
1802 |
+ const struct winbond_gpio_info *info; |
1803 |
+ bool val; |
1804 |
++ int ret; |
1805 |
+ |
1806 |
+ winbond_gpio_get_info(&offset, &info); |
1807 |
+ |
1808 |
+- val = winbond_sio_enter(*base); |
1809 |
+- if (val) |
1810 |
+- return val; |
1811 |
++ ret = winbond_sio_enter(*base); |
1812 |
++ if (ret) |
1813 |
++ return ret; |
1814 |
+ |
1815 |
+ winbond_sio_select_logical(*base, info->dev); |
1816 |
+ |
1817 |
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1818 |
+index 95b5b5bfa1ffa..71b15e2df235b 100644 |
1819 |
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1820 |
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c |
1821 |
+@@ -944,7 +944,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti |
1822 |
+ |
1823 |
+ return; |
1824 |
+ |
1825 |
+- for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) { |
1826 |
++ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { |
1827 |
+ if (lt_settings->voltage_swing) |
1828 |
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; |
1829 |
+ if (lt_settings->pre_emphasis) |
1830 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1831 |
+index 248602c15f3a0..6007b847b54f2 100644 |
1832 |
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1833 |
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |
1834 |
+@@ -1771,29 +1771,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) |
1835 |
+ break; |
1836 |
+ } |
1837 |
+ } |
1838 |
+- |
1839 |
+- /* |
1840 |
+- * TO-DO: So far the code logic below only addresses single eDP case. |
1841 |
+- * For dual eDP case, there are a few things that need to be |
1842 |
+- * implemented first: |
1843 |
+- * |
1844 |
+- * 1. Change the fastboot logic above, so eDP link[0 or 1]'s |
1845 |
+- * stream[0 or 1] will all be checked. |
1846 |
+- * |
1847 |
+- * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on |
1848 |
+- * for each eDP. |
1849 |
+- * |
1850 |
+- * Once above 2 things are completed, we can then change the logic below |
1851 |
+- * correspondingly, so dual eDP case will be fully covered. |
1852 |
+- */ |
1853 |
+- |
1854 |
+- // We are trying to enable eDP, don't power down VDD if eDP stream is existing |
1855 |
+- if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) { |
1856 |
++ // We are trying to enable eDP, don't power down VDD |
1857 |
++ if (can_apply_edp_fast_boot) |
1858 |
+ keep_edp_vdd_on = true; |
1859 |
+- DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n"); |
1860 |
+- } else { |
1861 |
+- DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n"); |
1862 |
+- } |
1863 |
+ } |
1864 |
+ |
1865 |
+ // Check seamless boot support |
1866 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c |
1867 |
+index 970b65efeac10..eaa7032f0f1a3 100644 |
1868 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c |
1869 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c |
1870 |
+@@ -212,6 +212,9 @@ static void dpp2_cnv_setup ( |
1871 |
+ break; |
1872 |
+ } |
1873 |
+ |
1874 |
++ /* Set default color space based on format if none is given. */ |
1875 |
++ color_space = input_color_space ? input_color_space : color_space; |
1876 |
++ |
1877 |
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) { |
1878 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); |
1879 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); |
1880 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c |
1881 |
+index 8b6505b7dca86..f50ab961bc174 100644 |
1882 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c |
1883 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c |
1884 |
+@@ -153,6 +153,9 @@ static void dpp201_cnv_setup( |
1885 |
+ break; |
1886 |
+ } |
1887 |
+ |
1888 |
++ /* Set default color space based on format if none is given. */ |
1889 |
++ color_space = input_color_space ? input_color_space : color_space; |
1890 |
++ |
1891 |
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) { |
1892 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); |
1893 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); |
1894 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c |
1895 |
+index ab3918c0a15b0..0dcc07531643f 100644 |
1896 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c |
1897 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c |
1898 |
+@@ -294,6 +294,9 @@ static void dpp3_cnv_setup ( |
1899 |
+ break; |
1900 |
+ } |
1901 |
+ |
1902 |
++ /* Set default color space based on format if none is given. */ |
1903 |
++ color_space = input_color_space ? input_color_space : color_space; |
1904 |
++ |
1905 |
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) { |
1906 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); |
1907 |
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); |
1908 |
+diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c |
1909 |
+index 569903d47aea5..a76f037001aec 100644 |
1910 |
+--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c |
1911 |
++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c |
1912 |
+@@ -2437,7 +2437,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params, |
1913 |
+ } |
1914 |
+ |
1915 |
+ /* |
1916 |
+- * Display WA #22010492432: ehl, tgl, adl-p |
1917 |
++ * Display WA #22010492432: ehl, tgl, adl-s, adl-p |
1918 |
+ * Program half of the nominal DCO divider fraction value. |
1919 |
+ */ |
1920 |
+ static bool |
1921 |
+@@ -2445,7 +2445,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) |
1922 |
+ { |
1923 |
+ return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && |
1924 |
+ IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || |
1925 |
+- IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) && |
1926 |
++ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && |
1927 |
+ i915->dpll.ref_clks.nssc == 38400; |
1928 |
+ } |
1929 |
+ |
1930 |
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
1931 |
+index 1219f71629a52..1ced7b108f2c7 100644 |
1932 |
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
1933 |
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
1934 |
+@@ -1002,7 +1002,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) |
1935 |
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) |
1936 |
+ release_firmware(adreno_gpu->fw[i]); |
1937 |
+ |
1938 |
+- pm_runtime_disable(&priv->gpu_pdev->dev); |
1939 |
++ if (pm_runtime_enabled(&priv->gpu_pdev->dev)) |
1940 |
++ pm_runtime_disable(&priv->gpu_pdev->dev); |
1941 |
+ |
1942 |
+ msm_gpu_cleanup(&adreno_gpu->base); |
1943 |
+ } |
1944 |
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c |
1945 |
+index 3cf476c551584..d92193db7eb2d 100644 |
1946 |
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c |
1947 |
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c |
1948 |
+@@ -217,6 +217,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, |
1949 |
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node); |
1950 |
+ if (IS_ERR(encoder)) { |
1951 |
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); |
1952 |
++ of_node_put(panel_node); |
1953 |
+ return PTR_ERR(encoder); |
1954 |
+ } |
1955 |
+ |
1956 |
+@@ -226,6 +227,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, |
1957 |
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder); |
1958 |
+ if (IS_ERR(connector)) { |
1959 |
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); |
1960 |
++ of_node_put(panel_node); |
1961 |
+ return PTR_ERR(connector); |
1962 |
+ } |
1963 |
+ |
1964 |
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c |
1965 |
+index de1974916ad2d..499d0bbc442c9 100644 |
1966 |
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c |
1967 |
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c |
1968 |
+@@ -1523,6 +1523,8 @@ end: |
1969 |
+ return ret; |
1970 |
+ } |
1971 |
+ |
1972 |
++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl); |
1973 |
++ |
1974 |
+ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) |
1975 |
+ { |
1976 |
+ int ret = 0; |
1977 |
+@@ -1545,7 +1547,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) |
1978 |
+ |
1979 |
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl); |
1980 |
+ if (!ret) |
1981 |
+- ret = dp_ctrl_on_stream(&ctrl->dp_ctrl); |
1982 |
++ ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl); |
1983 |
+ else |
1984 |
+ DRM_ERROR("failed to enable DP link controller\n"); |
1985 |
+ |
1986 |
+@@ -1800,7 +1802,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) |
1987 |
+ return dp_ctrl_setup_main_link(ctrl, &training_step); |
1988 |
+ } |
1989 |
+ |
1990 |
+-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) |
1991 |
++static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl) |
1992 |
++{ |
1993 |
++ int ret; |
1994 |
++ struct dp_ctrl_private *ctrl; |
1995 |
++ |
1996 |
++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); |
1997 |
++ |
1998 |
++ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; |
1999 |
++ |
2000 |
++ ret = dp_ctrl_enable_stream_clocks(ctrl); |
2001 |
++ if (ret) { |
2002 |
++ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); |
2003 |
++ return ret; |
2004 |
++ } |
2005 |
++ |
2006 |
++ dp_ctrl_send_phy_test_pattern(ctrl); |
2007 |
++ |
2008 |
++ return 0; |
2009 |
++} |
2010 |
++ |
2011 |
++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) |
2012 |
+ { |
2013 |
+ int ret = 0; |
2014 |
+ bool mainlink_ready = false; |
2015 |
+@@ -1831,12 +1853,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) |
2016 |
+ goto end; |
2017 |
+ } |
2018 |
+ |
2019 |
+- if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { |
2020 |
+- dp_ctrl_send_phy_test_pattern(ctrl); |
2021 |
+- return 0; |
2022 |
+- } |
2023 |
+- |
2024 |
+- if (!dp_ctrl_channel_eq_ok(ctrl)) |
2025 |
++ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) |
2026 |
+ dp_ctrl_link_retrain(ctrl); |
2027 |
+ |
2028 |
+ /* stop txing train pattern to end link training */ |
2029 |
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h |
2030 |
+index 2433edbc70a6d..dcc7af21a5f05 100644 |
2031 |
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h |
2032 |
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h |
2033 |
+@@ -20,7 +20,7 @@ struct dp_ctrl { |
2034 |
+ }; |
2035 |
+ |
2036 |
+ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); |
2037 |
+-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); |
2038 |
++int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); |
2039 |
+ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); |
2040 |
+ int dp_ctrl_off(struct dp_ctrl *dp_ctrl); |
2041 |
+ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); |
2042 |
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c |
2043 |
+index 8deb92bddfdec..12270bd3cff98 100644 |
2044 |
+--- a/drivers/gpu/drm/msm/dp/dp_display.c |
2045 |
++++ b/drivers/gpu/drm/msm/dp/dp_display.c |
2046 |
+@@ -308,7 +308,8 @@ static void dp_display_unbind(struct device *dev, struct device *master, |
2047 |
+ struct msm_drm_private *priv = dev_get_drvdata(master); |
2048 |
+ |
2049 |
+ /* disable all HPD interrupts */ |
2050 |
+- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); |
2051 |
++ if (dp->core_initialized) |
2052 |
++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); |
2053 |
+ |
2054 |
+ kthread_stop(dp->ev_tsk); |
2055 |
+ |
2056 |
+@@ -902,7 +903,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) |
2057 |
+ return 0; |
2058 |
+ } |
2059 |
+ |
2060 |
+- rc = dp_ctrl_on_stream(dp->ctrl); |
2061 |
++ rc = dp_ctrl_on_stream(dp->ctrl, data); |
2062 |
+ if (!rc) |
2063 |
+ dp_display->power_on = true; |
2064 |
+ |
2065 |
+@@ -1589,6 +1590,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) |
2066 |
+ int rc = 0; |
2067 |
+ struct dp_display_private *dp_display; |
2068 |
+ u32 state; |
2069 |
++ bool force_link_train = false; |
2070 |
+ |
2071 |
+ dp_display = container_of(dp, struct dp_display_private, dp_display); |
2072 |
+ if (!dp_display->dp_mode.drm_mode.clock) { |
2073 |
+@@ -1617,10 +1619,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) |
2074 |
+ |
2075 |
+ state = dp_display->hpd_state; |
2076 |
+ |
2077 |
+- if (state == ST_DISPLAY_OFF) |
2078 |
++ if (state == ST_DISPLAY_OFF) { |
2079 |
+ dp_display_host_phy_init(dp_display); |
2080 |
++ force_link_train = true; |
2081 |
++ } |
2082 |
+ |
2083 |
+- dp_display_enable(dp_display, 0); |
2084 |
++ dp_display_enable(dp_display, force_link_train); |
2085 |
+ |
2086 |
+ rc = dp_display_post_enable(dp); |
2087 |
+ if (rc) { |
2088 |
+@@ -1629,10 +1633,6 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) |
2089 |
+ dp_display_unprepare(dp); |
2090 |
+ } |
2091 |
+ |
2092 |
+- /* manual kick off plug event to train link */ |
2093 |
+- if (state == ST_DISPLAY_OFF) |
2094 |
+- dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); |
2095 |
+- |
2096 |
+ /* completed connection */ |
2097 |
+ dp_display->hpd_state = ST_CONNECTED; |
2098 |
+ |
2099 |
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c |
2100 |
+index f2c46116df55c..b5f6acfe7c6e9 100644 |
2101 |
+--- a/drivers/gpu/drm/msm/msm_drv.c |
2102 |
++++ b/drivers/gpu/drm/msm/msm_drv.c |
2103 |
+@@ -967,7 +967,7 @@ static const struct drm_driver msm_driver = { |
2104 |
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
2105 |
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
2106 |
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, |
2107 |
+- .gem_prime_mmap = drm_gem_prime_mmap, |
2108 |
++ .gem_prime_mmap = msm_gem_prime_mmap, |
2109 |
+ #ifdef CONFIG_DEBUG_FS |
2110 |
+ .debugfs_init = msm_debugfs_init, |
2111 |
+ #endif |
2112 |
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h |
2113 |
+index d661debb50f11..9b985b641319d 100644 |
2114 |
+--- a/drivers/gpu/drm/msm/msm_drv.h |
2115 |
++++ b/drivers/gpu/drm/msm/msm_drv.h |
2116 |
+@@ -288,6 +288,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t |
2117 |
+ void msm_gem_shrinker_init(struct drm_device *dev); |
2118 |
+ void msm_gem_shrinker_cleanup(struct drm_device *dev); |
2119 |
+ |
2120 |
++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
2121 |
+ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); |
2122 |
+ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); |
2123 |
+ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); |
2124 |
+diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c |
2125 |
+index 94ab705e9b8a4..dcc8a573bc762 100644 |
2126 |
+--- a/drivers/gpu/drm/msm/msm_gem_prime.c |
2127 |
++++ b/drivers/gpu/drm/msm/msm_gem_prime.c |
2128 |
+@@ -11,6 +11,21 @@ |
2129 |
+ #include "msm_drv.h" |
2130 |
+ #include "msm_gem.h" |
2131 |
+ |
2132 |
++int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) |
2133 |
++{ |
2134 |
++ int ret; |
2135 |
++ |
2136 |
++ /* Ensure the mmap offset is initialized. We lazily initialize it, |
2137 |
++ * so if it has not been first mmap'd directly as a GEM object, the |
2138 |
++ * mmap offset will not be already initialized. |
2139 |
++ */ |
2140 |
++ ret = drm_gem_create_mmap_offset(obj); |
2141 |
++ if (ret) |
2142 |
++ return ret; |
2143 |
++ |
2144 |
++ return drm_gem_prime_mmap(obj, vma); |
2145 |
++} |
2146 |
++ |
2147 |
+ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) |
2148 |
+ { |
2149 |
+ struct msm_gem_object *msm_obj = to_msm_bo(obj); |
2150 |
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c |
2151 |
+index 58eb3e1662cb9..7d27d7cee688b 100644 |
2152 |
+--- a/drivers/gpu/drm/msm/msm_gpu.c |
2153 |
++++ b/drivers/gpu/drm/msm/msm_gpu.c |
2154 |
+@@ -664,7 +664,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, |
2155 |
+ msm_submit_retire(submit); |
2156 |
+ |
2157 |
+ pm_runtime_mark_last_busy(&gpu->pdev->dev); |
2158 |
+- pm_runtime_put_autosuspend(&gpu->pdev->dev); |
2159 |
+ |
2160 |
+ spin_lock_irqsave(&ring->submit_lock, flags); |
2161 |
+ list_del(&submit->node); |
2162 |
+@@ -678,6 +677,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, |
2163 |
+ msm_devfreq_idle(gpu); |
2164 |
+ mutex_unlock(&gpu->active_lock); |
2165 |
+ |
2166 |
++ pm_runtime_put_autosuspend(&gpu->pdev->dev); |
2167 |
++ |
2168 |
+ msm_gem_submit_put(submit); |
2169 |
+ } |
2170 |
+ |
2171 |
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c |
2172 |
+index bcaddbba564df..a54ed354578b5 100644 |
2173 |
+--- a/drivers/gpu/drm/msm/msm_iommu.c |
2174 |
++++ b/drivers/gpu/drm/msm/msm_iommu.c |
2175 |
+@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, |
2176 |
+ u64 addr = iova; |
2177 |
+ unsigned int i; |
2178 |
+ |
2179 |
+- for_each_sg(sgt->sgl, sg, sgt->nents, i) { |
2180 |
++ for_each_sgtable_sg(sgt, sg, i) { |
2181 |
+ size_t size = sg->length; |
2182 |
+ phys_addr_t phys = sg_phys(sg); |
2183 |
+ |
2184 |
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c |
2185 |
+index 6a9ba8a77c778..4b29de65a5630 100644 |
2186 |
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c |
2187 |
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c |
2188 |
+@@ -73,7 +73,6 @@ static int sun4i_drv_bind(struct device *dev) |
2189 |
+ goto free_drm; |
2190 |
+ } |
2191 |
+ |
2192 |
+- dev_set_drvdata(dev, drm); |
2193 |
+ drm->dev_private = drv; |
2194 |
+ INIT_LIST_HEAD(&drv->frontend_list); |
2195 |
+ INIT_LIST_HEAD(&drv->engine_list); |
2196 |
+@@ -114,6 +113,8 @@ static int sun4i_drv_bind(struct device *dev) |
2197 |
+ |
2198 |
+ drm_fbdev_generic_setup(drm, 32); |
2199 |
+ |
2200 |
++ dev_set_drvdata(dev, drm); |
2201 |
++ |
2202 |
+ return 0; |
2203 |
+ |
2204 |
+ finish_poll: |
2205 |
+@@ -130,6 +131,7 @@ static void sun4i_drv_unbind(struct device *dev) |
2206 |
+ { |
2207 |
+ struct drm_device *drm = dev_get_drvdata(dev); |
2208 |
+ |
2209 |
++ dev_set_drvdata(dev, NULL); |
2210 |
+ drm_dev_unregister(drm); |
2211 |
+ drm_kms_helper_poll_fini(drm); |
2212 |
+ drm_atomic_helper_shutdown(drm); |
2213 |
+diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c |
2214 |
+index 4f73bc827eecb..9c9e985786670 100644 |
2215 |
+--- a/drivers/iio/accel/bma180.c |
2216 |
++++ b/drivers/iio/accel/bma180.c |
2217 |
+@@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client, |
2218 |
+ |
2219 |
+ data->trig->ops = &bma180_trigger_ops; |
2220 |
+ iio_trigger_set_drvdata(data->trig, indio_dev); |
2221 |
+- indio_dev->trig = iio_trigger_get(data->trig); |
2222 |
+ |
2223 |
+ ret = iio_trigger_register(data->trig); |
2224 |
+ if (ret) |
2225 |
+ goto err_trigger_free; |
2226 |
++ |
2227 |
++ indio_dev->trig = iio_trigger_get(data->trig); |
2228 |
+ } |
2229 |
+ |
2230 |
+ ret = iio_triggered_buffer_setup(indio_dev, NULL, |
2231 |
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c |
2232 |
+index ac74cdcd2bc8c..748b35c2f0c37 100644 |
2233 |
+--- a/drivers/iio/accel/kxcjk-1013.c |
2234 |
++++ b/drivers/iio/accel/kxcjk-1013.c |
2235 |
+@@ -1554,12 +1554,12 @@ static int kxcjk1013_probe(struct i2c_client *client, |
2236 |
+ |
2237 |
+ data->dready_trig->ops = &kxcjk1013_trigger_ops; |
2238 |
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev); |
2239 |
+- indio_dev->trig = data->dready_trig; |
2240 |
+- iio_trigger_get(indio_dev->trig); |
2241 |
+ ret = iio_trigger_register(data->dready_trig); |
2242 |
+ if (ret) |
2243 |
+ goto err_poweroff; |
2244 |
+ |
2245 |
++ indio_dev->trig = iio_trigger_get(data->dready_trig); |
2246 |
++ |
2247 |
+ data->motion_trig->ops = &kxcjk1013_trigger_ops; |
2248 |
+ iio_trigger_set_drvdata(data->motion_trig, indio_dev); |
2249 |
+ ret = iio_trigger_register(data->motion_trig); |
2250 |
+diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c |
2251 |
+index 9c02c681c84c3..f4f835274d751 100644 |
2252 |
+--- a/drivers/iio/accel/mma8452.c |
2253 |
++++ b/drivers/iio/accel/mma8452.c |
2254 |
+@@ -1510,10 +1510,14 @@ static int mma8452_reset(struct i2c_client *client) |
2255 |
+ int i; |
2256 |
+ int ret; |
2257 |
+ |
2258 |
+- ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, |
2259 |
++ /* |
2260 |
++ * Find on fxls8471, after config reset bit, it reset immediately, |
2261 |
++ * and will not give ACK, so here do not check the return value. |
2262 |
++ * The following code will read the reset register, and check whether |
2263 |
++ * this reset works. |
2264 |
++ */ |
2265 |
++ i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, |
2266 |
+ MMA8452_CTRL_REG2_RST); |
2267 |
+- if (ret < 0) |
2268 |
+- return ret; |
2269 |
+ |
2270 |
+ for (i = 0; i < 10; i++) { |
2271 |
+ usleep_range(100, 200); |
2272 |
+@@ -1556,11 +1560,13 @@ static int mma8452_probe(struct i2c_client *client, |
2273 |
+ mutex_init(&data->lock); |
2274 |
+ |
2275 |
+ data->chip_info = device_get_match_data(&client->dev); |
2276 |
+- if (!data->chip_info && id) { |
2277 |
+- data->chip_info = &mma_chip_info_table[id->driver_data]; |
2278 |
+- } else { |
2279 |
+- dev_err(&client->dev, "unknown device model\n"); |
2280 |
+- return -ENODEV; |
2281 |
++ if (!data->chip_info) { |
2282 |
++ if (id) { |
2283 |
++ data->chip_info = &mma_chip_info_table[id->driver_data]; |
2284 |
++ } else { |
2285 |
++ dev_err(&client->dev, "unknown device model\n"); |
2286 |
++ return -ENODEV; |
2287 |
++ } |
2288 |
+ } |
2289 |
+ |
2290 |
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation); |
2291 |
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c |
2292 |
+index b3afbf0649152..df600d2917c0a 100644 |
2293 |
+--- a/drivers/iio/accel/mxc4005.c |
2294 |
++++ b/drivers/iio/accel/mxc4005.c |
2295 |
+@@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client, |
2296 |
+ |
2297 |
+ data->dready_trig->ops = &mxc4005_trigger_ops; |
2298 |
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev); |
2299 |
+- indio_dev->trig = data->dready_trig; |
2300 |
+- iio_trigger_get(indio_dev->trig); |
2301 |
+ ret = devm_iio_trigger_register(&client->dev, |
2302 |
+ data->dready_trig); |
2303 |
+ if (ret) { |
2304 |
+@@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client, |
2305 |
+ "failed to register trigger\n"); |
2306 |
+ return ret; |
2307 |
+ } |
2308 |
++ |
2309 |
++ indio_dev->trig = iio_trigger_get(data->dready_trig); |
2310 |
+ } |
2311 |
+ |
2312 |
+ return devm_iio_device_register(&client->dev, indio_dev); |
2313 |
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c |
2314 |
+index a73e3c2d212fa..a9e655e69eaa2 100644 |
2315 |
+--- a/drivers/iio/adc/adi-axi-adc.c |
2316 |
++++ b/drivers/iio/adc/adi-axi-adc.c |
2317 |
+@@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) |
2318 |
+ |
2319 |
+ if (!try_module_get(cl->dev->driver->owner)) { |
2320 |
+ mutex_unlock(®istered_clients_lock); |
2321 |
++ of_node_put(cln); |
2322 |
+ return ERR_PTR(-ENODEV); |
2323 |
+ } |
2324 |
+ |
2325 |
+ get_device(cl->dev); |
2326 |
+ cl->info = info; |
2327 |
+ mutex_unlock(®istered_clients_lock); |
2328 |
++ of_node_put(cln); |
2329 |
+ return cl; |
2330 |
+ } |
2331 |
+ |
2332 |
+ mutex_unlock(®istered_clients_lock); |
2333 |
++ of_node_put(cln); |
2334 |
+ |
2335 |
+ return ERR_PTR(-EPROBE_DEFER); |
2336 |
+ } |
2337 |
+diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c |
2338 |
+index 0793d2474cdcf..9341e0e0eb556 100644 |
2339 |
+--- a/drivers/iio/adc/aspeed_adc.c |
2340 |
++++ b/drivers/iio/adc/aspeed_adc.c |
2341 |
+@@ -186,6 +186,7 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev) |
2342 |
+ return -EOPNOTSUPP; |
2343 |
+ } |
2344 |
+ scu = syscon_node_to_regmap(syscon); |
2345 |
++ of_node_put(syscon); |
2346 |
+ if (IS_ERR(scu)) { |
2347 |
+ dev_warn(data->dev, "Failed to get syscon regmap\n"); |
2348 |
+ return -EOPNOTSUPP; |
2349 |
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c |
2350 |
+index a4b8be5b8f883..580361bd98492 100644 |
2351 |
+--- a/drivers/iio/adc/axp288_adc.c |
2352 |
++++ b/drivers/iio/adc/axp288_adc.c |
2353 |
+@@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = { |
2354 |
+ }, |
2355 |
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, |
2356 |
+ }, |
2357 |
++ { |
2358 |
++ /* Nuvision Solo 10 Draw */ |
2359 |
++ .matches = { |
2360 |
++ DMI_MATCH(DMI_SYS_VENDOR, "TMAX"), |
2361 |
++ DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"), |
2362 |
++ }, |
2363 |
++ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, |
2364 |
++ }, |
2365 |
+ {} |
2366 |
+ }; |
2367 |
+ |
2368 |
+diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c |
2369 |
+index 7585144b9715b..5b09a93fdf34f 100644 |
2370 |
+--- a/drivers/iio/adc/rzg2l_adc.c |
2371 |
++++ b/drivers/iio/adc/rzg2l_adc.c |
2372 |
+@@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l |
2373 |
+ i = 0; |
2374 |
+ device_for_each_child_node(&pdev->dev, fwnode) { |
2375 |
+ ret = fwnode_property_read_u32(fwnode, "reg", &channel); |
2376 |
+- if (ret) |
2377 |
++ if (ret) { |
2378 |
++ fwnode_handle_put(fwnode); |
2379 |
+ return ret; |
2380 |
++ } |
2381 |
+ |
2382 |
+- if (channel >= RZG2L_ADC_MAX_CHANNELS) |
2383 |
++ if (channel >= RZG2L_ADC_MAX_CHANNELS) { |
2384 |
++ fwnode_handle_put(fwnode); |
2385 |
+ return -EINVAL; |
2386 |
++ } |
2387 |
+ |
2388 |
+ chan_array[i].type = IIO_VOLTAGE; |
2389 |
+ chan_array[i].indexed = 1; |
2390 |
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c |
2391 |
+index 1426562321575..3efb8c404ccc3 100644 |
2392 |
+--- a/drivers/iio/adc/stm32-adc-core.c |
2393 |
++++ b/drivers/iio/adc/stm32-adc-core.c |
2394 |
+@@ -64,6 +64,7 @@ struct stm32_adc_priv; |
2395 |
+ * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) |
2396 |
+ * @has_syscfg: SYSCFG capability flags |
2397 |
+ * @num_irqs: number of interrupt lines |
2398 |
++ * @num_adcs: maximum number of ADC instances in the common registers |
2399 |
+ */ |
2400 |
+ struct stm32_adc_priv_cfg { |
2401 |
+ const struct stm32_adc_common_regs *regs; |
2402 |
+@@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg { |
2403 |
+ u32 max_clk_rate_hz; |
2404 |
+ unsigned int has_syscfg; |
2405 |
+ unsigned int num_irqs; |
2406 |
++ unsigned int num_adcs; |
2407 |
+ }; |
2408 |
+ |
2409 |
+ /** |
2410 |
+@@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) |
2411 |
+ * before invoking the interrupt handler (e.g. call ISR only for |
2412 |
+ * IRQ-enabled ADCs). |
2413 |
+ */ |
2414 |
+- for (i = 0; i < priv->cfg->num_irqs; i++) { |
2415 |
++ for (i = 0; i < priv->cfg->num_adcs; i++) { |
2416 |
+ if ((status & priv->cfg->regs->eoc_msk[i] && |
2417 |
+ stm32_adc_eoc_enabled(priv, i)) || |
2418 |
+ (status & priv->cfg->regs->ovr_msk[i])) |
2419 |
+@@ -792,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { |
2420 |
+ .clk_sel = stm32f4_adc_clk_sel, |
2421 |
+ .max_clk_rate_hz = 36000000, |
2422 |
+ .num_irqs = 1, |
2423 |
++ .num_adcs = 3, |
2424 |
+ }; |
2425 |
+ |
2426 |
+ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { |
2427 |
+@@ -800,14 +803,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { |
2428 |
+ .max_clk_rate_hz = 36000000, |
2429 |
+ .has_syscfg = HAS_VBOOSTER, |
2430 |
+ .num_irqs = 1, |
2431 |
++ .num_adcs = 2, |
2432 |
+ }; |
2433 |
+ |
2434 |
+ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { |
2435 |
+ .regs = &stm32h7_adc_common_regs, |
2436 |
+ .clk_sel = stm32h7_adc_clk_sel, |
2437 |
+- .max_clk_rate_hz = 40000000, |
2438 |
++ .max_clk_rate_hz = 36000000, |
2439 |
+ .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, |
2440 |
+ .num_irqs = 2, |
2441 |
++ .num_adcs = 2, |
2442 |
+ }; |
2443 |
+ |
2444 |
+ static const struct of_device_id stm32_adc_of_match[] = { |
2445 |
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c |
2446 |
+index a68ecbda6480b..11ef873d64532 100644 |
2447 |
+--- a/drivers/iio/adc/stm32-adc.c |
2448 |
++++ b/drivers/iio/adc/stm32-adc.c |
2449 |
+@@ -1365,7 +1365,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev, |
2450 |
+ else |
2451 |
+ ret = -EINVAL; |
2452 |
+ |
2453 |
+- if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal) |
2454 |
++ if (mask == IIO_CHAN_INFO_PROCESSED) |
2455 |
+ *val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val; |
2456 |
+ |
2457 |
+ iio_device_release_direct_mode(indio_dev); |
2458 |
+@@ -1407,7 +1407,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) |
2459 |
+ struct stm32_adc *adc = iio_priv(indio_dev); |
2460 |
+ const struct stm32_adc_regspec *regs = adc->cfg->regs; |
2461 |
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); |
2462 |
+- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); |
2463 |
+ |
2464 |
+ /* Check ovr status right now, as ovr mask should be already disabled */ |
2465 |
+ if (status & regs->isr_ovr.mask) { |
2466 |
+@@ -1422,11 +1421,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) |
2467 |
+ return IRQ_HANDLED; |
2468 |
+ } |
2469 |
+ |
2470 |
+- if (!(status & mask)) |
2471 |
+- dev_err_ratelimited(&indio_dev->dev, |
2472 |
+- "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n", |
2473 |
+- mask, status); |
2474 |
+- |
2475 |
+ return IRQ_NONE; |
2476 |
+ } |
2477 |
+ |
2478 |
+@@ -1436,10 +1430,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data) |
2479 |
+ struct stm32_adc *adc = iio_priv(indio_dev); |
2480 |
+ const struct stm32_adc_regspec *regs = adc->cfg->regs; |
2481 |
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); |
2482 |
+- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); |
2483 |
+- |
2484 |
+- if (!(status & mask)) |
2485 |
+- return IRQ_WAKE_THREAD; |
2486 |
+ |
2487 |
+ if (status & regs->isr_ovr.mask) { |
2488 |
+ /* |
2489 |
+@@ -1979,10 +1969,10 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n |
2490 |
+ |
2491 |
+ for (i = 0; i < STM32_ADC_INT_CH_NB; i++) { |
2492 |
+ if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) { |
2493 |
+- adc->int_ch[i] = chan; |
2494 |
+- |
2495 |
+- if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) |
2496 |
+- continue; |
2497 |
++ if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) { |
2498 |
++ adc->int_ch[i] = chan; |
2499 |
++ break; |
2500 |
++ } |
2501 |
+ |
2502 |
+ /* Get calibration data for vrefint channel */ |
2503 |
+ ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint); |
2504 |
+@@ -1990,10 +1980,15 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n |
2505 |
+ return dev_err_probe(indio_dev->dev.parent, ret, |
2506 |
+ "nvmem access error\n"); |
2507 |
+ } |
2508 |
+- if (ret == -ENOENT) |
2509 |
+- dev_dbg(&indio_dev->dev, "vrefint calibration not found\n"); |
2510 |
+- else |
2511 |
+- adc->vrefint.vrefint_cal = vrefint; |
2512 |
++ if (ret == -ENOENT) { |
2513 |
++ dev_dbg(&indio_dev->dev, "vrefint calibration not found. Skip vrefint channel\n"); |
2514 |
++ return ret; |
2515 |
++ } else if (!vrefint) { |
2516 |
++ dev_dbg(&indio_dev->dev, "Null vrefint calibration value. Skip vrefint channel\n"); |
2517 |
++ return -ENOENT; |
2518 |
++ } |
2519 |
++ adc->int_ch[i] = chan; |
2520 |
++ adc->vrefint.vrefint_cal = vrefint; |
2521 |
+ } |
2522 |
+ } |
2523 |
+ |
2524 |
+@@ -2030,7 +2025,9 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev, |
2525 |
+ } |
2526 |
+ strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ); |
2527 |
+ ret = stm32_adc_populate_int_ch(indio_dev, name, val); |
2528 |
+- if (ret) |
2529 |
++ if (ret == -ENOENT) |
2530 |
++ continue; |
2531 |
++ else if (ret) |
2532 |
+ goto err; |
2533 |
+ } else if (ret != -EINVAL) { |
2534 |
+ dev_err(&indio_dev->dev, "Invalid label %d\n", ret); |
2535 |
+diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c |
2536 |
+index 0c2025a225750..80a09817c1194 100644 |
2537 |
+--- a/drivers/iio/adc/ti-ads131e08.c |
2538 |
++++ b/drivers/iio/adc/ti-ads131e08.c |
2539 |
+@@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) |
2540 |
+ device_for_each_child_node(dev, node) { |
2541 |
+ ret = fwnode_property_read_u32(node, "reg", &channel); |
2542 |
+ if (ret) |
2543 |
+- return ret; |
2544 |
++ goto err_child_out; |
2545 |
+ |
2546 |
+ ret = fwnode_property_read_u32(node, "ti,gain", &tmp); |
2547 |
+ if (ret) { |
2548 |
+@@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) |
2549 |
+ } else { |
2550 |
+ ret = ads131e08_pga_gain_to_field_value(st, tmp); |
2551 |
+ if (ret < 0) |
2552 |
+- return ret; |
2553 |
++ goto err_child_out; |
2554 |
+ |
2555 |
+ channel_config[i].pga_gain = tmp; |
2556 |
+ } |
2557 |
+@@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) |
2558 |
+ } else { |
2559 |
+ ret = ads131e08_validate_channel_mux(st, tmp); |
2560 |
+ if (ret) |
2561 |
+- return ret; |
2562 |
++ goto err_child_out; |
2563 |
+ |
2564 |
+ channel_config[i].mux = tmp; |
2565 |
+ } |
2566 |
+@@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) |
2567 |
+ st->channel_config = channel_config; |
2568 |
+ |
2569 |
+ return 0; |
2570 |
++ |
2571 |
++err_child_out: |
2572 |
++ fwnode_handle_put(node); |
2573 |
++ return ret; |
2574 |
+ } |
2575 |
+ |
2576 |
+ static void ads131e08_regulator_disable(void *data) |
2577 |
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c |
2578 |
+index a55396c1f8b28..a7687706012d2 100644 |
2579 |
+--- a/drivers/iio/adc/xilinx-ams.c |
2580 |
++++ b/drivers/iio/adc/xilinx-ams.c |
2581 |
+@@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev) |
2582 |
+ |
2583 |
+ irq = platform_get_irq(pdev, 0); |
2584 |
+ if (irq < 0) |
2585 |
+- return ret; |
2586 |
++ return irq; |
2587 |
+ |
2588 |
+ ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq", |
2589 |
+ indio_dev); |
2590 |
+diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c |
2591 |
+index 7e511293d6d12..dc426e1484f0d 100644 |
2592 |
+--- a/drivers/iio/afe/iio-rescale.c |
2593 |
++++ b/drivers/iio/afe/iio-rescale.c |
2594 |
+@@ -278,7 +278,7 @@ static int rescale_configure_channel(struct device *dev, |
2595 |
+ chan->ext_info = rescale->ext_info; |
2596 |
+ chan->type = rescale->cfg->type; |
2597 |
+ |
2598 |
+- if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) || |
2599 |
++ if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) && |
2600 |
+ iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) { |
2601 |
+ dev_info(dev, "using raw+scale source channel\n"); |
2602 |
+ } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) { |
2603 |
+diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c |
2604 |
+index 847194fa1e464..80ef1aa9aae3b 100644 |
2605 |
+--- a/drivers/iio/chemical/ccs811.c |
2606 |
++++ b/drivers/iio/chemical/ccs811.c |
2607 |
+@@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client, |
2608 |
+ |
2609 |
+ data->drdy_trig->ops = &ccs811_trigger_ops; |
2610 |
+ iio_trigger_set_drvdata(data->drdy_trig, indio_dev); |
2611 |
+- indio_dev->trig = data->drdy_trig; |
2612 |
+- iio_trigger_get(indio_dev->trig); |
2613 |
+ ret = iio_trigger_register(data->drdy_trig); |
2614 |
+ if (ret) |
2615 |
+ goto err_poweroff; |
2616 |
++ |
2617 |
++ indio_dev->trig = iio_trigger_get(data->drdy_trig); |
2618 |
+ } |
2619 |
+ |
2620 |
+ ret = iio_triggered_buffer_setup(indio_dev, NULL, |
2621 |
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c |
2622 |
+index ea387efab62d2..f4c2f4cb48349 100644 |
2623 |
+--- a/drivers/iio/gyro/mpu3050-core.c |
2624 |
++++ b/drivers/iio/gyro/mpu3050-core.c |
2625 |
+@@ -874,6 +874,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050) |
2626 |
+ ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, |
2627 |
+ MPU3050_PWR_MGM_SLEEP, 0); |
2628 |
+ if (ret) { |
2629 |
++ regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs); |
2630 |
+ dev_err(mpu3050->dev, "error setting power mode\n"); |
2631 |
+ return ret; |
2632 |
+ } |
2633 |
+diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c |
2634 |
+index f29692b9d2db0..66b32413cf5e2 100644 |
2635 |
+--- a/drivers/iio/humidity/hts221_buffer.c |
2636 |
++++ b/drivers/iio/humidity/hts221_buffer.c |
2637 |
+@@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev) |
2638 |
+ |
2639 |
+ iio_trigger_set_drvdata(hw->trig, iio_dev); |
2640 |
+ hw->trig->ops = &hts221_trigger_ops; |
2641 |
++ |
2642 |
++ err = devm_iio_trigger_register(hw->dev, hw->trig); |
2643 |
++ |
2644 |
+ iio_dev->trig = iio_trigger_get(hw->trig); |
2645 |
+ |
2646 |
+- return devm_iio_trigger_register(hw->dev, hw->trig); |
2647 |
++ return err; |
2648 |
+ } |
2649 |
+ |
2650 |
+ static int hts221_buffer_preenable(struct iio_dev *iio_dev) |
2651 |
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h |
2652 |
+index c0f5059b13b31..995a9dc06521d 100644 |
2653 |
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h |
2654 |
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h |
2655 |
+@@ -17,6 +17,7 @@ |
2656 |
+ #include "inv_icm42600_buffer.h" |
2657 |
+ |
2658 |
+ enum inv_icm42600_chip { |
2659 |
++ INV_CHIP_INVALID, |
2660 |
+ INV_CHIP_ICM42600, |
2661 |
+ INV_CHIP_ICM42602, |
2662 |
+ INV_CHIP_ICM42605, |
2663 |
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |
2664 |
+index 86858da9cc38f..ca85fccc98393 100644 |
2665 |
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |
2666 |
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c |
2667 |
+@@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, |
2668 |
+ bool open_drain; |
2669 |
+ int ret; |
2670 |
+ |
2671 |
+- if (chip < 0 || chip >= INV_CHIP_NB) { |
2672 |
++ if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) { |
2673 |
+ dev_err(dev, "invalid chip = %d\n", chip); |
2674 |
+ return -ENODEV; |
2675 |
+ } |
2676 |
+diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c |
2677 |
+index 9ff7b0e56cf67..b2bc637150bfa 100644 |
2678 |
+--- a/drivers/iio/magnetometer/yamaha-yas530.c |
2679 |
++++ b/drivers/iio/magnetometer/yamaha-yas530.c |
2680 |
+@@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx) |
2681 |
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data); |
2682 |
+ |
2683 |
+ /* Sanity check, is this all zeroes? */ |
2684 |
+- if (memchr_inv(data, 0x00, 13)) { |
2685 |
++ if (memchr_inv(data, 0x00, 13) == NULL) { |
2686 |
+ if (!(data[13] & BIT(7))) |
2687 |
+ dev_warn(yas5xx->dev, "calibration is blank!\n"); |
2688 |
+ } |
2689 |
+diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c |
2690 |
+index 70c37f664f6da..63fbcaa4cac81 100644 |
2691 |
+--- a/drivers/iio/proximity/sx9324.c |
2692 |
++++ b/drivers/iio/proximity/sx9324.c |
2693 |
+@@ -885,6 +885,9 @@ sx9324_get_default_reg(struct device *dev, int idx, |
2694 |
+ break; |
2695 |
+ ret = device_property_read_u32_array(dev, prop, pin_defs, |
2696 |
+ ARRAY_SIZE(pin_defs)); |
2697 |
++ if (ret) |
2698 |
++ break; |
2699 |
++ |
2700 |
+ for (pin = 0; pin < SX9324_NUM_PINS; pin++) |
2701 |
+ raw |= (pin_defs[pin] << (2 * pin)) & |
2702 |
+ SX9324_REG_AFE_PH0_PIN_MASK(pin); |
2703 |
+diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig |
2704 |
+index 56ca0ad7e77a2..4c66c3f18c345 100644 |
2705 |
+--- a/drivers/iio/test/Kconfig |
2706 |
++++ b/drivers/iio/test/Kconfig |
2707 |
+@@ -6,7 +6,7 @@ |
2708 |
+ # Keep in alphabetical order |
2709 |
+ config IIO_RESCALE_KUNIT_TEST |
2710 |
+ bool "Test IIO rescale conversion functions" |
2711 |
+- depends on KUNIT=y && !IIO_RESCALE |
2712 |
++ depends on KUNIT=y && IIO_RESCALE=y |
2713 |
+ default KUNIT_ALL_TESTS |
2714 |
+ help |
2715 |
+ If you want to run tests on the iio-rescale code say Y here. |
2716 |
+diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile |
2717 |
+index f15ae0a6394f7..880360f8d02c2 100644 |
2718 |
+--- a/drivers/iio/test/Makefile |
2719 |
++++ b/drivers/iio/test/Makefile |
2720 |
+@@ -4,6 +4,6 @@ |
2721 |
+ # |
2722 |
+ |
2723 |
+ # Keep in alphabetical order |
2724 |
+-obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o ../afe/iio-rescale.o |
2725 |
++obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o |
2726 |
+ obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o |
2727 |
+ CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN) |
2728 |
+diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c |
2729 |
+index 2a4b75897910f..3d911c24b2650 100644 |
2730 |
+--- a/drivers/iio/trigger/iio-trig-sysfs.c |
2731 |
++++ b/drivers/iio/trigger/iio-trig-sysfs.c |
2732 |
+@@ -191,6 +191,7 @@ static int iio_sysfs_trigger_remove(int id) |
2733 |
+ } |
2734 |
+ |
2735 |
+ iio_trigger_unregister(t->trig); |
2736 |
++ irq_work_sync(&t->work); |
2737 |
+ iio_trigger_free(t->trig); |
2738 |
+ |
2739 |
+ list_del(&t->l); |
2740 |
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c |
2741 |
+index 8fdb84b3642bd..1d42084d02767 100644 |
2742 |
+--- a/drivers/iommu/ipmmu-vmsa.c |
2743 |
++++ b/drivers/iommu/ipmmu-vmsa.c |
2744 |
+@@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = { |
2745 |
+ .compatible = "renesas,ipmmu-r8a779a0", |
2746 |
+ .data = &ipmmu_features_rcar_gen4, |
2747 |
+ }, { |
2748 |
+- .compatible = "renesas,rcar-gen4-ipmmu", |
2749 |
++ .compatible = "renesas,rcar-gen4-ipmmu-vmsa", |
2750 |
+ .data = &ipmmu_features_rcar_gen4, |
2751 |
+ }, { |
2752 |
+ /* Terminator */ |
2753 |
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c |
2754 |
+index 1f6bf152b3c74..e92c1afc3677f 100644 |
2755 |
+--- a/drivers/md/dm-era-target.c |
2756 |
++++ b/drivers/md/dm-era-target.c |
2757 |
+@@ -1400,7 +1400,7 @@ static void start_worker(struct era *era) |
2758 |
+ static void stop_worker(struct era *era) |
2759 |
+ { |
2760 |
+ atomic_set(&era->suspended, 1); |
2761 |
+- flush_workqueue(era->wq); |
2762 |
++ drain_workqueue(era->wq); |
2763 |
+ } |
2764 |
+ |
2765 |
+ /*---------------------------------------------------------------- |
2766 |
+@@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti) |
2767 |
+ } |
2768 |
+ |
2769 |
+ stop_worker(era); |
2770 |
++ |
2771 |
++ r = metadata_commit(era->md); |
2772 |
++ if (r) { |
2773 |
++ DMERR("%s: metadata_commit failed", __func__); |
2774 |
++ /* FIXME: fail mode */ |
2775 |
++ } |
2776 |
+ } |
2777 |
+ |
2778 |
+ static int era_preresume(struct dm_target *ti) |
2779 |
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c |
2780 |
+index 2dda05aada231..0c6620e7b7bf6 100644 |
2781 |
+--- a/drivers/md/dm-log.c |
2782 |
++++ b/drivers/md/dm-log.c |
2783 |
+@@ -615,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log) |
2784 |
+ log_clear_bit(lc, lc->clean_bits, i); |
2785 |
+ |
2786 |
+ /* clear any old bits -- device has shrunk */ |
2787 |
+- for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++) |
2788 |
++ for (i = lc->region_count; i % BITS_PER_LONG; i++) |
2789 |
+ log_clear_bit(lc, lc->clean_bits, i); |
2790 |
+ |
2791 |
+ /* copy clean across to sync */ |
2792 |
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
2793 |
+index 83dd17abf1af9..f01d33bc36136 100644 |
2794 |
+--- a/drivers/md/dm.c |
2795 |
++++ b/drivers/md/dm.c |
2796 |
+@@ -899,9 +899,11 @@ static void dm_io_complete(struct dm_io *io) |
2797 |
+ if (io_error == BLK_STS_AGAIN) { |
2798 |
+ /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ |
2799 |
+ queue_io(md, bio); |
2800 |
++ return; |
2801 |
+ } |
2802 |
+ } |
2803 |
+- return; |
2804 |
++ if (io_error == BLK_STS_DM_REQUEUE) |
2805 |
++ return; |
2806 |
+ } |
2807 |
+ |
2808 |
+ if (bio_is_flush_with_data(bio)) { |
2809 |
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c |
2810 |
+index 86a3d34f418e8..4c5154e0bf00c 100644 |
2811 |
+--- a/drivers/memory/mtk-smi.c |
2812 |
++++ b/drivers/memory/mtk-smi.c |
2813 |
+@@ -404,13 +404,16 @@ static int mtk_smi_device_link_common(struct device *dev, struct device **com_de |
2814 |
+ of_node_put(smi_com_node); |
2815 |
+ if (smi_com_pdev) { |
2816 |
+ /* smi common is the supplier, Make sure it is ready before */ |
2817 |
+- if (!platform_get_drvdata(smi_com_pdev)) |
2818 |
++ if (!platform_get_drvdata(smi_com_pdev)) { |
2819 |
++ put_device(&smi_com_pdev->dev); |
2820 |
+ return -EPROBE_DEFER; |
2821 |
++ } |
2822 |
+ smi_com_dev = &smi_com_pdev->dev; |
2823 |
+ link = device_link_add(dev, smi_com_dev, |
2824 |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); |
2825 |
+ if (!link) { |
2826 |
+ dev_err(dev, "Unable to link smi-common dev\n"); |
2827 |
++ put_device(&smi_com_pdev->dev); |
2828 |
+ return -ENODEV; |
2829 |
+ } |
2830 |
+ *com_dev = smi_com_dev; |
2831 |
+diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c |
2832 |
+index 4733e7898ffe5..c491cd549644f 100644 |
2833 |
+--- a/drivers/memory/samsung/exynos5422-dmc.c |
2834 |
++++ b/drivers/memory/samsung/exynos5422-dmc.c |
2835 |
+@@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) |
2836 |
+ |
2837 |
+ dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, |
2838 |
+ sizeof(u32), GFP_KERNEL); |
2839 |
+- if (!dmc->timing_row) |
2840 |
+- return -ENOMEM; |
2841 |
++ if (!dmc->timing_row) { |
2842 |
++ ret = -ENOMEM; |
2843 |
++ goto put_node; |
2844 |
++ } |
2845 |
+ |
2846 |
+ dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, |
2847 |
+ sizeof(u32), GFP_KERNEL); |
2848 |
+- if (!dmc->timing_data) |
2849 |
+- return -ENOMEM; |
2850 |
++ if (!dmc->timing_data) { |
2851 |
++ ret = -ENOMEM; |
2852 |
++ goto put_node; |
2853 |
++ } |
2854 |
+ |
2855 |
+ dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, |
2856 |
+ sizeof(u32), GFP_KERNEL); |
2857 |
+- if (!dmc->timing_power) |
2858 |
+- return -ENOMEM; |
2859 |
++ if (!dmc->timing_power) { |
2860 |
++ ret = -ENOMEM; |
2861 |
++ goto put_node; |
2862 |
++ } |
2863 |
+ |
2864 |
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, |
2865 |
+ DDR_TYPE_LPDDR3, |
2866 |
+ &dmc->timings_arr_size); |
2867 |
+ if (!dmc->timings) { |
2868 |
+- of_node_put(np_ddr); |
2869 |
+ dev_warn(dmc->dev, "could not get timings from DT\n"); |
2870 |
+- return -EINVAL; |
2871 |
++ ret = -EINVAL; |
2872 |
++ goto put_node; |
2873 |
+ } |
2874 |
+ |
2875 |
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); |
2876 |
+ if (!dmc->min_tck) { |
2877 |
+- of_node_put(np_ddr); |
2878 |
+ dev_warn(dmc->dev, "could not get tck from DT\n"); |
2879 |
+- return -EINVAL; |
2880 |
++ ret = -EINVAL; |
2881 |
++ goto put_node; |
2882 |
+ } |
2883 |
+ |
2884 |
+ /* Sorted array of OPPs with frequency ascending */ |
2885 |
+@@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) |
2886 |
+ clk_period_ps); |
2887 |
+ } |
2888 |
+ |
2889 |
+- of_node_put(np_ddr); |
2890 |
+ |
2891 |
+ /* Take the highest frequency's timings as 'bypass' */ |
2892 |
+ dmc->bypass_timing_row = dmc->timing_row[idx - 1]; |
2893 |
+ dmc->bypass_timing_data = dmc->timing_data[idx - 1]; |
2894 |
+ dmc->bypass_timing_power = dmc->timing_power[idx - 1]; |
2895 |
+ |
2896 |
++put_node: |
2897 |
++ of_node_put(np_ddr); |
2898 |
+ return ret; |
2899 |
+ } |
2900 |
+ |
2901 |
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c |
2902 |
+index e61b0b98065a2..b74a0e54e652f 100644 |
2903 |
+--- a/drivers/mmc/host/mtk-sd.c |
2904 |
++++ b/drivers/mmc/host/mtk-sd.c |
2905 |
+@@ -1356,7 +1356,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq) |
2906 |
+ msdc_request_done(host, mrq); |
2907 |
+ } |
2908 |
+ |
2909 |
+-static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, |
2910 |
++static void msdc_data_xfer_done(struct msdc_host *host, u32 events, |
2911 |
+ struct mmc_request *mrq, struct mmc_data *data) |
2912 |
+ { |
2913 |
+ struct mmc_command *stop; |
2914 |
+@@ -1376,7 +1376,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, |
2915 |
+ spin_unlock_irqrestore(&host->lock, flags); |
2916 |
+ |
2917 |
+ if (done) |
2918 |
+- return true; |
2919 |
++ return; |
2920 |
+ stop = data->stop; |
2921 |
+ |
2922 |
+ if (check_data || (stop && stop->error)) { |
2923 |
+@@ -1385,12 +1385,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, |
2924 |
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, |
2925 |
+ 1); |
2926 |
+ |
2927 |
++ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val, |
2928 |
++ !(val & MSDC_DMA_CTRL_STOP), 1, 20000); |
2929 |
++ if (ret) |
2930 |
++ dev_dbg(host->dev, "DMA stop timed out\n"); |
2931 |
++ |
2932 |
+ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val, |
2933 |
+ !(val & MSDC_DMA_CFG_STS), 1, 20000); |
2934 |
+- if (ret) { |
2935 |
+- dev_dbg(host->dev, "DMA stop timed out\n"); |
2936 |
+- return false; |
2937 |
+- } |
2938 |
++ if (ret) |
2939 |
++ dev_dbg(host->dev, "DMA inactive timed out\n"); |
2940 |
+ |
2941 |
+ sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); |
2942 |
+ dev_dbg(host->dev, "DMA stop\n"); |
2943 |
+@@ -1415,9 +1418,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, |
2944 |
+ } |
2945 |
+ |
2946 |
+ msdc_data_xfer_next(host, mrq); |
2947 |
+- done = true; |
2948 |
+ } |
2949 |
+- return done; |
2950 |
+ } |
2951 |
+ |
2952 |
+ static void msdc_set_buswidth(struct msdc_host *host, u32 width) |
2953 |
+@@ -2416,6 +2417,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) |
2954 |
+ if (recovery) { |
2955 |
+ sdr_set_field(host->base + MSDC_DMA_CTRL, |
2956 |
+ MSDC_DMA_CTRL_STOP, 1); |
2957 |
++ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val, |
2958 |
++ !(val & MSDC_DMA_CTRL_STOP), 1, 3000))) |
2959 |
++ return; |
2960 |
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, |
2961 |
+ !(val & MSDC_DMA_CFG_STS), 1, 3000))) |
2962 |
+ return; |
2963 |
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c |
2964 |
+index 92c20cb8074a6..0d4d343dbb77d 100644 |
2965 |
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c |
2966 |
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c |
2967 |
+@@ -152,6 +152,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc) |
2968 |
+ |
2969 |
+ if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS)) |
2970 |
+ sdhci_o2_enable_internal_clock(host); |
2971 |
++ else |
2972 |
++ sdhci_o2_wait_card_detect_stable(host); |
2973 |
+ |
2974 |
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); |
2975 |
+ } |
2976 |
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
2977 |
+index 44b14c9dc9a73..375529b7d12e3 100644 |
2978 |
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
2979 |
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
2980 |
+@@ -695,7 +695,7 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, |
2981 |
+ hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | |
2982 |
+ BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | |
2983 |
+ BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); |
2984 |
+- hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096); |
2985 |
++ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096)); |
2986 |
+ |
2987 |
+ /* |
2988 |
+ * Derive NFC ideal delay from {3}: |
2989 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
2990 |
+index 26a6573adf0f5..93c7a551264eb 100644 |
2991 |
+--- a/drivers/net/bonding/bond_main.c |
2992 |
++++ b/drivers/net/bonding/bond_main.c |
2993 |
+@@ -3684,9 +3684,11 @@ re_arm: |
2994 |
+ if (!rtnl_trylock()) |
2995 |
+ return; |
2996 |
+ |
2997 |
+- if (should_notify_peers) |
2998 |
++ if (should_notify_peers) { |
2999 |
++ bond->send_peer_notif--; |
3000 |
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, |
3001 |
+ bond->dev); |
3002 |
++ } |
3003 |
+ if (should_notify_rtnl) { |
3004 |
+ bond_slave_state_notify(bond); |
3005 |
+ bond_slave_link_notify(bond); |
3006 |
+diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h |
3007 |
+index f375627174c8c..e553e3e6fa0fb 100644 |
3008 |
+--- a/drivers/net/dsa/qca8k.h |
3009 |
++++ b/drivers/net/dsa/qca8k.h |
3010 |
+@@ -15,7 +15,7 @@ |
3011 |
+ |
3012 |
+ #define QCA8K_ETHERNET_MDIO_PRIORITY 7 |
3013 |
+ #define QCA8K_ETHERNET_PHY_PRIORITY 6 |
3014 |
+-#define QCA8K_ETHERNET_TIMEOUT 100 |
3015 |
++#define QCA8K_ETHERNET_TIMEOUT 5 |
3016 |
+ |
3017 |
+ #define QCA8K_NUM_PORTS 7 |
3018 |
+ #define QCA8K_NUM_CPU_PORTS 2 |
3019 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c |
3020 |
+index 24cda7e1f916c..8aee4ae4cc8c9 100644 |
3021 |
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c |
3022 |
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c |
3023 |
+@@ -2191,6 +2191,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, |
3024 |
+ return err; |
3025 |
+ } |
3026 |
+ |
3027 |
++/** |
3028 |
++ * ice_set_phy_type_from_speed - set phy_types based on speeds |
3029 |
++ * and advertised modes |
3030 |
++ * @ks: ethtool link ksettings struct |
3031 |
++ * @phy_type_low: pointer to the lower part of phy_type |
3032 |
++ * @phy_type_high: pointer to the higher part of phy_type |
3033 |
++ * @adv_link_speed: targeted link speeds bitmap |
3034 |
++ */ |
3035 |
++static void |
3036 |
++ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, |
3037 |
++ u64 *phy_type_low, u64 *phy_type_high, |
3038 |
++ u16 adv_link_speed) |
3039 |
++{ |
3040 |
++ /* Handle 1000M speed in a special way because ice_update_phy_type |
3041 |
++ * enables all link modes, but having mixed copper and optical |
3042 |
++ * standards is not supported. |
3043 |
++ */ |
3044 |
++ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; |
3045 |
++ |
3046 |
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising, |
3047 |
++ 1000baseT_Full)) |
3048 |
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | |
3049 |
++ ICE_PHY_TYPE_LOW_1G_SGMII; |
3050 |
++ |
3051 |
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising, |
3052 |
++ 1000baseKX_Full)) |
3053 |
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; |
3054 |
++ |
3055 |
++ if (ethtool_link_ksettings_test_link_mode(ks, advertising, |
3056 |
++ 1000baseX_Full)) |
3057 |
++ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | |
3058 |
++ ICE_PHY_TYPE_LOW_1000BASE_LX; |
3059 |
++ |
3060 |
++ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); |
3061 |
++} |
3062 |
++ |
3063 |
+ /** |
3064 |
+ * ice_set_link_ksettings - Set Speed and Duplex |
3065 |
+ * @netdev: network interface device structure |
3066 |
+@@ -2322,7 +2358,8 @@ ice_set_link_ksettings(struct net_device *netdev, |
3067 |
+ adv_link_speed = curr_link_speed; |
3068 |
+ |
3069 |
+ /* Convert the advertise link speeds to their corresponded PHY_TYPE */ |
3070 |
+- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); |
3071 |
++ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, |
3072 |
++ adv_link_speed); |
3073 |
+ |
3074 |
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) { |
3075 |
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); |
3076 |
+@@ -3440,6 +3477,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) |
3077 |
+ new_rx = ch->combined_count + ch->rx_count; |
3078 |
+ new_tx = ch->combined_count + ch->tx_count; |
3079 |
+ |
3080 |
++ if (new_rx < vsi->tc_cfg.numtc) { |
3081 |
++ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n", |
3082 |
++ vsi->tc_cfg.numtc); |
3083 |
++ return -EINVAL; |
3084 |
++ } |
3085 |
++ if (new_tx < vsi->tc_cfg.numtc) { |
3086 |
++ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n", |
3087 |
++ vsi->tc_cfg.numtc); |
3088 |
++ return -EINVAL; |
3089 |
++ } |
3090 |
+ if (new_rx > ice_get_max_rxq(pf)) { |
3091 |
+ netdev_err(dev, "Maximum allowed Rx channels is %d\n", |
3092 |
+ ice_get_max_rxq(pf)); |
3093 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c |
3094 |
+index 454e01ae09b97..f7f9c973ec54d 100644 |
3095 |
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c |
3096 |
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c |
3097 |
+@@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) |
3098 |
+ * @vsi: the VSI being configured |
3099 |
+ * @ctxt: VSI context structure |
3100 |
+ */ |
3101 |
+-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
3102 |
++static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
3103 |
+ { |
3104 |
+ u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; |
3105 |
+ u16 num_txq_per_tc, num_rxq_per_tc; |
3106 |
+@@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
3107 |
+ else |
3108 |
+ vsi->num_rxq = num_rxq_per_tc; |
3109 |
+ |
3110 |
++ if (vsi->num_rxq > vsi->alloc_rxq) { |
3111 |
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", |
3112 |
++ vsi->num_rxq, vsi->alloc_rxq); |
3113 |
++ return -EINVAL; |
3114 |
++ } |
3115 |
++ |
3116 |
+ vsi->num_txq = tx_count; |
3117 |
++ if (vsi->num_txq > vsi->alloc_txq) { |
3118 |
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", |
3119 |
++ vsi->num_txq, vsi->alloc_txq); |
3120 |
++ return -EINVAL; |
3121 |
++ } |
3122 |
+ |
3123 |
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { |
3124 |
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); |
3125 |
+@@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) |
3126 |
+ */ |
3127 |
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); |
3128 |
+ ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); |
3129 |
++ |
3130 |
++ return 0; |
3131 |
+ } |
3132 |
+ |
3133 |
+ /** |
3134 |
+@@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) |
3135 |
+ if (vsi->type == ICE_VSI_CHNL) { |
3136 |
+ ice_chnl_vsi_setup_q_map(vsi, ctxt); |
3137 |
+ } else { |
3138 |
+- ice_vsi_setup_q_map(vsi, ctxt); |
3139 |
++ ret = ice_vsi_setup_q_map(vsi, ctxt); |
3140 |
++ if (ret) |
3141 |
++ goto out; |
3142 |
++ |
3143 |
+ if (!init_vsi) /* means VSI being updated */ |
3144 |
+ /* must to indicate which section of VSI context are |
3145 |
+ * being modified |
3146 |
+@@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) |
3147 |
+ * |
3148 |
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options. |
3149 |
+ */ |
3150 |
+-static void |
3151 |
++static int |
3152 |
+ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, |
3153 |
+ u8 ena_tc) |
3154 |
+ { |
3155 |
+@@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, |
3156 |
+ |
3157 |
+ /* Set actual Tx/Rx queue pairs */ |
3158 |
+ vsi->num_txq = offset + qcount_tx; |
3159 |
++ if (vsi->num_txq > vsi->alloc_txq) { |
3160 |
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", |
3161 |
++ vsi->num_txq, vsi->alloc_txq); |
3162 |
++ return -EINVAL; |
3163 |
++ } |
3164 |
++ |
3165 |
+ vsi->num_rxq = offset + qcount_rx; |
3166 |
++ if (vsi->num_rxq > vsi->alloc_rxq) { |
3167 |
++ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", |
3168 |
++ vsi->num_rxq, vsi->alloc_rxq); |
3169 |
++ return -EINVAL; |
3170 |
++ } |
3171 |
+ |
3172 |
+ /* Setup queue TC[0].qmap for given VSI context */ |
3173 |
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); |
3174 |
+@@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, |
3175 |
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); |
3176 |
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", |
3177 |
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); |
3178 |
++ |
3179 |
++ return 0; |
3180 |
+ } |
3181 |
+ |
3182 |
+ /** |
3183 |
+@@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) |
3184 |
+ |
3185 |
+ if (vsi->type == ICE_VSI_PF && |
3186 |
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) |
3187 |
+- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); |
3188 |
++ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); |
3189 |
+ else |
3190 |
+- ice_vsi_setup_q_map(vsi, ctx); |
3191 |
++ ret = ice_vsi_setup_q_map(vsi, ctx); |
3192 |
++ |
3193 |
++ if (ret) |
3194 |
++ goto out; |
3195 |
+ |
3196 |
+ /* must to indicate which section of VSI context are being modified */ |
3197 |
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); |
3198 |
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c |
3199 |
+index 3acd9f921c441..2ce2694fcbd78 100644 |
3200 |
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c |
3201 |
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c |
3202 |
+@@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) |
3203 |
+ */ |
3204 |
+ fltr->rid = rule_added.rid; |
3205 |
+ fltr->rule_id = rule_added.rule_id; |
3206 |
++ fltr->dest_id = rule_added.vsi_handle; |
3207 |
+ |
3208 |
+ exit: |
3209 |
+ kfree(list); |
3210 |
+@@ -994,7 +995,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, |
3211 |
+ n_proto_key = ntohs(match.key->n_proto); |
3212 |
+ n_proto_mask = ntohs(match.mask->n_proto); |
3213 |
+ |
3214 |
+- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) { |
3215 |
++ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 || |
3216 |
++ fltr->tunnel_type == TNL_GTPU || |
3217 |
++ fltr->tunnel_type == TNL_GTPC) { |
3218 |
+ n_proto_key = 0; |
3219 |
+ n_proto_mask = 0; |
3220 |
+ } else { |
3221 |
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
3222 |
+index 68be2976f539f..c5f04c40284bf 100644 |
3223 |
+--- a/drivers/net/ethernet/intel/igb/igb_main.c |
3224 |
++++ b/drivers/net/ethernet/intel/igb/igb_main.c |
3225 |
+@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) |
3226 |
+ while (i != tx_ring->next_to_use) { |
3227 |
+ union e1000_adv_tx_desc *eop_desc, *tx_desc; |
3228 |
+ |
3229 |
+- /* Free all the Tx ring sk_buffs */ |
3230 |
+- dev_kfree_skb_any(tx_buffer->skb); |
3231 |
++ /* Free all the Tx ring sk_buffs or xdp frames */ |
3232 |
++ if (tx_buffer->type == IGB_TYPE_SKB) |
3233 |
++ dev_kfree_skb_any(tx_buffer->skb); |
3234 |
++ else |
3235 |
++ xdp_return_frame(tx_buffer->xdpf); |
3236 |
+ |
3237 |
+ /* unmap skb header data */ |
3238 |
+ dma_unmap_single(tx_ring->dev, |
3239 |
+@@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) |
3240 |
+ struct e1000_hw *hw = &adapter->hw; |
3241 |
+ u32 dmac_thr; |
3242 |
+ u16 hwm; |
3243 |
++ u32 reg; |
3244 |
+ |
3245 |
+ if (hw->mac.type > e1000_82580) { |
3246 |
+ if (adapter->flags & IGB_FLAG_DMAC) { |
3247 |
+- u32 reg; |
3248 |
+- |
3249 |
+ /* force threshold to 0. */ |
3250 |
+ wr32(E1000_DMCTXTH, 0); |
3251 |
+ |
3252 |
+@@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) |
3253 |
+ /* Disable BMC-to-OS Watchdog Enable */ |
3254 |
+ if (hw->mac.type != e1000_i354) |
3255 |
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN; |
3256 |
+- |
3257 |
+ wr32(E1000_DMACR, reg); |
3258 |
+ |
3259 |
+ /* no lower threshold to disable |
3260 |
+@@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) |
3261 |
+ */ |
3262 |
+ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - |
3263 |
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); |
3264 |
++ } |
3265 |
+ |
3266 |
+- /* make low power state decision controlled |
3267 |
+- * by DMA coal |
3268 |
+- */ |
3269 |
++ if (hw->mac.type >= e1000_i210 || |
3270 |
++ (adapter->flags & IGB_FLAG_DMAC)) { |
3271 |
+ reg = rd32(E1000_PCIEMISC); |
3272 |
+- reg &= ~E1000_PCIEMISC_LX_DECISION; |
3273 |
++ reg |= E1000_PCIEMISC_LX_DECISION; |
3274 |
+ wr32(E1000_PCIEMISC, reg); |
3275 |
+ } /* endif adapter->dmac is not disabled */ |
3276 |
+ } else if (hw->mac.type == e1000_82580) { |
3277 |
+diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c |
3278 |
+index a8db1a19011bd..c7047f5d7a9b0 100644 |
3279 |
+--- a/drivers/net/phy/aquantia_main.c |
3280 |
++++ b/drivers/net/phy/aquantia_main.c |
3281 |
+@@ -34,6 +34,8 @@ |
3282 |
+ #define MDIO_AN_VEND_PROV 0xc400 |
3283 |
+ #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) |
3284 |
+ #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) |
3285 |
++#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11) |
3286 |
++#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10) |
3287 |
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4) |
3288 |
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0) |
3289 |
+ #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4 |
3290 |
+@@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev) |
3291 |
+ phydev->advertising)) |
3292 |
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; |
3293 |
+ |
3294 |
++ /* Handle the case when the 2.5G and 5G speeds are not advertised */ |
3295 |
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, |
3296 |
++ phydev->advertising)) |
3297 |
++ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL; |
3298 |
++ |
3299 |
++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, |
3300 |
++ phydev->advertising)) |
3301 |
++ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL; |
3302 |
++ |
3303 |
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, |
3304 |
+ MDIO_AN_VEND_PROV_1000BASET_HALF | |
3305 |
+- MDIO_AN_VEND_PROV_1000BASET_FULL, reg); |
3306 |
++ MDIO_AN_VEND_PROV_1000BASET_FULL | |
3307 |
++ MDIO_AN_VEND_PROV_2500BASET_FULL | |
3308 |
++ MDIO_AN_VEND_PROV_5000BASET_FULL, reg); |
3309 |
+ if (ret < 0) |
3310 |
+ return ret; |
3311 |
+ if (ret > 0) |
3312 |
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c |
3313 |
+index 6a467e7817a6a..59fe356942b51 100644 |
3314 |
+--- a/drivers/net/phy/at803x.c |
3315 |
++++ b/drivers/net/phy/at803x.c |
3316 |
+@@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = { |
3317 |
+ /* ATHEROS AR9331 */ |
3318 |
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), |
3319 |
+ .name = "Qualcomm Atheros AR9331 built-in PHY", |
3320 |
++ .probe = at803x_probe, |
3321 |
++ .remove = at803x_remove, |
3322 |
+ .suspend = at803x_suspend, |
3323 |
+ .resume = at803x_resume, |
3324 |
+ .flags = PHY_POLL_CABLE_TEST, |
3325 |
+@@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = { |
3326 |
+ /* Qualcomm Atheros QCA9561 */ |
3327 |
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID), |
3328 |
+ .name = "Qualcomm Atheros QCA9561 built-in PHY", |
3329 |
++ .probe = at803x_probe, |
3330 |
++ .remove = at803x_remove, |
3331 |
+ .suspend = at803x_suspend, |
3332 |
+ .resume = at803x_resume, |
3333 |
+ .flags = PHY_POLL_CABLE_TEST, |
3334 |
+@@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = { |
3335 |
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID), |
3336 |
+ .name = "Qualcomm QCA8081", |
3337 |
+ .flags = PHY_POLL_CABLE_TEST, |
3338 |
++ .probe = at803x_probe, |
3339 |
++ .remove = at803x_remove, |
3340 |
+ .config_intr = at803x_config_intr, |
3341 |
+ .handle_interrupt = at803x_handle_interrupt, |
3342 |
+ .get_tunable = at803x_get_tunable, |
3343 |
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c |
3344 |
+index eb0121a64d6d2..1d1dea07d9326 100644 |
3345 |
+--- a/drivers/net/veth.c |
3346 |
++++ b/drivers/net/veth.c |
3347 |
+@@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, |
3348 |
+ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) |
3349 |
+ { |
3350 |
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev); |
3351 |
++ struct netdev_queue *queue = NULL; |
3352 |
+ struct veth_rq *rq = NULL; |
3353 |
+ struct net_device *rcv; |
3354 |
+ int length = skb->len; |
3355 |
+@@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) |
3356 |
+ rxq = skb_get_queue_mapping(skb); |
3357 |
+ if (rxq < rcv->real_num_rx_queues) { |
3358 |
+ rq = &rcv_priv->rq[rxq]; |
3359 |
++ queue = netdev_get_tx_queue(dev, rxq); |
3360 |
+ |
3361 |
+ /* The napi pointer is available when an XDP program is |
3362 |
+ * attached or when GRO is enabled |
3363 |
+@@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) |
3364 |
+ |
3365 |
+ skb_tx_timestamp(skb); |
3366 |
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { |
3367 |
++ if (queue) |
3368 |
++ txq_trans_cond_update(queue); |
3369 |
+ if (!use_napi) |
3370 |
+ dev_lstats_add(dev, length); |
3371 |
+ } else { |
3372 |
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
3373 |
+index cbba9d2e8f322..10d548b07b9c6 100644 |
3374 |
+--- a/drivers/net/virtio_net.c |
3375 |
++++ b/drivers/net/virtio_net.c |
3376 |
+@@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = { |
3377 |
+ static void virtnet_freeze_down(struct virtio_device *vdev) |
3378 |
+ { |
3379 |
+ struct virtnet_info *vi = vdev->priv; |
3380 |
+- int i; |
3381 |
+ |
3382 |
+ /* Make sure no work handler is accessing the device */ |
3383 |
+ flush_work(&vi->config_work); |
3384 |
+@@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) |
3385 |
+ netif_tx_lock_bh(vi->dev); |
3386 |
+ netif_device_detach(vi->dev); |
3387 |
+ netif_tx_unlock_bh(vi->dev); |
3388 |
+- cancel_delayed_work_sync(&vi->refill); |
3389 |
+- |
3390 |
+- if (netif_running(vi->dev)) { |
3391 |
+- for (i = 0; i < vi->max_queue_pairs; i++) { |
3392 |
+- napi_disable(&vi->rq[i].napi); |
3393 |
+- virtnet_napi_tx_disable(&vi->sq[i].napi); |
3394 |
+- } |
3395 |
+- } |
3396 |
++ if (netif_running(vi->dev)) |
3397 |
++ virtnet_close(vi->dev); |
3398 |
+ } |
3399 |
+ |
3400 |
+ static int init_vqs(struct virtnet_info *vi); |
3401 |
+@@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi); |
3402 |
+ static int virtnet_restore_up(struct virtio_device *vdev) |
3403 |
+ { |
3404 |
+ struct virtnet_info *vi = vdev->priv; |
3405 |
+- int err, i; |
3406 |
++ int err; |
3407 |
+ |
3408 |
+ err = init_vqs(vi); |
3409 |
+ if (err) |
3410 |
+@@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) |
3411 |
+ virtio_device_ready(vdev); |
3412 |
+ |
3413 |
+ if (netif_running(vi->dev)) { |
3414 |
+- for (i = 0; i < vi->curr_queue_pairs; i++) |
3415 |
+- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
3416 |
+- schedule_delayed_work(&vi->refill, 0); |
3417 |
+- |
3418 |
+- for (i = 0; i < vi->max_queue_pairs; i++) { |
3419 |
+- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
3420 |
+- virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
3421 |
+- &vi->sq[i].napi); |
3422 |
+- } |
3423 |
++ err = virtnet_open(vi->dev); |
3424 |
++ if (err) |
3425 |
++ return err; |
3426 |
+ } |
3427 |
+ |
3428 |
+ netif_tx_lock_bh(vi->dev); |
3429 |
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
3430 |
+index 1ea85c88d7951..a2862a56fadc4 100644 |
3431 |
+--- a/drivers/nvme/host/core.c |
3432 |
++++ b/drivers/nvme/host/core.c |
3433 |
+@@ -2487,6 +2487,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = { |
3434 |
+ .vid = 0x1e0f, |
3435 |
+ .mn = "KCD6XVUL6T40", |
3436 |
+ .quirks = NVME_QUIRK_NO_APST, |
3437 |
++ }, |
3438 |
++ { |
3439 |
++ /* |
3440 |
++ * The external Samsung X5 SSD fails initialization without a |
3441 |
++ * delay before checking if it is ready and has a whole set of |
3442 |
++ * other problems. To make this even more interesting, it |
3443 |
++ * shares the PCI ID with internal Samsung 970 Evo Plus that |
3444 |
++ * does not need or want these quirks. |
3445 |
++ */ |
3446 |
++ .vid = 0x144d, |
3447 |
++ .mn = "Samsung Portable SSD X5", |
3448 |
++ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | |
3449 |
++ NVME_QUIRK_NO_DEEPEST_PS | |
3450 |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN, |
3451 |
+ } |
3452 |
+ }; |
3453 |
+ |
3454 |
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
3455 |
+index 17aeb7d5c4852..ddea0fb90c288 100644 |
3456 |
+--- a/drivers/nvme/host/pci.c |
3457 |
++++ b/drivers/nvme/host/pci.c |
3458 |
+@@ -3475,10 +3475,6 @@ static const struct pci_device_id nvme_id_table[] = { |
3459 |
+ NVME_QUIRK_128_BYTES_SQES | |
3460 |
+ NVME_QUIRK_SHARED_TAGS | |
3461 |
+ NVME_QUIRK_SKIP_CID_GEN }, |
3462 |
+- { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ |
3463 |
+- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| |
3464 |
+- NVME_QUIRK_NO_DEEPEST_PS | |
3465 |
+- NVME_QUIRK_IGNORE_DEV_SUBNQN, }, |
3466 |
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
3467 |
+ { 0, } |
3468 |
+ }; |
3469 |
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c |
3470 |
+index d0eab5700dc57..00684e11976be 100644 |
3471 |
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c |
3472 |
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c |
3473 |
+@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); |
3474 |
+ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); |
3475 |
+ static void ibmvfc_tgt_move_login(struct ibmvfc_target *); |
3476 |
+ |
3477 |
+-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); |
3478 |
+-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); |
3479 |
++static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *); |
3480 |
++static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); |
3481 |
+ |
3482 |
+ static const char *unknown_error = "unknown error"; |
3483 |
+ |
3484 |
+@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) |
3485 |
+ struct vio_dev *vdev = to_vio_dev(vhost->dev); |
3486 |
+ unsigned long flags; |
3487 |
+ |
3488 |
+- ibmvfc_release_sub_crqs(vhost); |
3489 |
++ ibmvfc_dereg_sub_crqs(vhost); |
3490 |
+ |
3491 |
+ /* Re-enable the CRQ */ |
3492 |
+ do { |
3493 |
+@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) |
3494 |
+ spin_unlock(vhost->crq.q_lock); |
3495 |
+ spin_unlock_irqrestore(vhost->host->host_lock, flags); |
3496 |
+ |
3497 |
+- ibmvfc_init_sub_crqs(vhost); |
3498 |
++ ibmvfc_reg_sub_crqs(vhost); |
3499 |
+ |
3500 |
+ return rc; |
3501 |
+ } |
3502 |
+@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) |
3503 |
+ struct vio_dev *vdev = to_vio_dev(vhost->dev); |
3504 |
+ struct ibmvfc_queue *crq = &vhost->crq; |
3505 |
+ |
3506 |
+- ibmvfc_release_sub_crqs(vhost); |
3507 |
++ ibmvfc_dereg_sub_crqs(vhost); |
3508 |
+ |
3509 |
+ /* Close the CRQ */ |
3510 |
+ do { |
3511 |
+@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) |
3512 |
+ spin_unlock(vhost->crq.q_lock); |
3513 |
+ spin_unlock_irqrestore(vhost->host->host_lock, flags); |
3514 |
+ |
3515 |
+- ibmvfc_init_sub_crqs(vhost); |
3516 |
++ ibmvfc_reg_sub_crqs(vhost); |
3517 |
+ |
3518 |
+ return rc; |
3519 |
+ } |
3520 |
+@@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, |
3521 |
+ queue->cur = 0; |
3522 |
+ queue->fmt = fmt; |
3523 |
+ queue->size = PAGE_SIZE / fmt_size; |
3524 |
++ |
3525 |
++ queue->vhost = vhost; |
3526 |
+ return 0; |
3527 |
+ } |
3528 |
+ |
3529 |
+@@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, |
3530 |
+ |
3531 |
+ ENTER; |
3532 |
+ |
3533 |
+- if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) |
3534 |
+- return -ENOMEM; |
3535 |
+- |
3536 |
+ rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, |
3537 |
+ &scrq->cookie, &scrq->hw_irq); |
3538 |
+ |
3539 |
+@@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, |
3540 |
+ } |
3541 |
+ |
3542 |
+ scrq->hwq_id = index; |
3543 |
+- scrq->vhost = vhost; |
3544 |
+ |
3545 |
+ LEAVE; |
3546 |
+ return 0; |
3547 |
+@@ -5800,7 +5798,6 @@ irq_failed: |
3548 |
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); |
3549 |
+ } while (rtas_busy_delay(rc)); |
3550 |
+ reg_failed: |
3551 |
+- ibmvfc_free_queue(vhost, scrq); |
3552 |
+ LEAVE; |
3553 |
+ return rc; |
3554 |
+ } |
3555 |
+@@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) |
3556 |
+ if (rc) |
3557 |
+ dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); |
3558 |
+ |
3559 |
+- ibmvfc_free_queue(vhost, scrq); |
3560 |
++ /* Clean out the queue */ |
3561 |
++ memset(scrq->msgs.crq, 0, PAGE_SIZE); |
3562 |
++ scrq->cur = 0; |
3563 |
++ |
3564 |
++ LEAVE; |
3565 |
++} |
3566 |
++ |
3567 |
++static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) |
3568 |
++{ |
3569 |
++ int i, j; |
3570 |
++ |
3571 |
++ ENTER; |
3572 |
++ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) |
3573 |
++ return; |
3574 |
++ |
3575 |
++ for (i = 0; i < nr_scsi_hw_queues; i++) { |
3576 |
++ if (ibmvfc_register_scsi_channel(vhost, i)) { |
3577 |
++ for (j = i; j > 0; j--) |
3578 |
++ ibmvfc_deregister_scsi_channel(vhost, j - 1); |
3579 |
++ vhost->do_enquiry = 0; |
3580 |
++ return; |
3581 |
++ } |
3582 |
++ } |
3583 |
++ |
3584 |
++ LEAVE; |
3585 |
++} |
3586 |
++ |
3587 |
++static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) |
3588 |
++{ |
3589 |
++ int i; |
3590 |
++ |
3591 |
++ ENTER; |
3592 |
++ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) |
3593 |
++ return; |
3594 |
++ |
3595 |
++ for (i = 0; i < nr_scsi_hw_queues; i++) |
3596 |
++ ibmvfc_deregister_scsi_channel(vhost, i); |
3597 |
++ |
3598 |
+ LEAVE; |
3599 |
+ } |
3600 |
+ |
3601 |
+ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) |
3602 |
+ { |
3603 |
++ struct ibmvfc_queue *scrq; |
3604 |
+ int i, j; |
3605 |
+ |
3606 |
+ ENTER; |
3607 |
+@@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) |
3608 |
+ } |
3609 |
+ |
3610 |
+ for (i = 0; i < nr_scsi_hw_queues; i++) { |
3611 |
+- if (ibmvfc_register_scsi_channel(vhost, i)) { |
3612 |
+- for (j = i; j > 0; j--) |
3613 |
+- ibmvfc_deregister_scsi_channel(vhost, j - 1); |
3614 |
++ scrq = &vhost->scsi_scrqs.scrqs[i]; |
3615 |
++ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { |
3616 |
++ for (j = i; j > 0; j--) { |
3617 |
++ scrq = &vhost->scsi_scrqs.scrqs[j - 1]; |
3618 |
++ ibmvfc_free_queue(vhost, scrq); |
3619 |
++ } |
3620 |
+ kfree(vhost->scsi_scrqs.scrqs); |
3621 |
+ vhost->scsi_scrqs.scrqs = NULL; |
3622 |
+ vhost->scsi_scrqs.active_queues = 0; |
3623 |
+ vhost->do_enquiry = 0; |
3624 |
+- break; |
3625 |
++ vhost->mq_enabled = 0; |
3626 |
++ return; |
3627 |
+ } |
3628 |
+ } |
3629 |
+ |
3630 |
++ ibmvfc_reg_sub_crqs(vhost); |
3631 |
++ |
3632 |
+ LEAVE; |
3633 |
+ } |
3634 |
+ |
3635 |
+ static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) |
3636 |
+ { |
3637 |
++ struct ibmvfc_queue *scrq; |
3638 |
+ int i; |
3639 |
+ |
3640 |
+ ENTER; |
3641 |
+ if (!vhost->scsi_scrqs.scrqs) |
3642 |
+ return; |
3643 |
+ |
3644 |
+- for (i = 0; i < nr_scsi_hw_queues; i++) |
3645 |
+- ibmvfc_deregister_scsi_channel(vhost, i); |
3646 |
++ ibmvfc_dereg_sub_crqs(vhost); |
3647 |
++ |
3648 |
++ for (i = 0; i < nr_scsi_hw_queues; i++) { |
3649 |
++ scrq = &vhost->scsi_scrqs.scrqs[i]; |
3650 |
++ ibmvfc_free_queue(vhost, scrq); |
3651 |
++ } |
3652 |
+ |
3653 |
+ kfree(vhost->scsi_scrqs.scrqs); |
3654 |
+ vhost->scsi_scrqs.scrqs = NULL; |
3655 |
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h |
3656 |
+index 3718406e09887..c39a245f43d02 100644 |
3657 |
+--- a/drivers/scsi/ibmvscsi/ibmvfc.h |
3658 |
++++ b/drivers/scsi/ibmvscsi/ibmvfc.h |
3659 |
+@@ -789,6 +789,7 @@ struct ibmvfc_queue { |
3660 |
+ spinlock_t _lock; |
3661 |
+ spinlock_t *q_lock; |
3662 |
+ |
3663 |
++ struct ibmvfc_host *vhost; |
3664 |
+ struct ibmvfc_event_pool evt_pool; |
3665 |
+ struct list_head sent; |
3666 |
+ struct list_head free; |
3667 |
+@@ -797,7 +798,6 @@ struct ibmvfc_queue { |
3668 |
+ union ibmvfc_iu cancel_rsp; |
3669 |
+ |
3670 |
+ /* Sub-CRQ fields */ |
3671 |
+- struct ibmvfc_host *vhost; |
3672 |
+ unsigned long cookie; |
3673 |
+ unsigned long vios_cookie; |
3674 |
+ unsigned long hw_irq; |
3675 |
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c |
3676 |
+index 592a290e6cfaa..6cdd67f2a08e9 100644 |
3677 |
+--- a/drivers/scsi/scsi_debug.c |
3678 |
++++ b/drivers/scsi/scsi_debug.c |
3679 |
+@@ -2788,6 +2788,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip, |
3680 |
+ } |
3681 |
+ } |
3682 |
+ |
3683 |
++static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, |
3684 |
++ struct sdeb_zone_state *zsp) |
3685 |
++{ |
3686 |
++ switch (zsp->z_cond) { |
3687 |
++ case ZC2_IMPLICIT_OPEN: |
3688 |
++ devip->nr_imp_open--; |
3689 |
++ break; |
3690 |
++ case ZC3_EXPLICIT_OPEN: |
3691 |
++ devip->nr_exp_open--; |
3692 |
++ break; |
3693 |
++ default: |
3694 |
++ WARN_ONCE(true, "Invalid zone %llu condition %x\n", |
3695 |
++ zsp->z_start, zsp->z_cond); |
3696 |
++ break; |
3697 |
++ } |
3698 |
++ zsp->z_cond = ZC5_FULL; |
3699 |
++} |
3700 |
++ |
3701 |
+ static void zbc_inc_wp(struct sdebug_dev_info *devip, |
3702 |
+ unsigned long long lba, unsigned int num) |
3703 |
+ { |
3704 |
+@@ -2800,7 +2818,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, |
3705 |
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) { |
3706 |
+ zsp->z_wp += num; |
3707 |
+ if (zsp->z_wp >= zend) |
3708 |
+- zsp->z_cond = ZC5_FULL; |
3709 |
++ zbc_set_zone_full(devip, zsp); |
3710 |
+ return; |
3711 |
+ } |
3712 |
+ |
3713 |
+@@ -2819,7 +2837,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, |
3714 |
+ n = num; |
3715 |
+ } |
3716 |
+ if (zsp->z_wp >= zend) |
3717 |
+- zsp->z_cond = ZC5_FULL; |
3718 |
++ zbc_set_zone_full(devip, zsp); |
3719 |
+ |
3720 |
+ num -= n; |
3721 |
+ lba += n; |
3722 |
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c |
3723 |
+index 2c0dd64159b09..5d21f07456c6d 100644 |
3724 |
+--- a/drivers/scsi/scsi_transport_iscsi.c |
3725 |
++++ b/drivers/scsi/scsi_transport_iscsi.c |
3726 |
+@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size) |
3727 |
+ return NULL; |
3728 |
+ |
3729 |
+ mutex_lock(&iscsi_ep_idr_mutex); |
3730 |
+- id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); |
3731 |
++ |
3732 |
++ /* |
3733 |
++ * First endpoint id should be 1 to comply with user space |
3734 |
++ * applications (iscsid). |
3735 |
++ */ |
3736 |
++ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); |
3737 |
+ if (id < 0) { |
3738 |
+ mutex_unlock(&iscsi_ep_idr_mutex); |
3739 |
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n", |
3740 |
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c |
3741 |
+index 9a0bba5a51a71..4b1f1d73eee8b 100644 |
3742 |
+--- a/drivers/scsi/storvsc_drv.c |
3743 |
++++ b/drivers/scsi/storvsc_drv.c |
3744 |
+@@ -1916,7 +1916,7 @@ static struct scsi_host_template scsi_driver = { |
3745 |
+ .cmd_per_lun = 2048, |
3746 |
+ .this_id = -1, |
3747 |
+ /* Ensure there are no gaps in presented sgls */ |
3748 |
+- .virt_boundary_mask = PAGE_SIZE-1, |
3749 |
++ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, |
3750 |
+ .no_write_same = 1, |
3751 |
+ .track_queue_depth = 1, |
3752 |
+ .change_queue_depth = storvsc_change_queue_depth, |
3753 |
+@@ -1970,6 +1970,7 @@ static int storvsc_probe(struct hv_device *device, |
3754 |
+ int max_targets; |
3755 |
+ int max_channels; |
3756 |
+ int max_sub_channels = 0; |
3757 |
++ u32 max_xfer_bytes; |
3758 |
+ |
3759 |
+ /* |
3760 |
+ * Based on the windows host we are running on, |
3761 |
+@@ -2059,12 +2060,28 @@ static int storvsc_probe(struct hv_device *device, |
3762 |
+ } |
3763 |
+ /* max cmd length */ |
3764 |
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN; |
3765 |
+- |
3766 |
+ /* |
3767 |
+- * set the table size based on the info we got |
3768 |
+- * from the host. |
3769 |
++ * Any reasonable Hyper-V configuration should provide |
3770 |
++ * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE, |
3771 |
++ * protecting it from any weird value. |
3772 |
++ */ |
3773 |
++ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); |
3774 |
++ /* max_hw_sectors_kb */ |
3775 |
++ host->max_sectors = max_xfer_bytes >> 9; |
3776 |
++ /* |
3777 |
++ * There are 2 requirements for Hyper-V storvsc sgl segments, |
3778 |
++ * based on which the below calculation for max segments is |
3779 |
++ * done: |
3780 |
++ * |
3781 |
++ * 1. Except for the first and last sgl segment, all sgl segments |
3782 |
++ * should be align to HV_HYP_PAGE_SIZE, that also means the |
3783 |
++ * maximum number of segments in a sgl can be calculated by |
3784 |
++ * dividing the total max transfer length by HV_HYP_PAGE_SIZE. |
3785 |
++ * |
3786 |
++ * 2. Except for the first and last, each entry in the SGL must |
3787 |
++ * have an offset that is a multiple of HV_HYP_PAGE_SIZE. |
3788 |
+ */ |
3789 |
+- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); |
3790 |
++ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; |
3791 |
+ /* |
3792 |
+ * For non-IDE disks, the host supports multiple channels. |
3793 |
+ * Set the number of HW queues we are supporting. |
3794 |
+diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
3795 |
+index 3cbb165d6e309..70ad0f3dce283 100644 |
3796 |
+--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
3797 |
++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c |
3798 |
+@@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev) |
3799 |
+ } |
3800 |
+ |
3801 |
+ ret = brcmstb_init_sram(dn); |
3802 |
++ of_node_put(dn); |
3803 |
+ if (ret) { |
3804 |
+ pr_err("error setting up SRAM for PM\n"); |
3805 |
+ return ret; |
3806 |
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c |
3807 |
+index dc6c96e04bcfe..3b8bf6daf7d0f 100644 |
3808 |
+--- a/drivers/usb/chipidea/udc.c |
3809 |
++++ b/drivers/usb/chipidea/udc.c |
3810 |
+@@ -1048,6 +1048,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) |
3811 |
+ struct ci_hdrc *ci = req->context; |
3812 |
+ unsigned long flags; |
3813 |
+ |
3814 |
++ if (req->status < 0) |
3815 |
++ return; |
3816 |
++ |
3817 |
+ if (ci->setaddr) { |
3818 |
+ hw_usb_set_address(ci, ci->address); |
3819 |
+ ci->setaddr = false; |
3820 |
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c |
3821 |
+index 7f59a0c474020..4e4a7c3126462 100644 |
3822 |
+--- a/drivers/usb/gadget/function/uvc_video.c |
3823 |
++++ b/drivers/usb/gadget/function/uvc_video.c |
3824 |
+@@ -415,6 +415,9 @@ static void uvcg_video_pump(struct work_struct *work) |
3825 |
+ uvcg_queue_cancel(queue, 0); |
3826 |
+ break; |
3827 |
+ } |
3828 |
++ |
3829 |
++ /* Endpoint now owns the request */ |
3830 |
++ req = NULL; |
3831 |
+ video->req_int_count++; |
3832 |
+ } |
3833 |
+ |
3834 |
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c |
3835 |
+index e9440f7bf019d..ed7c2127fb911 100644 |
3836 |
+--- a/drivers/usb/gadget/legacy/raw_gadget.c |
3837 |
++++ b/drivers/usb/gadget/legacy/raw_gadget.c |
3838 |
+@@ -11,6 +11,7 @@ |
3839 |
+ #include <linux/ctype.h> |
3840 |
+ #include <linux/debugfs.h> |
3841 |
+ #include <linux/delay.h> |
3842 |
++#include <linux/idr.h> |
3843 |
+ #include <linux/kref.h> |
3844 |
+ #include <linux/miscdevice.h> |
3845 |
+ #include <linux/module.h> |
3846 |
+@@ -36,6 +37,9 @@ MODULE_LICENSE("GPL"); |
3847 |
+ |
3848 |
+ /*----------------------------------------------------------------------*/ |
3849 |
+ |
3850 |
++static DEFINE_IDA(driver_id_numbers); |
3851 |
++#define DRIVER_DRIVER_NAME_LENGTH_MAX 32 |
3852 |
++ |
3853 |
+ #define RAW_EVENT_QUEUE_SIZE 16 |
3854 |
+ |
3855 |
+ struct raw_event_queue { |
3856 |
+@@ -161,6 +165,9 @@ struct raw_dev { |
3857 |
+ /* Reference to misc device: */ |
3858 |
+ struct device *dev; |
3859 |
+ |
3860 |
++ /* Make driver names unique */ |
3861 |
++ int driver_id_number; |
3862 |
++ |
3863 |
+ /* Protected by lock: */ |
3864 |
+ enum dev_state state; |
3865 |
+ bool gadget_registered; |
3866 |
+@@ -189,6 +196,7 @@ static struct raw_dev *dev_new(void) |
3867 |
+ spin_lock_init(&dev->lock); |
3868 |
+ init_completion(&dev->ep0_done); |
3869 |
+ raw_event_queue_init(&dev->queue); |
3870 |
++ dev->driver_id_number = -1; |
3871 |
+ return dev; |
3872 |
+ } |
3873 |
+ |
3874 |
+@@ -199,6 +207,9 @@ static void dev_free(struct kref *kref) |
3875 |
+ |
3876 |
+ kfree(dev->udc_name); |
3877 |
+ kfree(dev->driver.udc_name); |
3878 |
++ kfree(dev->driver.driver.name); |
3879 |
++ if (dev->driver_id_number >= 0) |
3880 |
++ ida_free(&driver_id_numbers, dev->driver_id_number); |
3881 |
+ if (dev->req) { |
3882 |
+ if (dev->ep0_urb_queued) |
3883 |
+ usb_ep_dequeue(dev->gadget->ep0, dev->req); |
3884 |
+@@ -419,9 +430,11 @@ out_put: |
3885 |
+ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) |
3886 |
+ { |
3887 |
+ int ret = 0; |
3888 |
++ int driver_id_number; |
3889 |
+ struct usb_raw_init arg; |
3890 |
+ char *udc_driver_name; |
3891 |
+ char *udc_device_name; |
3892 |
++ char *driver_driver_name; |
3893 |
+ unsigned long flags; |
3894 |
+ |
3895 |
+ if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) |
3896 |
+@@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) |
3897 |
+ return -EINVAL; |
3898 |
+ } |
3899 |
+ |
3900 |
++ driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL); |
3901 |
++ if (driver_id_number < 0) |
3902 |
++ return driver_id_number; |
3903 |
++ |
3904 |
++ driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL); |
3905 |
++ if (!driver_driver_name) { |
3906 |
++ ret = -ENOMEM; |
3907 |
++ goto out_free_driver_id_number; |
3908 |
++ } |
3909 |
++ snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX, |
3910 |
++ DRIVER_NAME ".%d", driver_id_number); |
3911 |
++ |
3912 |
+ udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); |
3913 |
+- if (!udc_driver_name) |
3914 |
+- return -ENOMEM; |
3915 |
++ if (!udc_driver_name) { |
3916 |
++ ret = -ENOMEM; |
3917 |
++ goto out_free_driver_driver_name; |
3918 |
++ } |
3919 |
+ ret = strscpy(udc_driver_name, &arg.driver_name[0], |
3920 |
+ UDC_NAME_LENGTH_MAX); |
3921 |
+- if (ret < 0) { |
3922 |
+- kfree(udc_driver_name); |
3923 |
+- return ret; |
3924 |
+- } |
3925 |
++ if (ret < 0) |
3926 |
++ goto out_free_udc_driver_name; |
3927 |
+ ret = 0; |
3928 |
+ |
3929 |
+ udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); |
3930 |
+ if (!udc_device_name) { |
3931 |
+- kfree(udc_driver_name); |
3932 |
+- return -ENOMEM; |
3933 |
++ ret = -ENOMEM; |
3934 |
++ goto out_free_udc_driver_name; |
3935 |
+ } |
3936 |
+ ret = strscpy(udc_device_name, &arg.device_name[0], |
3937 |
+ UDC_NAME_LENGTH_MAX); |
3938 |
+- if (ret < 0) { |
3939 |
+- kfree(udc_driver_name); |
3940 |
+- kfree(udc_device_name); |
3941 |
+- return ret; |
3942 |
+- } |
3943 |
++ if (ret < 0) |
3944 |
++ goto out_free_udc_device_name; |
3945 |
+ ret = 0; |
3946 |
+ |
3947 |
+ spin_lock_irqsave(&dev->lock, flags); |
3948 |
+ if (dev->state != STATE_DEV_OPENED) { |
3949 |
+ dev_dbg(dev->dev, "fail, device is not opened\n"); |
3950 |
+- kfree(udc_driver_name); |
3951 |
+- kfree(udc_device_name); |
3952 |
+ ret = -EINVAL; |
3953 |
+ goto out_unlock; |
3954 |
+ } |
3955 |
+@@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) |
3956 |
+ dev->driver.suspend = gadget_suspend; |
3957 |
+ dev->driver.resume = gadget_resume; |
3958 |
+ dev->driver.reset = gadget_reset; |
3959 |
+- dev->driver.driver.name = DRIVER_NAME; |
3960 |
++ dev->driver.driver.name = driver_driver_name; |
3961 |
+ dev->driver.udc_name = udc_device_name; |
3962 |
+ dev->driver.match_existing_only = 1; |
3963 |
++ dev->driver_id_number = driver_id_number; |
3964 |
+ |
3965 |
+ dev->state = STATE_DEV_INITIALIZED; |
3966 |
++ spin_unlock_irqrestore(&dev->lock, flags); |
3967 |
++ return ret; |
3968 |
+ |
3969 |
+ out_unlock: |
3970 |
+ spin_unlock_irqrestore(&dev->lock, flags); |
3971 |
++out_free_udc_device_name: |
3972 |
++ kfree(udc_device_name); |
3973 |
++out_free_udc_driver_name: |
3974 |
++ kfree(udc_driver_name); |
3975 |
++out_free_driver_driver_name: |
3976 |
++ kfree(driver_driver_name); |
3977 |
++out_free_driver_id_number: |
3978 |
++ ida_free(&driver_id_numbers, driver_id_number); |
3979 |
+ return ret; |
3980 |
+ } |
3981 |
+ |
3982 |
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c |
3983 |
+index f65f1ba2b5929..fc322a9526c8c 100644 |
3984 |
+--- a/drivers/usb/host/xhci-hub.c |
3985 |
++++ b/drivers/usb/host/xhci-hub.c |
3986 |
+@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) |
3987 |
+ * It will release and re-aquire the lock while calling ACPI |
3988 |
+ * method. |
3989 |
+ */ |
3990 |
+-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, |
3991 |
++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, |
3992 |
+ u16 index, bool on, unsigned long *flags) |
3993 |
+ __must_hold(&xhci->lock) |
3994 |
+ { |
3995 |
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
3996 |
+index d57c5ff5ae1f4..64173010d4666 100644 |
3997 |
+--- a/drivers/usb/host/xhci-pci.c |
3998 |
++++ b/drivers/usb/host/xhci-pci.c |
3999 |
+@@ -61,6 +61,8 @@ |
4000 |
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e |
4001 |
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e |
4002 |
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed |
4003 |
++#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e |
4004 |
++#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0 |
4005 |
+ |
4006 |
+ #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 |
4007 |
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 |
4008 |
+@@ -270,7 +272,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
4009 |
+ pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || |
4010 |
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || |
4011 |
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI || |
4012 |
+- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)) |
4013 |
++ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || |
4014 |
++ pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI || |
4015 |
++ pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI)) |
4016 |
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; |
4017 |
+ |
4018 |
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
4019 |
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
4020 |
+index 2be38d9de8df4..162d975b648c1 100644 |
4021 |
+--- a/drivers/usb/host/xhci.c |
4022 |
++++ b/drivers/usb/host/xhci.c |
4023 |
+@@ -779,6 +779,8 @@ static void xhci_stop(struct usb_hcd *hcd) |
4024 |
+ void xhci_shutdown(struct usb_hcd *hcd) |
4025 |
+ { |
4026 |
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
4027 |
++ unsigned long flags; |
4028 |
++ int i; |
4029 |
+ |
4030 |
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT) |
4031 |
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); |
4032 |
+@@ -794,12 +796,21 @@ void xhci_shutdown(struct usb_hcd *hcd) |
4033 |
+ del_timer_sync(&xhci->shared_hcd->rh_timer); |
4034 |
+ } |
4035 |
+ |
4036 |
+- spin_lock_irq(&xhci->lock); |
4037 |
++ spin_lock_irqsave(&xhci->lock, flags); |
4038 |
+ xhci_halt(xhci); |
4039 |
++ |
4040 |
++ /* Power off USB2 ports*/ |
4041 |
++ for (i = 0; i < xhci->usb2_rhub.num_ports; i++) |
4042 |
++ xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags); |
4043 |
++ |
4044 |
++ /* Power off USB3 ports*/ |
4045 |
++ for (i = 0; i < xhci->usb3_rhub.num_ports; i++) |
4046 |
++ xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags); |
4047 |
++ |
4048 |
+ /* Workaround for spurious wakeups at shutdown with HSW */ |
4049 |
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) |
4050 |
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC); |
4051 |
+- spin_unlock_irq(&xhci->lock); |
4052 |
++ spin_unlock_irqrestore(&xhci->lock, flags); |
4053 |
+ |
4054 |
+ xhci_cleanup_msix(xhci); |
4055 |
+ |
4056 |
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
4057 |
+index 473a33ce299e4..1f3f311d9951e 100644 |
4058 |
+--- a/drivers/usb/host/xhci.h |
4059 |
++++ b/drivers/usb/host/xhci.h |
4060 |
+@@ -2172,6 +2172,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
4061 |
+ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); |
4062 |
+ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); |
4063 |
+ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); |
4064 |
++void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, |
4065 |
++ bool on, unsigned long *flags); |
4066 |
+ |
4067 |
+ void xhci_hc_died(struct xhci_hcd *xhci); |
4068 |
+ |
4069 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
4070 |
+index ed1e50d83ccab..de59fa919540a 100644 |
4071 |
+--- a/drivers/usb/serial/option.c |
4072 |
++++ b/drivers/usb/serial/option.c |
4073 |
+@@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb); |
4074 |
+ #define QUECTEL_PRODUCT_EG95 0x0195 |
4075 |
+ #define QUECTEL_PRODUCT_BG96 0x0296 |
4076 |
+ #define QUECTEL_PRODUCT_EP06 0x0306 |
4077 |
++#define QUECTEL_PRODUCT_EM05G 0x030a |
4078 |
+ #define QUECTEL_PRODUCT_EM12 0x0512 |
4079 |
+ #define QUECTEL_PRODUCT_RM500Q 0x0800 |
4080 |
+ #define QUECTEL_PRODUCT_EC200S_CN 0x6002 |
4081 |
+ #define QUECTEL_PRODUCT_EC200T 0x6026 |
4082 |
++#define QUECTEL_PRODUCT_RM500K 0x7001 |
4083 |
+ |
4084 |
+ #define CMOTECH_VENDOR_ID 0x16d8 |
4085 |
+ #define CMOTECH_PRODUCT_6001 0x6001 |
4086 |
+@@ -1134,6 +1136,8 @@ static const struct usb_device_id option_ids[] = { |
4087 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), |
4088 |
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, |
4089 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, |
4090 |
++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), |
4091 |
++ .driver_info = RSVD(6) | ZLP }, |
4092 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), |
4093 |
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, |
4094 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, |
4095 |
+@@ -1147,6 +1151,7 @@ static const struct usb_device_id option_ids[] = { |
4096 |
+ .driver_info = ZLP }, |
4097 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, |
4098 |
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, |
4099 |
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, |
4100 |
+ |
4101 |
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
4102 |
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
4103 |
+@@ -1279,6 +1284,7 @@ static const struct usb_device_id option_ids[] = { |
4104 |
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, |
4105 |
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ |
4106 |
+ .driver_info = NCTRL(2) | RSVD(3) }, |
4107 |
++ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */ |
4108 |
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), |
4109 |
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, |
4110 |
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), |
4111 |
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c |
4112 |
+index 3506c47e1eef0..40b1ab3d284dc 100644 |
4113 |
+--- a/drivers/usb/serial/pl2303.c |
4114 |
++++ b/drivers/usb/serial/pl2303.c |
4115 |
+@@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial) |
4116 |
+ break; |
4117 |
+ case 0x200: |
4118 |
+ switch (bcdDevice) { |
4119 |
+- case 0x100: |
4120 |
++ case 0x100: /* GC */ |
4121 |
+ case 0x105: |
4122 |
++ return TYPE_HXN; |
4123 |
++ case 0x300: /* GT / TA */ |
4124 |
++ if (pl2303_supports_hx_status(serial)) |
4125 |
++ return TYPE_TA; |
4126 |
++ fallthrough; |
4127 |
+ case 0x305: |
4128 |
++ case 0x400: /* GL */ |
4129 |
+ case 0x405: |
4130 |
++ return TYPE_HXN; |
4131 |
++ case 0x500: /* GE / TB */ |
4132 |
++ if (pl2303_supports_hx_status(serial)) |
4133 |
++ return TYPE_TB; |
4134 |
++ fallthrough; |
4135 |
++ case 0x505: |
4136 |
++ case 0x600: /* GS */ |
4137 |
+ case 0x605: |
4138 |
+- /* |
4139 |
+- * Assume it's an HXN-type if the device doesn't |
4140 |
+- * support the old read request value. |
4141 |
+- */ |
4142 |
+- if (!pl2303_supports_hx_status(serial)) |
4143 |
+- return TYPE_HXN; |
4144 |
+- break; |
4145 |
+- case 0x300: |
4146 |
+- return TYPE_TA; |
4147 |
+- case 0x500: |
4148 |
+- return TYPE_TB; |
4149 |
++ case 0x700: /* GR */ |
4150 |
++ case 0x705: |
4151 |
++ return TYPE_HXN; |
4152 |
+ } |
4153 |
+ break; |
4154 |
+ } |
4155 |
+diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig |
4156 |
+index 557f392fe24da..073fd2ea5e0bb 100644 |
4157 |
+--- a/drivers/usb/typec/tcpm/Kconfig |
4158 |
++++ b/drivers/usb/typec/tcpm/Kconfig |
4159 |
+@@ -56,7 +56,6 @@ config TYPEC_WCOVE |
4160 |
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver" |
4161 |
+ depends on ACPI |
4162 |
+ depends on MFD_INTEL_PMC_BXT |
4163 |
+- depends on INTEL_SOC_PMIC |
4164 |
+ depends on BXT_WC_PMIC_OPREGION |
4165 |
+ help |
4166 |
+ This driver adds support for USB Type-C on Intel Broxton platforms |
4167 |
+diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c |
4168 |
+index 6a947ff96d6eb..19fd3389946d9 100644 |
4169 |
+--- a/drivers/video/console/sticore.c |
4170 |
++++ b/drivers/video/console/sticore.c |
4171 |
+@@ -1127,6 +1127,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func, |
4172 |
+ return ret; |
4173 |
+ } |
4174 |
+ |
4175 |
++#if defined(CONFIG_FB_STI) |
4176 |
+ /* check if given fb_info is the primary device */ |
4177 |
+ int fb_is_primary_device(struct fb_info *info) |
4178 |
+ { |
4179 |
+@@ -1142,6 +1143,7 @@ int fb_is_primary_device(struct fb_info *info) |
4180 |
+ return (sti->info == info); |
4181 |
+ } |
4182 |
+ EXPORT_SYMBOL(fb_is_primary_device); |
4183 |
++#endif |
4184 |
+ |
4185 |
+ MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer"); |
4186 |
+ MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines"); |
4187 |
+diff --git a/drivers/xen/features.c b/drivers/xen/features.c |
4188 |
+index 7b591443833c9..87f1828d40d5e 100644 |
4189 |
+--- a/drivers/xen/features.c |
4190 |
++++ b/drivers/xen/features.c |
4191 |
+@@ -42,7 +42,7 @@ void xen_setup_features(void) |
4192 |
+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) |
4193 |
+ break; |
4194 |
+ for (j = 0; j < 32; j++) |
4195 |
+- xen_features[i * 32 + j] = !!(fi.submap & 1<<j); |
4196 |
++ xen_features[i * 32 + j] = !!(fi.submap & 1U << j); |
4197 |
+ } |
4198 |
+ |
4199 |
+ if (xen_pv_domain()) { |
4200 |
+diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h |
4201 |
+index 20d7d059dadb5..40ef379c28ab0 100644 |
4202 |
+--- a/drivers/xen/gntdev-common.h |
4203 |
++++ b/drivers/xen/gntdev-common.h |
4204 |
+@@ -16,6 +16,7 @@ |
4205 |
+ #include <linux/mmu_notifier.h> |
4206 |
+ #include <linux/types.h> |
4207 |
+ #include <xen/interface/event_channel.h> |
4208 |
++#include <xen/grant_table.h> |
4209 |
+ |
4210 |
+ struct gntdev_dmabuf_priv; |
4211 |
+ |
4212 |
+@@ -56,6 +57,7 @@ struct gntdev_grant_map { |
4213 |
+ struct gnttab_unmap_grant_ref *unmap_ops; |
4214 |
+ struct gnttab_map_grant_ref *kmap_ops; |
4215 |
+ struct gnttab_unmap_grant_ref *kunmap_ops; |
4216 |
++ bool *being_removed; |
4217 |
+ struct page **pages; |
4218 |
+ unsigned long pages_vm_start; |
4219 |
+ |
4220 |
+@@ -73,6 +75,11 @@ struct gntdev_grant_map { |
4221 |
+ /* Needed to avoid allocation in gnttab_dma_free_pages(). */ |
4222 |
+ xen_pfn_t *frames; |
4223 |
+ #endif |
4224 |
++ |
4225 |
++ /* Number of live grants */ |
4226 |
++ atomic_t live_grants; |
4227 |
++ /* Needed to avoid allocation in __unmap_grant_pages */ |
4228 |
++ struct gntab_unmap_queue_data unmap_data; |
4229 |
+ }; |
4230 |
+ |
4231 |
+ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, |
4232 |
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c |
4233 |
+index 59ffea8000791..4b56c39f766d4 100644 |
4234 |
+--- a/drivers/xen/gntdev.c |
4235 |
++++ b/drivers/xen/gntdev.c |
4236 |
+@@ -35,6 +35,7 @@ |
4237 |
+ #include <linux/slab.h> |
4238 |
+ #include <linux/highmem.h> |
4239 |
+ #include <linux/refcount.h> |
4240 |
++#include <linux/workqueue.h> |
4241 |
+ |
4242 |
+ #include <xen/xen.h> |
4243 |
+ #include <xen/grant_table.h> |
4244 |
+@@ -60,10 +61,11 @@ module_param(limit, uint, 0644); |
4245 |
+ MODULE_PARM_DESC(limit, |
4246 |
+ "Maximum number of grants that may be mapped by one mapping request"); |
4247 |
+ |
4248 |
++/* True in PV mode, false otherwise */ |
4249 |
+ static int use_ptemod; |
4250 |
+ |
4251 |
+-static int unmap_grant_pages(struct gntdev_grant_map *map, |
4252 |
+- int offset, int pages); |
4253 |
++static void unmap_grant_pages(struct gntdev_grant_map *map, |
4254 |
++ int offset, int pages); |
4255 |
+ |
4256 |
+ static struct miscdevice gntdev_miscdev; |
4257 |
+ |
4258 |
+@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map) |
4259 |
+ kvfree(map->unmap_ops); |
4260 |
+ kvfree(map->kmap_ops); |
4261 |
+ kvfree(map->kunmap_ops); |
4262 |
++ kvfree(map->being_removed); |
4263 |
+ kfree(map); |
4264 |
+ } |
4265 |
+ |
4266 |
+@@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, |
4267 |
+ add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), |
4268 |
+ GFP_KERNEL); |
4269 |
+ add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); |
4270 |
++ add->being_removed = |
4271 |
++ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); |
4272 |
+ if (NULL == add->grants || |
4273 |
+ NULL == add->map_ops || |
4274 |
+ NULL == add->unmap_ops || |
4275 |
+- NULL == add->pages) |
4276 |
++ NULL == add->pages || |
4277 |
++ NULL == add->being_removed) |
4278 |
+ goto err; |
4279 |
+ if (use_ptemod) { |
4280 |
+ add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), |
4281 |
+@@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) |
4282 |
+ if (!refcount_dec_and_test(&map->users)) |
4283 |
+ return; |
4284 |
+ |
4285 |
+- if (map->pages && !use_ptemod) |
4286 |
++ if (map->pages && !use_ptemod) { |
4287 |
++ /* |
4288 |
++ * Increment the reference count. This ensures that the |
4289 |
++ * subsequent call to unmap_grant_pages() will not wind up |
4290 |
++ * re-entering itself. It *can* wind up calling |
4291 |
++ * gntdev_put_map() recursively, but such calls will be with a |
4292 |
++ * reference count greater than 1, so they will return before |
4293 |
++ * this code is reached. The recursion depth is thus limited to |
4294 |
++ * 1. Do NOT use refcount_inc() here, as it will detect that |
4295 |
++ * the reference count is zero and WARN(). |
4296 |
++ */ |
4297 |
++ refcount_set(&map->users, 1); |
4298 |
++ |
4299 |
++ /* |
4300 |
++ * Unmap the grants. This may or may not be asynchronous, so it |
4301 |
++ * is possible that the reference count is 1 on return, but it |
4302 |
++ * could also be greater than 1. |
4303 |
++ */ |
4304 |
+ unmap_grant_pages(map, 0, map->count); |
4305 |
+ |
4306 |
++ /* Check if the memory now needs to be freed */ |
4307 |
++ if (!refcount_dec_and_test(&map->users)) |
4308 |
++ return; |
4309 |
++ |
4310 |
++ /* |
4311 |
++ * All pages have been returned to the hypervisor, so free the |
4312 |
++ * map. |
4313 |
++ */ |
4314 |
++ } |
4315 |
++ |
4316 |
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { |
4317 |
+ notify_remote_via_evtchn(map->notify.event); |
4318 |
+ evtchn_put(map->notify.event); |
4319 |
+@@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) |
4320 |
+ |
4321 |
+ int gntdev_map_grant_pages(struct gntdev_grant_map *map) |
4322 |
+ { |
4323 |
++ size_t alloced = 0; |
4324 |
+ int i, err = 0; |
4325 |
+ |
4326 |
+ if (!use_ptemod) { |
4327 |
+@@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) |
4328 |
+ map->count); |
4329 |
+ |
4330 |
+ for (i = 0; i < map->count; i++) { |
4331 |
+- if (map->map_ops[i].status == GNTST_okay) |
4332 |
++ if (map->map_ops[i].status == GNTST_okay) { |
4333 |
+ map->unmap_ops[i].handle = map->map_ops[i].handle; |
4334 |
+- else if (!err) |
4335 |
++ if (!use_ptemod) |
4336 |
++ alloced++; |
4337 |
++ } else if (!err) |
4338 |
+ err = -EINVAL; |
4339 |
+ |
4340 |
+ if (map->flags & GNTMAP_device_map) |
4341 |
+ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; |
4342 |
+ |
4343 |
+ if (use_ptemod) { |
4344 |
+- if (map->kmap_ops[i].status == GNTST_okay) |
4345 |
++ if (map->kmap_ops[i].status == GNTST_okay) { |
4346 |
++ if (map->map_ops[i].status == GNTST_okay) |
4347 |
++ alloced++; |
4348 |
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle; |
4349 |
+- else if (!err) |
4350 |
++ } else if (!err) |
4351 |
+ err = -EINVAL; |
4352 |
+ } |
4353 |
+ } |
4354 |
++ atomic_add(alloced, &map->live_grants); |
4355 |
+ return err; |
4356 |
+ } |
4357 |
+ |
4358 |
+-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, |
4359 |
+- int pages) |
4360 |
++static void __unmap_grant_pages_done(int result, |
4361 |
++ struct gntab_unmap_queue_data *data) |
4362 |
+ { |
4363 |
+- int i, err = 0; |
4364 |
+- struct gntab_unmap_queue_data unmap_data; |
4365 |
+- |
4366 |
+- if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
4367 |
+- int pgno = (map->notify.addr >> PAGE_SHIFT); |
4368 |
+- if (pgno >= offset && pgno < offset + pages) { |
4369 |
+- /* No need for kmap, pages are in lowmem */ |
4370 |
+- uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); |
4371 |
+- tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; |
4372 |
+- map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
4373 |
+- } |
4374 |
+- } |
4375 |
+- |
4376 |
+- unmap_data.unmap_ops = map->unmap_ops + offset; |
4377 |
+- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; |
4378 |
+- unmap_data.pages = map->pages + offset; |
4379 |
+- unmap_data.count = pages; |
4380 |
+- |
4381 |
+- err = gnttab_unmap_refs_sync(&unmap_data); |
4382 |
+- if (err) |
4383 |
+- return err; |
4384 |
++ unsigned int i; |
4385 |
++ struct gntdev_grant_map *map = data->data; |
4386 |
++ unsigned int offset = data->unmap_ops - map->unmap_ops; |
4387 |
+ |
4388 |
+- for (i = 0; i < pages; i++) { |
4389 |
+- if (map->unmap_ops[offset+i].status) |
4390 |
+- err = -EINVAL; |
4391 |
++ for (i = 0; i < data->count; i++) { |
4392 |
++ WARN_ON(map->unmap_ops[offset+i].status); |
4393 |
+ pr_debug("unmap handle=%d st=%d\n", |
4394 |
+ map->unmap_ops[offset+i].handle, |
4395 |
+ map->unmap_ops[offset+i].status); |
4396 |
+ map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; |
4397 |
+ if (use_ptemod) { |
4398 |
+- if (map->kunmap_ops[offset+i].status) |
4399 |
+- err = -EINVAL; |
4400 |
++ WARN_ON(map->kunmap_ops[offset+i].status); |
4401 |
+ pr_debug("kunmap handle=%u st=%d\n", |
4402 |
+ map->kunmap_ops[offset+i].handle, |
4403 |
+ map->kunmap_ops[offset+i].status); |
4404 |
+ map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; |
4405 |
+ } |
4406 |
+ } |
4407 |
+- return err; |
4408 |
++ /* |
4409 |
++ * Decrease the live-grant counter. This must happen after the loop to |
4410 |
++ * prevent premature reuse of the grants by gnttab_mmap(). |
4411 |
++ */ |
4412 |
++ atomic_sub(data->count, &map->live_grants); |
4413 |
++ |
4414 |
++ /* Release reference taken by __unmap_grant_pages */ |
4415 |
++ gntdev_put_map(NULL, map); |
4416 |
++} |
4417 |
++ |
4418 |
++static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, |
4419 |
++ int pages) |
4420 |
++{ |
4421 |
++ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
4422 |
++ int pgno = (map->notify.addr >> PAGE_SHIFT); |
4423 |
++ |
4424 |
++ if (pgno >= offset && pgno < offset + pages) { |
4425 |
++ /* No need for kmap, pages are in lowmem */ |
4426 |
++ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); |
4427 |
++ |
4428 |
++ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; |
4429 |
++ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
4430 |
++ } |
4431 |
++ } |
4432 |
++ |
4433 |
++ map->unmap_data.unmap_ops = map->unmap_ops + offset; |
4434 |
++ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; |
4435 |
++ map->unmap_data.pages = map->pages + offset; |
4436 |
++ map->unmap_data.count = pages; |
4437 |
++ map->unmap_data.done = __unmap_grant_pages_done; |
4438 |
++ map->unmap_data.data = map; |
4439 |
++ refcount_inc(&map->users); /* to keep map alive during async call below */ |
4440 |
++ |
4441 |
++ gnttab_unmap_refs_async(&map->unmap_data); |
4442 |
+ } |
4443 |
+ |
4444 |
+-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, |
4445 |
+- int pages) |
4446 |
++static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, |
4447 |
++ int pages) |
4448 |
+ { |
4449 |
+- int range, err = 0; |
4450 |
++ int range; |
4451 |
++ |
4452 |
++ if (atomic_read(&map->live_grants) == 0) |
4453 |
++ return; /* Nothing to do */ |
4454 |
+ |
4455 |
+ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); |
4456 |
+ |
4457 |
+ /* It is possible the requested range will have a "hole" where we |
4458 |
+ * already unmapped some of the grants. Only unmap valid ranges. |
4459 |
+ */ |
4460 |
+- while (pages && !err) { |
4461 |
+- while (pages && |
4462 |
+- map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) { |
4463 |
++ while (pages) { |
4464 |
++ while (pages && map->being_removed[offset]) { |
4465 |
+ offset++; |
4466 |
+ pages--; |
4467 |
+ } |
4468 |
+ range = 0; |
4469 |
+ while (range < pages) { |
4470 |
+- if (map->unmap_ops[offset + range].handle == |
4471 |
+- INVALID_GRANT_HANDLE) |
4472 |
++ if (map->being_removed[offset + range]) |
4473 |
+ break; |
4474 |
++ map->being_removed[offset + range] = true; |
4475 |
+ range++; |
4476 |
+ } |
4477 |
+- err = __unmap_grant_pages(map, offset, range); |
4478 |
++ if (range) |
4479 |
++ __unmap_grant_pages(map, offset, range); |
4480 |
+ offset += range; |
4481 |
+ pages -= range; |
4482 |
+ } |
4483 |
+- |
4484 |
+- return err; |
4485 |
+ } |
4486 |
+ |
4487 |
+ /* ------------------------------------------------------------------ */ |
4488 |
+@@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, |
4489 |
+ struct gntdev_grant_map *map = |
4490 |
+ container_of(mn, struct gntdev_grant_map, notifier); |
4491 |
+ unsigned long mstart, mend; |
4492 |
+- int err; |
4493 |
+ |
4494 |
+ if (!mmu_notifier_range_blockable(range)) |
4495 |
+ return false; |
4496 |
+@@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, |
4497 |
+ map->index, map->count, |
4498 |
+ map->vma->vm_start, map->vma->vm_end, |
4499 |
+ range->start, range->end, mstart, mend); |
4500 |
+- err = unmap_grant_pages(map, |
4501 |
++ unmap_grant_pages(map, |
4502 |
+ (mstart - map->vma->vm_start) >> PAGE_SHIFT, |
4503 |
+ (mend - mstart) >> PAGE_SHIFT); |
4504 |
+- WARN_ON(err); |
4505 |
+ |
4506 |
+ return true; |
4507 |
+ } |
4508 |
+@@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) |
4509 |
+ goto unlock_out; |
4510 |
+ if (use_ptemod && map->vma) |
4511 |
+ goto unlock_out; |
4512 |
++ if (atomic_read(&map->live_grants)) { |
4513 |
++ err = -EAGAIN; |
4514 |
++ goto unlock_out; |
4515 |
++ } |
4516 |
+ refcount_inc(&map->users); |
4517 |
+ |
4518 |
+ vma->vm_ops = &gntdev_vmops; |
4519 |
+diff --git a/fs/9p/fid.c b/fs/9p/fid.c |
4520 |
+index 79df61fe0e596..baf2b152229e3 100644 |
4521 |
+--- a/fs/9p/fid.c |
4522 |
++++ b/fs/9p/fid.c |
4523 |
+@@ -152,7 +152,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, |
4524 |
+ const unsigned char **wnames, *uname; |
4525 |
+ int i, n, l, clone, access; |
4526 |
+ struct v9fs_session_info *v9ses; |
4527 |
+- struct p9_fid *fid, *old_fid = NULL; |
4528 |
++ struct p9_fid *fid, *old_fid; |
4529 |
+ |
4530 |
+ v9ses = v9fs_dentry2v9ses(dentry); |
4531 |
+ access = v9ses->flags & V9FS_ACCESS_MASK; |
4532 |
+@@ -194,13 +194,12 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, |
4533 |
+ if (IS_ERR(fid)) |
4534 |
+ return fid; |
4535 |
+ |
4536 |
++ refcount_inc(&fid->count); |
4537 |
+ v9fs_fid_add(dentry->d_sb->s_root, fid); |
4538 |
+ } |
4539 |
+ /* If we are root ourself just return that */ |
4540 |
+- if (dentry->d_sb->s_root == dentry) { |
4541 |
+- refcount_inc(&fid->count); |
4542 |
++ if (dentry->d_sb->s_root == dentry) |
4543 |
+ return fid; |
4544 |
+- } |
4545 |
+ /* |
4546 |
+ * Do a multipath walk with attached root. |
4547 |
+ * When walking parent we need to make sure we |
4548 |
+@@ -212,6 +211,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, |
4549 |
+ fid = ERR_PTR(n); |
4550 |
+ goto err_out; |
4551 |
+ } |
4552 |
++ old_fid = fid; |
4553 |
+ clone = 1; |
4554 |
+ i = 0; |
4555 |
+ while (i < n) { |
4556 |
+@@ -221,19 +221,15 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, |
4557 |
+ * walk to ensure none of the patch component change |
4558 |
+ */ |
4559 |
+ fid = p9_client_walk(fid, l, &wnames[i], clone); |
4560 |
++ /* non-cloning walk will return the same fid */ |
4561 |
++ if (fid != old_fid) { |
4562 |
++ p9_client_clunk(old_fid); |
4563 |
++ old_fid = fid; |
4564 |
++ } |
4565 |
+ if (IS_ERR(fid)) { |
4566 |
+- if (old_fid) { |
4567 |
+- /* |
4568 |
+- * If we fail, clunk fid which are mapping |
4569 |
+- * to path component and not the last component |
4570 |
+- * of the path. |
4571 |
+- */ |
4572 |
+- p9_client_clunk(old_fid); |
4573 |
+- } |
4574 |
+ kfree(wnames); |
4575 |
+ goto err_out; |
4576 |
+ } |
4577 |
+- old_fid = fid; |
4578 |
+ i += l; |
4579 |
+ clone = 0; |
4580 |
+ } |
4581 |
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c |
4582 |
+index 595875228672f..a58c554b40706 100644 |
4583 |
+--- a/fs/9p/vfs_addr.c |
4584 |
++++ b/fs/9p/vfs_addr.c |
4585 |
+@@ -58,8 +58,21 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq) |
4586 |
+ */ |
4587 |
+ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) |
4588 |
+ { |
4589 |
++ struct inode *inode = file_inode(file); |
4590 |
++ struct v9fs_inode *v9inode = V9FS_I(inode); |
4591 |
+ struct p9_fid *fid = file->private_data; |
4592 |
+ |
4593 |
++ BUG_ON(!fid); |
4594 |
++ |
4595 |
++ /* we might need to read from a fid that was opened write-only |
4596 |
++ * for read-modify-write of page cache, use the writeback fid |
4597 |
++ * for that */ |
4598 |
++ if (rreq->origin == NETFS_READ_FOR_WRITE && |
4599 |
++ (fid->mode & O_ACCMODE) == O_WRONLY) { |
4600 |
++ fid = v9inode->writeback_fid; |
4601 |
++ BUG_ON(!fid); |
4602 |
++ } |
4603 |
++ |
4604 |
+ refcount_inc(&fid->count); |
4605 |
+ rreq->netfs_priv = fid; |
4606 |
+ return 0; |
4607 |
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c |
4608 |
+index e660c6348b9da..d4b705a866eab 100644 |
4609 |
+--- a/fs/9p/vfs_inode.c |
4610 |
++++ b/fs/9p/vfs_inode.c |
4611 |
+@@ -1250,15 +1250,15 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry, |
4612 |
+ return ERR_PTR(-ECHILD); |
4613 |
+ |
4614 |
+ v9ses = v9fs_dentry2v9ses(dentry); |
4615 |
+- fid = v9fs_fid_lookup(dentry); |
4616 |
++ if (!v9fs_proto_dotu(v9ses)) |
4617 |
++ return ERR_PTR(-EBADF); |
4618 |
++ |
4619 |
+ p9_debug(P9_DEBUG_VFS, "%pd\n", dentry); |
4620 |
++ fid = v9fs_fid_lookup(dentry); |
4621 |
+ |
4622 |
+ if (IS_ERR(fid)) |
4623 |
+ return ERR_CAST(fid); |
4624 |
+ |
4625 |
+- if (!v9fs_proto_dotu(v9ses)) |
4626 |
+- return ERR_PTR(-EBADF); |
4627 |
+- |
4628 |
+ st = p9_client_stat(fid); |
4629 |
+ p9_client_clunk(fid); |
4630 |
+ if (IS_ERR(st)) |
4631 |
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c |
4632 |
+index d17502a738a94..b6eb1160296c3 100644 |
4633 |
+--- a/fs/9p/vfs_inode_dotl.c |
4634 |
++++ b/fs/9p/vfs_inode_dotl.c |
4635 |
+@@ -274,6 +274,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, |
4636 |
+ if (IS_ERR(ofid)) { |
4637 |
+ err = PTR_ERR(ofid); |
4638 |
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); |
4639 |
++ p9_client_clunk(dfid); |
4640 |
+ goto out; |
4641 |
+ } |
4642 |
+ |
4643 |
+@@ -285,6 +286,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, |
4644 |
+ if (err) { |
4645 |
+ p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n", |
4646 |
+ err); |
4647 |
++ p9_client_clunk(dfid); |
4648 |
+ goto error; |
4649 |
+ } |
4650 |
+ err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), |
4651 |
+@@ -292,6 +294,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, |
4652 |
+ if (err < 0) { |
4653 |
+ p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n", |
4654 |
+ err); |
4655 |
++ p9_client_clunk(dfid); |
4656 |
+ goto error; |
4657 |
+ } |
4658 |
+ v9fs_invalidate_inode_attr(dir); |
4659 |
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c |
4660 |
+index 22811e9eacf58..c4c9f6dff0a23 100644 |
4661 |
+--- a/fs/afs/inode.c |
4662 |
++++ b/fs/afs/inode.c |
4663 |
+@@ -745,7 +745,8 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path, |
4664 |
+ |
4665 |
+ _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); |
4666 |
+ |
4667 |
+- if (!(query_flags & AT_STATX_DONT_SYNC) && |
4668 |
++ if (vnode->volume && |
4669 |
++ !(query_flags & AT_STATX_DONT_SYNC) && |
4670 |
+ !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { |
4671 |
+ key = afs_request_key(vnode->volume->cell); |
4672 |
+ if (IS_ERR(key)) |
4673 |
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
4674 |
+index 30d0bbfdb3bca..6f30413ed9a9b 100644 |
4675 |
+--- a/fs/btrfs/disk-io.c |
4676 |
++++ b/fs/btrfs/disk-io.c |
4677 |
+@@ -4639,6 +4639,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) |
4678 |
+ int ret; |
4679 |
+ |
4680 |
+ set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); |
4681 |
++ |
4682 |
++ /* |
4683 |
++ * We may have the reclaim task running and relocating a data block group, |
4684 |
++ * in which case it may create delayed iputs. So stop it before we park |
4685 |
++ * the cleaner kthread otherwise we can get new delayed iputs after |
4686 |
++ * parking the cleaner, and that can make the async reclaim task to hang |
4687 |
++ * if it's waiting for delayed iputs to complete, since the cleaner is |
4688 |
++ * parked and can not run delayed iputs - this will make us hang when |
4689 |
++ * trying to stop the async reclaim task. |
4690 |
++ */ |
4691 |
++ cancel_work_sync(&fs_info->reclaim_bgs_work); |
4692 |
+ /* |
4693 |
+ * We don't want the cleaner to start new transactions, add more delayed |
4694 |
+ * iputs, etc. while we're closing. We can't use kthread_stop() yet |
4695 |
+@@ -4679,8 +4690,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) |
4696 |
+ cancel_work_sync(&fs_info->async_data_reclaim_work); |
4697 |
+ cancel_work_sync(&fs_info->preempt_reclaim_work); |
4698 |
+ |
4699 |
+- cancel_work_sync(&fs_info->reclaim_bgs_work); |
4700 |
+- |
4701 |
+ /* Cancel or finish ongoing discard work */ |
4702 |
+ btrfs_discard_cleanup(fs_info); |
4703 |
+ |
4704 |
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
4705 |
+index 380054c94e4b6..153920acd2269 100644 |
4706 |
+--- a/fs/btrfs/file.c |
4707 |
++++ b/fs/btrfs/file.c |
4708 |
+@@ -2359,25 +2359,62 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
4709 |
+ */ |
4710 |
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP); |
4711 |
+ |
4712 |
+- if (ret != BTRFS_NO_LOG_SYNC) { |
4713 |
++ if (ret == BTRFS_NO_LOG_SYNC) { |
4714 |
++ ret = btrfs_end_transaction(trans); |
4715 |
++ goto out; |
4716 |
++ } |
4717 |
++ |
4718 |
++ /* We successfully logged the inode, attempt to sync the log. */ |
4719 |
++ if (!ret) { |
4720 |
++ ret = btrfs_sync_log(trans, root, &ctx); |
4721 |
+ if (!ret) { |
4722 |
+- ret = btrfs_sync_log(trans, root, &ctx); |
4723 |
+- if (!ret) { |
4724 |
+- ret = btrfs_end_transaction(trans); |
4725 |
+- goto out; |
4726 |
+- } |
4727 |
+- } |
4728 |
+- if (!full_sync) { |
4729 |
+- ret = btrfs_wait_ordered_range(inode, start, len); |
4730 |
+- if (ret) { |
4731 |
+- btrfs_end_transaction(trans); |
4732 |
+- goto out; |
4733 |
+- } |
4734 |
++ ret = btrfs_end_transaction(trans); |
4735 |
++ goto out; |
4736 |
+ } |
4737 |
+- ret = btrfs_commit_transaction(trans); |
4738 |
+- } else { |
4739 |
++ } |
4740 |
++ |
4741 |
++ /* |
4742 |
++ * At this point we need to commit the transaction because we had |
4743 |
++ * btrfs_need_log_full_commit() or some other error. |
4744 |
++ * |
4745 |
++ * If we didn't do a full sync we have to stop the trans handle, wait on |
4746 |
++ * the ordered extents, start it again and commit the transaction. If |
4747 |
++ * we attempt to wait on the ordered extents here we could deadlock with |
4748 |
++ * something like fallocate() that is holding the extent lock trying to |
4749 |
++ * start a transaction while some other thread is trying to commit the |
4750 |
++ * transaction while we (fsync) are currently holding the transaction |
4751 |
++ * open. |
4752 |
++ */ |
4753 |
++ if (!full_sync) { |
4754 |
+ ret = btrfs_end_transaction(trans); |
4755 |
++ if (ret) |
4756 |
++ goto out; |
4757 |
++ ret = btrfs_wait_ordered_range(inode, start, len); |
4758 |
++ if (ret) |
4759 |
++ goto out; |
4760 |
++ |
4761 |
++ /* |
4762 |
++ * This is safe to use here because we're only interested in |
4763 |
++ * making sure the transaction that had the ordered extents is |
4764 |
++ * committed. We aren't waiting on anything past this point, |
4765 |
++ * we're purely getting the transaction and committing it. |
4766 |
++ */ |
4767 |
++ trans = btrfs_attach_transaction_barrier(root); |
4768 |
++ if (IS_ERR(trans)) { |
4769 |
++ ret = PTR_ERR(trans); |
4770 |
++ |
4771 |
++ /* |
4772 |
++ * We committed the transaction and there's no currently |
4773 |
++ * running transaction, this means everything we care |
4774 |
++ * about made it to disk and we are done. |
4775 |
++ */ |
4776 |
++ if (ret == -ENOENT) |
4777 |
++ ret = 0; |
4778 |
++ goto out; |
4779 |
++ } |
4780 |
+ } |
4781 |
++ |
4782 |
++ ret = btrfs_commit_transaction(trans); |
4783 |
+ out: |
4784 |
+ ASSERT(list_empty(&ctx.list)); |
4785 |
+ err = file_check_and_advance_wb_err(file); |
4786 |
+diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c |
4787 |
+index 313d9d685adb7..33461b4f9c8b5 100644 |
4788 |
+--- a/fs/btrfs/locking.c |
4789 |
++++ b/fs/btrfs/locking.c |
4790 |
+@@ -45,7 +45,6 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne |
4791 |
+ start_ns = ktime_get_ns(); |
4792 |
+ |
4793 |
+ down_read_nested(&eb->lock, nest); |
4794 |
+- eb->lock_owner = current->pid; |
4795 |
+ trace_btrfs_tree_read_lock(eb, start_ns); |
4796 |
+ } |
4797 |
+ |
4798 |
+@@ -62,7 +61,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb) |
4799 |
+ int btrfs_try_tree_read_lock(struct extent_buffer *eb) |
4800 |
+ { |
4801 |
+ if (down_read_trylock(&eb->lock)) { |
4802 |
+- eb->lock_owner = current->pid; |
4803 |
+ trace_btrfs_try_tree_read_lock(eb); |
4804 |
+ return 1; |
4805 |
+ } |
4806 |
+@@ -90,7 +88,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) |
4807 |
+ void btrfs_tree_read_unlock(struct extent_buffer *eb) |
4808 |
+ { |
4809 |
+ trace_btrfs_tree_read_unlock(eb); |
4810 |
+- eb->lock_owner = 0; |
4811 |
+ up_read(&eb->lock); |
4812 |
+ } |
4813 |
+ |
4814 |
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c |
4815 |
+index 998e3f180d90e..6db7f50de84da 100644 |
4816 |
+--- a/fs/btrfs/reflink.c |
4817 |
++++ b/fs/btrfs/reflink.c |
4818 |
+@@ -344,6 +344,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode, |
4819 |
+ int ret; |
4820 |
+ const u64 len = olen_aligned; |
4821 |
+ u64 last_dest_end = destoff; |
4822 |
++ u64 prev_extent_end = off; |
4823 |
+ |
4824 |
+ ret = -ENOMEM; |
4825 |
+ buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); |
4826 |
+@@ -363,7 +364,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode, |
4827 |
+ key.offset = off; |
4828 |
+ |
4829 |
+ while (1) { |
4830 |
+- u64 next_key_min_offset = key.offset + 1; |
4831 |
+ struct btrfs_file_extent_item *extent; |
4832 |
+ u64 extent_gen; |
4833 |
+ int type; |
4834 |
+@@ -431,14 +431,21 @@ process_slot: |
4835 |
+ * The first search might have left us at an extent item that |
4836 |
+ * ends before our target range's start, can happen if we have |
4837 |
+ * holes and NO_HOLES feature enabled. |
4838 |
++ * |
4839 |
++ * Subsequent searches may leave us on a file range we have |
4840 |
++ * processed before - this happens due to a race with ordered |
4841 |
++ * extent completion for a file range that is outside our source |
4842 |
++ * range, but that range was part of a file extent item that |
4843 |
++ * also covered a leading part of our source range. |
4844 |
+ */ |
4845 |
+- if (key.offset + datal <= off) { |
4846 |
++ if (key.offset + datal <= prev_extent_end) { |
4847 |
+ path->slots[0]++; |
4848 |
+ goto process_slot; |
4849 |
+ } else if (key.offset >= off + len) { |
4850 |
+ break; |
4851 |
+ } |
4852 |
+- next_key_min_offset = key.offset + datal; |
4853 |
++ |
4854 |
++ prev_extent_end = key.offset + datal; |
4855 |
+ size = btrfs_item_size(leaf, slot); |
4856 |
+ read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), |
4857 |
+ size); |
4858 |
+@@ -550,7 +557,7 @@ process_slot: |
4859 |
+ break; |
4860 |
+ |
4861 |
+ btrfs_release_path(path); |
4862 |
+- key.offset = next_key_min_offset; |
4863 |
++ key.offset = prev_extent_end; |
4864 |
+ |
4865 |
+ if (fatal_signal_pending(current)) { |
4866 |
+ ret = -EINTR; |
4867 |
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c |
4868 |
+index b228efe8ab6e2..0b2a387615f64 100644 |
4869 |
+--- a/fs/btrfs/super.c |
4870 |
++++ b/fs/btrfs/super.c |
4871 |
+@@ -763,6 +763,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4872 |
+ compress_force = false; |
4873 |
+ no_compress++; |
4874 |
+ } else { |
4875 |
++ btrfs_err(info, "unrecognized compression value %s", |
4876 |
++ args[0].from); |
4877 |
+ ret = -EINVAL; |
4878 |
+ goto out; |
4879 |
+ } |
4880 |
+@@ -821,8 +823,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4881 |
+ case Opt_thread_pool: |
4882 |
+ ret = match_int(&args[0], &intarg); |
4883 |
+ if (ret) { |
4884 |
++ btrfs_err(info, "unrecognized thread_pool value %s", |
4885 |
++ args[0].from); |
4886 |
+ goto out; |
4887 |
+ } else if (intarg == 0) { |
4888 |
++ btrfs_err(info, "invalid value 0 for thread_pool"); |
4889 |
+ ret = -EINVAL; |
4890 |
+ goto out; |
4891 |
+ } |
4892 |
+@@ -883,8 +888,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4893 |
+ break; |
4894 |
+ case Opt_ratio: |
4895 |
+ ret = match_int(&args[0], &intarg); |
4896 |
+- if (ret) |
4897 |
++ if (ret) { |
4898 |
++ btrfs_err(info, "unrecognized metadata_ratio value %s", |
4899 |
++ args[0].from); |
4900 |
+ goto out; |
4901 |
++ } |
4902 |
+ info->metadata_ratio = intarg; |
4903 |
+ btrfs_info(info, "metadata ratio %u", |
4904 |
+ info->metadata_ratio); |
4905 |
+@@ -901,6 +909,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4906 |
+ btrfs_set_and_info(info, DISCARD_ASYNC, |
4907 |
+ "turning on async discard"); |
4908 |
+ } else { |
4909 |
++ btrfs_err(info, "unrecognized discard mode value %s", |
4910 |
++ args[0].from); |
4911 |
+ ret = -EINVAL; |
4912 |
+ goto out; |
4913 |
+ } |
4914 |
+@@ -933,6 +943,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4915 |
+ btrfs_set_and_info(info, FREE_SPACE_TREE, |
4916 |
+ "enabling free space tree"); |
4917 |
+ } else { |
4918 |
++ btrfs_err(info, "unrecognized space_cache value %s", |
4919 |
++ args[0].from); |
4920 |
+ ret = -EINVAL; |
4921 |
+ goto out; |
4922 |
+ } |
4923 |
+@@ -1014,8 +1026,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4924 |
+ break; |
4925 |
+ case Opt_check_integrity_print_mask: |
4926 |
+ ret = match_int(&args[0], &intarg); |
4927 |
+- if (ret) |
4928 |
++ if (ret) { |
4929 |
++ btrfs_err(info, |
4930 |
++ "unrecognized check_integrity_print_mask value %s", |
4931 |
++ args[0].from); |
4932 |
+ goto out; |
4933 |
++ } |
4934 |
+ info->check_integrity_print_mask = intarg; |
4935 |
+ btrfs_info(info, "check_integrity_print_mask 0x%x", |
4936 |
+ info->check_integrity_print_mask); |
4937 |
+@@ -1030,13 +1046,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4938 |
+ goto out; |
4939 |
+ #endif |
4940 |
+ case Opt_fatal_errors: |
4941 |
+- if (strcmp(args[0].from, "panic") == 0) |
4942 |
++ if (strcmp(args[0].from, "panic") == 0) { |
4943 |
+ btrfs_set_opt(info->mount_opt, |
4944 |
+ PANIC_ON_FATAL_ERROR); |
4945 |
+- else if (strcmp(args[0].from, "bug") == 0) |
4946 |
++ } else if (strcmp(args[0].from, "bug") == 0) { |
4947 |
+ btrfs_clear_opt(info->mount_opt, |
4948 |
+ PANIC_ON_FATAL_ERROR); |
4949 |
+- else { |
4950 |
++ } else { |
4951 |
++ btrfs_err(info, "unrecognized fatal_errors value %s", |
4952 |
++ args[0].from); |
4953 |
+ ret = -EINVAL; |
4954 |
+ goto out; |
4955 |
+ } |
4956 |
+@@ -1044,8 +1062,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4957 |
+ case Opt_commit_interval: |
4958 |
+ intarg = 0; |
4959 |
+ ret = match_int(&args[0], &intarg); |
4960 |
+- if (ret) |
4961 |
++ if (ret) { |
4962 |
++ btrfs_err(info, "unrecognized commit_interval value %s", |
4963 |
++ args[0].from); |
4964 |
++ ret = -EINVAL; |
4965 |
+ goto out; |
4966 |
++ } |
4967 |
+ if (intarg == 0) { |
4968 |
+ btrfs_info(info, |
4969 |
+ "using default commit interval %us", |
4970 |
+@@ -1059,8 +1081,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, |
4971 |
+ break; |
4972 |
+ case Opt_rescue: |
4973 |
+ ret = parse_rescue_options(info, args[0].from); |
4974 |
+- if (ret < 0) |
4975 |
++ if (ret < 0) { |
4976 |
++ btrfs_err(info, "unrecognized rescue value %s", |
4977 |
++ args[0].from); |
4978 |
+ goto out; |
4979 |
++ } |
4980 |
+ break; |
4981 |
+ #ifdef CONFIG_BTRFS_DEBUG |
4982 |
+ case Opt_fragment_all: |
4983 |
+@@ -1986,6 +2011,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) |
4984 |
+ if (ret) |
4985 |
+ goto restore; |
4986 |
+ |
4987 |
++ /* V1 cache is not supported for subpage mount. */ |
4988 |
++ if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) { |
4989 |
++ btrfs_warn(fs_info, |
4990 |
++ "v1 space cache is not supported for page size %lu with sectorsize %u", |
4991 |
++ PAGE_SIZE, fs_info->sectorsize); |
4992 |
++ ret = -EINVAL; |
4993 |
++ goto restore; |
4994 |
++ } |
4995 |
+ btrfs_remount_begin(fs_info, old_opts, *flags); |
4996 |
+ btrfs_resize_thread_pool(fs_info, |
4997 |
+ fs_info->thread_pool_size, old_thread_pool_size); |
4998 |
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
4999 |
+index 179c1630bf561..6a8a00f28b192 100644 |
5000 |
+--- a/fs/cifs/smb2pdu.c |
5001 |
++++ b/fs/cifs/smb2pdu.c |
5002 |
+@@ -543,6 +543,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, |
5003 |
+ struct TCP_Server_Info *server, unsigned int *total_len) |
5004 |
+ { |
5005 |
+ char *pneg_ctxt; |
5006 |
++ char *hostname = NULL; |
5007 |
+ unsigned int ctxt_len, neg_context_count; |
5008 |
+ |
5009 |
+ if (*total_len > 200) { |
5010 |
+@@ -570,16 +571,24 @@ assemble_neg_contexts(struct smb2_negotiate_req *req, |
5011 |
+ *total_len += ctxt_len; |
5012 |
+ pneg_ctxt += ctxt_len; |
5013 |
+ |
5014 |
+- ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, |
5015 |
+- server->hostname); |
5016 |
+- *total_len += ctxt_len; |
5017 |
+- pneg_ctxt += ctxt_len; |
5018 |
+- |
5019 |
+ build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); |
5020 |
+ *total_len += sizeof(struct smb2_posix_neg_context); |
5021 |
+ pneg_ctxt += sizeof(struct smb2_posix_neg_context); |
5022 |
+ |
5023 |
+- neg_context_count = 4; |
5024 |
++ /* |
5025 |
++ * secondary channels don't have the hostname field populated |
5026 |
++ * use the hostname field in the primary channel instead |
5027 |
++ */ |
5028 |
++ hostname = CIFS_SERVER_IS_CHAN(server) ? |
5029 |
++ server->primary_server->hostname : server->hostname; |
5030 |
++ if (hostname && (hostname[0] != 0)) { |
5031 |
++ ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, |
5032 |
++ hostname); |
5033 |
++ *total_len += ctxt_len; |
5034 |
++ pneg_ctxt += ctxt_len; |
5035 |
++ neg_context_count = 4; |
5036 |
++ } else /* second channels do not have a hostname */ |
5037 |
++ neg_context_count = 3; |
5038 |
+ |
5039 |
+ if (server->compress_algorithm) { |
5040 |
+ build_compression_ctxt((struct smb2_compression_capabilities_context *) |
5041 |
+diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c |
5042 |
+index be599f31d3c48..d84c5f6cc09d7 100644 |
5043 |
+--- a/fs/f2fs/iostat.c |
5044 |
++++ b/fs/f2fs/iostat.c |
5045 |
+@@ -91,8 +91,9 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi) |
5046 |
+ unsigned int cnt; |
5047 |
+ struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE]; |
5048 |
+ struct iostat_lat_info *io_lat = sbi->iostat_io_lat; |
5049 |
++ unsigned long flags; |
5050 |
+ |
5051 |
+- spin_lock_bh(&sbi->iostat_lat_lock); |
5052 |
++ spin_lock_irqsave(&sbi->iostat_lat_lock, flags); |
5053 |
+ for (idx = 0; idx < MAX_IO_TYPE; idx++) { |
5054 |
+ for (io = 0; io < NR_PAGE_TYPE; io++) { |
5055 |
+ cnt = io_lat->bio_cnt[idx][io]; |
5056 |
+@@ -106,7 +107,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi) |
5057 |
+ io_lat->bio_cnt[idx][io] = 0; |
5058 |
+ } |
5059 |
+ } |
5060 |
+- spin_unlock_bh(&sbi->iostat_lat_lock); |
5061 |
++ spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags); |
5062 |
+ |
5063 |
+ trace_f2fs_iostat_latency(sbi, iostat_lat); |
5064 |
+ } |
5065 |
+@@ -115,14 +116,15 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi) |
5066 |
+ { |
5067 |
+ unsigned long long iostat_diff[NR_IO_TYPE]; |
5068 |
+ int i; |
5069 |
++ unsigned long flags; |
5070 |
+ |
5071 |
+ if (time_is_after_jiffies(sbi->iostat_next_period)) |
5072 |
+ return; |
5073 |
+ |
5074 |
+ /* Need double check under the lock */ |
5075 |
+- spin_lock_bh(&sbi->iostat_lock); |
5076 |
++ spin_lock_irqsave(&sbi->iostat_lock, flags); |
5077 |
+ if (time_is_after_jiffies(sbi->iostat_next_period)) { |
5078 |
+- spin_unlock_bh(&sbi->iostat_lock); |
5079 |
++ spin_unlock_irqrestore(&sbi->iostat_lock, flags); |
5080 |
+ return; |
5081 |
+ } |
5082 |
+ sbi->iostat_next_period = jiffies + |
5083 |
+@@ -133,7 +135,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi) |
5084 |
+ sbi->prev_rw_iostat[i]; |
5085 |
+ sbi->prev_rw_iostat[i] = sbi->rw_iostat[i]; |
5086 |
+ } |
5087 |
+- spin_unlock_bh(&sbi->iostat_lock); |
5088 |
++ spin_unlock_irqrestore(&sbi->iostat_lock, flags); |
5089 |
+ |
5090 |
+ trace_f2fs_iostat(sbi, iostat_diff); |
5091 |
+ |
5092 |
+@@ -145,25 +147,27 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi) |
5093 |
+ struct iostat_lat_info *io_lat = sbi->iostat_io_lat; |
5094 |
+ int i; |
5095 |
+ |
5096 |
+- spin_lock_bh(&sbi->iostat_lock); |
5097 |
++ spin_lock_irq(&sbi->iostat_lock); |
5098 |
+ for (i = 0; i < NR_IO_TYPE; i++) { |
5099 |
+ sbi->rw_iostat[i] = 0; |
5100 |
+ sbi->prev_rw_iostat[i] = 0; |
5101 |
+ } |
5102 |
+- spin_unlock_bh(&sbi->iostat_lock); |
5103 |
++ spin_unlock_irq(&sbi->iostat_lock); |
5104 |
+ |
5105 |
+- spin_lock_bh(&sbi->iostat_lat_lock); |
5106 |
++ spin_lock_irq(&sbi->iostat_lat_lock); |
5107 |
+ memset(io_lat, 0, sizeof(struct iostat_lat_info)); |
5108 |
+- spin_unlock_bh(&sbi->iostat_lat_lock); |
5109 |
++ spin_unlock_irq(&sbi->iostat_lat_lock); |
5110 |
+ } |
5111 |
+ |
5112 |
+ void f2fs_update_iostat(struct f2fs_sb_info *sbi, |
5113 |
+ enum iostat_type type, unsigned long long io_bytes) |
5114 |
+ { |
5115 |
++ unsigned long flags; |
5116 |
++ |
5117 |
+ if (!sbi->iostat_enable) |
5118 |
+ return; |
5119 |
+ |
5120 |
+- spin_lock_bh(&sbi->iostat_lock); |
5121 |
++ spin_lock_irqsave(&sbi->iostat_lock, flags); |
5122 |
+ sbi->rw_iostat[type] += io_bytes; |
5123 |
+ |
5124 |
+ if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO) |
5125 |
+@@ -172,7 +176,7 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi, |
5126 |
+ if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO) |
5127 |
+ sbi->rw_iostat[APP_READ_IO] += io_bytes; |
5128 |
+ |
5129 |
+- spin_unlock_bh(&sbi->iostat_lock); |
5130 |
++ spin_unlock_irqrestore(&sbi->iostat_lock, flags); |
5131 |
+ |
5132 |
+ f2fs_record_iostat(sbi); |
5133 |
+ } |
5134 |
+@@ -185,6 +189,7 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx, |
5135 |
+ struct f2fs_sb_info *sbi = iostat_ctx->sbi; |
5136 |
+ struct iostat_lat_info *io_lat = sbi->iostat_io_lat; |
5137 |
+ int idx; |
5138 |
++ unsigned long flags; |
5139 |
+ |
5140 |
+ if (!sbi->iostat_enable) |
5141 |
+ return; |
5142 |
+@@ -202,12 +207,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx, |
5143 |
+ idx = WRITE_ASYNC_IO; |
5144 |
+ } |
5145 |
+ |
5146 |
+- spin_lock_bh(&sbi->iostat_lat_lock); |
5147 |
++ spin_lock_irqsave(&sbi->iostat_lat_lock, flags); |
5148 |
+ io_lat->sum_lat[idx][iotype] += ts_diff; |
5149 |
+ io_lat->bio_cnt[idx][iotype]++; |
5150 |
+ if (ts_diff > io_lat->peak_lat[idx][iotype]) |
5151 |
+ io_lat->peak_lat[idx][iotype] = ts_diff; |
5152 |
+- spin_unlock_bh(&sbi->iostat_lat_lock); |
5153 |
++ spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags); |
5154 |
+ } |
5155 |
+ |
5156 |
+ void iostat_update_and_unbind_ctx(struct bio *bio, int rw) |
5157 |
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c |
5158 |
+index fffafd2aa4387..3764e12f19db0 100644 |
5159 |
+--- a/fs/f2fs/namei.c |
5160 |
++++ b/fs/f2fs/namei.c |
5161 |
+@@ -92,8 +92,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, |
5162 |
+ if (test_opt(sbi, INLINE_XATTR)) |
5163 |
+ set_inode_flag(inode, FI_INLINE_XATTR); |
5164 |
+ |
5165 |
+- if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) |
5166 |
+- set_inode_flag(inode, FI_INLINE_DATA); |
5167 |
+ if (f2fs_may_inline_dentry(inode)) |
5168 |
+ set_inode_flag(inode, FI_INLINE_DENTRY); |
5169 |
+ |
5170 |
+@@ -110,10 +108,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, |
5171 |
+ |
5172 |
+ f2fs_init_extent_tree(inode, NULL); |
5173 |
+ |
5174 |
+- stat_inc_inline_xattr(inode); |
5175 |
+- stat_inc_inline_inode(inode); |
5176 |
+- stat_inc_inline_dir(inode); |
5177 |
+- |
5178 |
+ F2FS_I(inode)->i_flags = |
5179 |
+ f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED); |
5180 |
+ |
5181 |
+@@ -130,6 +124,14 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns, |
5182 |
+ set_compress_context(inode); |
5183 |
+ } |
5184 |
+ |
5185 |
++ /* Should enable inline_data after compression set */ |
5186 |
++ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) |
5187 |
++ set_inode_flag(inode, FI_INLINE_DATA); |
5188 |
++ |
5189 |
++ stat_inc_inline_xattr(inode); |
5190 |
++ stat_inc_inline_inode(inode); |
5191 |
++ stat_inc_inline_dir(inode); |
5192 |
++ |
5193 |
+ f2fs_set_inode_flags(inode); |
5194 |
+ |
5195 |
+ trace_f2fs_new_inode(inode, 0); |
5196 |
+@@ -328,6 +330,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode, |
5197 |
+ if (!is_extension_exist(name, ext[i], false)) |
5198 |
+ continue; |
5199 |
+ |
5200 |
++ /* Do not use inline_data with compression */ |
5201 |
++ stat_dec_inline_inode(inode); |
5202 |
++ clear_inode_flag(inode, FI_INLINE_DATA); |
5203 |
+ set_compress_context(inode); |
5204 |
+ return; |
5205 |
+ } |
5206 |
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c |
5207 |
+index a8d0fa2731cbe..aedc3d334113b 100644 |
5208 |
+--- a/fs/f2fs/node.c |
5209 |
++++ b/fs/f2fs/node.c |
5210 |
+@@ -1454,7 +1454,9 @@ page_hit: |
5211 |
+ out_err: |
5212 |
+ ClearPageUptodate(page); |
5213 |
+ out_put_err: |
5214 |
+- f2fs_handle_page_eio(sbi, page->index, NODE); |
5215 |
++ /* ENOENT comes from read_node_page which is not an error. */ |
5216 |
++ if (err != -ENOENT) |
5217 |
++ f2fs_handle_page_eio(sbi, page->index, NODE); |
5218 |
+ f2fs_put_page(page, 1); |
5219 |
+ return ERR_PTR(err); |
5220 |
+ } |
5221 |
+diff --git a/fs/io_uring.c b/fs/io_uring.c |
5222 |
+index 68aab48838e41..e4186635aaa8d 100644 |
5223 |
+--- a/fs/io_uring.c |
5224 |
++++ b/fs/io_uring.c |
5225 |
+@@ -926,7 +926,7 @@ struct io_kiocb { |
5226 |
+ /* used by request caches, completion batching and iopoll */ |
5227 |
+ struct io_wq_work_node comp_list; |
5228 |
+ /* cache ->apoll->events */ |
5229 |
+- int apoll_events; |
5230 |
++ __poll_t apoll_events; |
5231 |
+ }; |
5232 |
+ atomic_t refs; |
5233 |
+ atomic_t poll_refs; |
5234 |
+@@ -5984,7 +5984,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked) |
5235 |
+ io_req_complete_failed(req, ret); |
5236 |
+ } |
5237 |
+ |
5238 |
+-static void __io_poll_execute(struct io_kiocb *req, int mask, int events) |
5239 |
++static void __io_poll_execute(struct io_kiocb *req, int mask, |
5240 |
++ __poll_t __maybe_unused events) |
5241 |
+ { |
5242 |
+ req->result = mask; |
5243 |
+ /* |
5244 |
+@@ -5993,7 +5994,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events) |
5245 |
+ * CPU. We want to avoid pulling in req->apoll->events for that |
5246 |
+ * case. |
5247 |
+ */ |
5248 |
+- req->apoll_events = events; |
5249 |
+ if (req->opcode == IORING_OP_POLL_ADD) |
5250 |
+ req->io_task_work.func = io_poll_task_func; |
5251 |
+ else |
5252 |
+@@ -6003,7 +6003,8 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events) |
5253 |
+ io_req_task_work_add(req, false); |
5254 |
+ } |
5255 |
+ |
5256 |
+-static inline void io_poll_execute(struct io_kiocb *req, int res, int events) |
5257 |
++static inline void io_poll_execute(struct io_kiocb *req, int res, |
5258 |
++ __poll_t events) |
5259 |
+ { |
5260 |
+ if (io_poll_get_ownership(req)) |
5261 |
+ __io_poll_execute(req, res, events); |
5262 |
+@@ -6142,6 +6143,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req, |
5263 |
+ io_init_poll_iocb(poll, mask, io_poll_wake); |
5264 |
+ poll->file = req->file; |
5265 |
+ |
5266 |
++ req->apoll_events = poll->events; |
5267 |
++ |
5268 |
+ ipt->pt._key = mask; |
5269 |
+ ipt->req = req; |
5270 |
+ ipt->error = 0; |
5271 |
+@@ -6172,8 +6175,11 @@ static int __io_arm_poll_handler(struct io_kiocb *req, |
5272 |
+ |
5273 |
+ if (mask) { |
5274 |
+ /* can't multishot if failed, just queue the event we've got */ |
5275 |
+- if (unlikely(ipt->error || !ipt->nr_entries)) |
5276 |
++ if (unlikely(ipt->error || !ipt->nr_entries)) { |
5277 |
+ poll->events |= EPOLLONESHOT; |
5278 |
++ req->apoll_events |= EPOLLONESHOT; |
5279 |
++ ipt->error = 0; |
5280 |
++ } |
5281 |
+ __io_poll_execute(req, mask, poll->events); |
5282 |
+ return 0; |
5283 |
+ } |
5284 |
+@@ -6386,7 +6392,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe |
5285 |
+ return -EINVAL; |
5286 |
+ |
5287 |
+ io_req_set_refcount(req); |
5288 |
+- req->apoll_events = poll->events = io_poll_parse_events(sqe, flags); |
5289 |
++ poll->events = io_poll_parse_events(sqe, flags); |
5290 |
+ return 0; |
5291 |
+ } |
5292 |
+ |
5293 |
+@@ -6399,6 +6405,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) |
5294 |
+ ipt.pt._qproc = io_poll_queue_proc; |
5295 |
+ |
5296 |
+ ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); |
5297 |
++ if (!ret && ipt.error) |
5298 |
++ req_set_fail(req); |
5299 |
+ ret = ret ?: ipt.error; |
5300 |
+ if (ret) |
5301 |
+ __io_req_complete(req, issue_flags, ret, 0); |
5302 |
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c |
5303 |
+index 6f1b8ddc6f7a4..54dda2e19ed12 100644 |
5304 |
+--- a/fs/proc/vmcore.c |
5305 |
++++ b/fs/proc/vmcore.c |
5306 |
+@@ -26,6 +26,7 @@ |
5307 |
+ #include <linux/vmalloc.h> |
5308 |
+ #include <linux/pagemap.h> |
5309 |
+ #include <linux/uaccess.h> |
5310 |
++#include <linux/uio.h> |
5311 |
+ #include <linux/cc_platform.h> |
5312 |
+ #include <asm/io.h> |
5313 |
+ #include "internal.h" |
5314 |
+@@ -128,9 +129,8 @@ static int open_vmcore(struct inode *inode, struct file *file) |
5315 |
+ } |
5316 |
+ |
5317 |
+ /* Reads a page from the oldmem device from given offset. */ |
5318 |
+-ssize_t read_from_oldmem(char *buf, size_t count, |
5319 |
+- u64 *ppos, int userbuf, |
5320 |
+- bool encrypted) |
5321 |
++static ssize_t read_from_oldmem_iter(struct iov_iter *iter, size_t count, |
5322 |
++ u64 *ppos, bool encrypted) |
5323 |
+ { |
5324 |
+ unsigned long pfn, offset; |
5325 |
+ size_t nr_bytes; |
5326 |
+@@ -152,29 +152,23 @@ ssize_t read_from_oldmem(char *buf, size_t count, |
5327 |
+ |
5328 |
+ /* If pfn is not ram, return zeros for sparse dump files */ |
5329 |
+ if (!pfn_is_ram(pfn)) { |
5330 |
+- tmp = 0; |
5331 |
+- if (!userbuf) |
5332 |
+- memset(buf, 0, nr_bytes); |
5333 |
+- else if (clear_user(buf, nr_bytes)) |
5334 |
+- tmp = -EFAULT; |
5335 |
++ tmp = iov_iter_zero(nr_bytes, iter); |
5336 |
+ } else { |
5337 |
+ if (encrypted) |
5338 |
+- tmp = copy_oldmem_page_encrypted(pfn, buf, |
5339 |
++ tmp = copy_oldmem_page_encrypted(iter, pfn, |
5340 |
+ nr_bytes, |
5341 |
+- offset, |
5342 |
+- userbuf); |
5343 |
++ offset); |
5344 |
+ else |
5345 |
+- tmp = copy_oldmem_page(pfn, buf, nr_bytes, |
5346 |
+- offset, userbuf); |
5347 |
++ tmp = copy_oldmem_page(iter, pfn, nr_bytes, |
5348 |
++ offset); |
5349 |
+ } |
5350 |
+- if (tmp < 0) { |
5351 |
++ if (tmp < nr_bytes) { |
5352 |
+ srcu_read_unlock(&vmcore_cb_srcu, idx); |
5353 |
+- return tmp; |
5354 |
++ return -EFAULT; |
5355 |
+ } |
5356 |
+ |
5357 |
+ *ppos += nr_bytes; |
5358 |
+ count -= nr_bytes; |
5359 |
+- buf += nr_bytes; |
5360 |
+ read += nr_bytes; |
5361 |
+ ++pfn; |
5362 |
+ offset = 0; |
5363 |
+@@ -184,6 +178,27 @@ ssize_t read_from_oldmem(char *buf, size_t count, |
5364 |
+ return read; |
5365 |
+ } |
5366 |
+ |
5367 |
++ssize_t read_from_oldmem(char *buf, size_t count, |
5368 |
++ u64 *ppos, int userbuf, |
5369 |
++ bool encrypted) |
5370 |
++{ |
5371 |
++ struct iov_iter iter; |
5372 |
++ struct iovec iov; |
5373 |
++ struct kvec kvec; |
5374 |
++ |
5375 |
++ if (userbuf) { |
5376 |
++ iov.iov_base = (__force void __user *)buf; |
5377 |
++ iov.iov_len = count; |
5378 |
++ iov_iter_init(&iter, READ, &iov, 1, count); |
5379 |
++ } else { |
5380 |
++ kvec.iov_base = buf; |
5381 |
++ kvec.iov_len = count; |
5382 |
++ iov_iter_kvec(&iter, READ, &kvec, 1, count); |
5383 |
++ } |
5384 |
++ |
5385 |
++ return read_from_oldmem_iter(&iter, count, ppos, encrypted); |
5386 |
++} |
5387 |
++ |
5388 |
+ /* |
5389 |
+ * Architectures may override this function to allocate ELF header in 2nd kernel |
5390 |
+ */ |
5391 |
+@@ -228,11 +243,10 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, |
5392 |
+ /* |
5393 |
+ * Architectures which support memory encryption override this. |
5394 |
+ */ |
5395 |
+-ssize_t __weak |
5396 |
+-copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, |
5397 |
+- unsigned long offset, int userbuf) |
5398 |
++ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter, |
5399 |
++ unsigned long pfn, size_t csize, unsigned long offset) |
5400 |
+ { |
5401 |
+- return copy_oldmem_page(pfn, buf, csize, offset, userbuf); |
5402 |
++ return copy_oldmem_page(iter, pfn, csize, offset); |
5403 |
+ } |
5404 |
+ |
5405 |
+ /* |
5406 |
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h |
5407 |
+index 620821549b23a..a1cf7d5c03c7c 100644 |
5408 |
+--- a/include/linux/crash_dump.h |
5409 |
++++ b/include/linux/crash_dump.h |
5410 |
+@@ -24,11 +24,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, |
5411 |
+ unsigned long from, unsigned long pfn, |
5412 |
+ unsigned long size, pgprot_t prot); |
5413 |
+ |
5414 |
+-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, |
5415 |
+- unsigned long, int); |
5416 |
+-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, |
5417 |
+- size_t csize, unsigned long offset, |
5418 |
+- int userbuf); |
5419 |
++ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize, |
5420 |
++ unsigned long offset); |
5421 |
++ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn, |
5422 |
++ size_t csize, unsigned long offset); |
5423 |
+ |
5424 |
+ void vmcore_cleanup(void); |
5425 |
+ |
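The vmcore change above funnels both user and kernel destinations through one iov_iter-based reader: iov_iter_zero() replaces the memset()/clear_user() pair, and a short copy (tmp < nr_bytes) now maps to -EFAULT. A minimal sketch of the same wrapping pattern, with a hypothetical demo_copy() standing in for the vmcore internals:

#include <linux/uio.h>

/* Copy a kernel source buffer to either a user or a kernel destination
 * through a single iov_iter-based path (sketch; demo_copy is illustrative).
 * READ marks the iterator as a destination, matching the wrapper above. */
static ssize_t demo_copy(void *dst, size_t count, bool userbuf, const void *src)
{
	struct iov_iter iter;
	struct iovec iov;
	struct kvec kvec;

	if (userbuf) {
		iov.iov_base = (__force void __user *)dst;
		iov.iov_len = count;
		iov_iter_init(&iter, READ, &iov, 1, count);
	} else {
		kvec.iov_base = dst;
		kvec.iov_len = count;
		iov_iter_kvec(&iter, READ, &kvec, 1, count);
	}

	/* copy_to_iter() returns the number of bytes copied; a short copy
	 * signals a fault, which the patch maps to -EFAULT. */
	return copy_to_iter(src, count, &iter) == count ? (ssize_t)count : -EFAULT;
}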
5426 |
+diff --git a/include/linux/mm.h b/include/linux/mm.h |
5427 |
+index b0183450e484b..da08cce2a9fa8 100644 |
5428 |
+--- a/include/linux/mm.h |
5429 |
++++ b/include/linux/mm.h |
5430 |
+@@ -3188,6 +3188,7 @@ enum mf_flags { |
5431 |
+ MF_MUST_KILL = 1 << 2, |
5432 |
+ MF_SOFT_OFFLINE = 1 << 3, |
5433 |
+ MF_UNPOISON = 1 << 4, |
5434 |
++ MF_SW_SIMULATED = 1 << 5, |
5435 |
+ }; |
5436 |
+ extern int memory_failure(unsigned long pfn, int flags); |
5437 |
+ extern void memory_failure_queue(unsigned long pfn, int flags); |
5438 |
+diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h |
5439 |
+index c21c7f8103e2b..002266693e506 100644 |
5440 |
+--- a/include/linux/ratelimit_types.h |
5441 |
++++ b/include/linux/ratelimit_types.h |
5442 |
+@@ -23,12 +23,16 @@ struct ratelimit_state { |
5443 |
+ unsigned long flags; |
5444 |
+ }; |
5445 |
+ |
5446 |
+-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ |
5447 |
+- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
5448 |
+- .interval = interval_init, \ |
5449 |
+- .burst = burst_init, \ |
5450 |
++#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \ |
5451 |
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
5452 |
++ .interval = interval_init, \ |
5453 |
++ .burst = burst_init, \ |
5454 |
++ .flags = flags_init, \ |
5455 |
+ } |
5456 |
+ |
5457 |
++#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ |
5458 |
++ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0) |
5459 |
++ |
5460 |
+ #define RATELIMIT_STATE_INIT_DISABLED \ |
5461 |
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) |
5462 |
+ |
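RATELIMIT_STATE_INIT_FLAGS() extends the old static initializer with a .flags field, and RATELIMIT_STATE_INIT() becomes a zero-flags wrapper around it. A hedged usage sketch; RATELIMIT_MSG_ON_RELEASE is an existing flag, the demo_* names are illustrative:

#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Static ratelimit state: at most 10 messages per 5 seconds, reporting
 * suppressed messages when the state is released (sketch). */
static struct ratelimit_state demo_rs =
	RATELIMIT_STATE_INIT_FLAGS(demo_rs, 5 * HZ, 10, RATELIMIT_MSG_ON_RELEASE);

static void demo_log(void)
{
	/* __ratelimit() returns nonzero while the burst budget lasts. */
	if (__ratelimit(&demo_rs))
		pr_info("rate-limited message\n");
}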
5463 |
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h |
5464 |
+index 234d70ae5f4cb..48e4c59d85e24 100644 |
5465 |
+--- a/include/net/inet_sock.h |
5466 |
++++ b/include/net/inet_sock.h |
5467 |
+@@ -252,6 +252,11 @@ struct inet_sock { |
5468 |
+ #define IP_CMSG_CHECKSUM BIT(7) |
5469 |
+ #define IP_CMSG_RECVFRAGSIZE BIT(8) |
5470 |
+ |
5471 |
++static inline bool sk_is_inet(struct sock *sk) |
5472 |
++{ |
5473 |
++ return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; |
5474 |
++} |
5475 |
++ |
5476 |
+ /** |
5477 |
+ * sk_to_full_sk - Access to a full socket |
5478 |
+ * @sk: pointer to a socket |
5479 |
+diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h |
5480 |
+index d4e631aa976fb..6025dd8ba4aa1 100644 |
5481 |
+--- a/include/trace/events/libata.h |
5482 |
++++ b/include/trace/events/libata.h |
5483 |
+@@ -288,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template, |
5484 |
+ __entry->hob_feature = qc->result_tf.hob_feature; |
5485 |
+ __entry->nsect = qc->result_tf.nsect; |
5486 |
+ __entry->hob_nsect = qc->result_tf.hob_nsect; |
5487 |
++ __entry->flags = qc->flags; |
5488 |
+ ), |
5489 |
+ |
5490 |
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ |
5491 |
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c |
5492 |
+index e978f36e6be86..8d0b68a170422 100644 |
5493 |
+--- a/kernel/dma/direct.c |
5494 |
++++ b/kernel/dma/direct.c |
5495 |
+@@ -357,7 +357,7 @@ void dma_direct_free(struct device *dev, size_t size, |
5496 |
+ } else { |
5497 |
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED)) |
5498 |
+ arch_dma_clear_uncached(cpu_addr, size); |
5499 |
+- if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) |
5500 |
++ if (dma_set_encrypted(dev, cpu_addr, size)) |
5501 |
+ return; |
5502 |
+ } |
5503 |
+ |
5504 |
+@@ -392,7 +392,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, |
5505 |
+ struct page *page, dma_addr_t dma_addr, |
5506 |
+ enum dma_data_direction dir) |
5507 |
+ { |
5508 |
+- unsigned int page_order = get_order(size); |
5509 |
+ void *vaddr = page_address(page); |
5510 |
+ |
5511 |
+ /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ |
5512 |
+@@ -400,7 +399,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, |
5513 |
+ dma_free_from_pool(dev, vaddr, size)) |
5514 |
+ return; |
5515 |
+ |
5516 |
+- if (dma_set_encrypted(dev, vaddr, 1 << page_order)) |
5517 |
++ if (dma_set_encrypted(dev, vaddr, size)) |
5518 |
+ return; |
5519 |
+ __dma_direct_free_pages(dev, page, size); |
5520 |
+ } |
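Both dma/direct.c hunks fix a units mismatch: dma_set_encrypted() takes a size in bytes, while 1 << page_order is a count of pages. For a 16 KiB buffer, get_order(16384) == 2, so the old calls passed 4 where 16384 was meant; assuming dma_set_encrypted() rounds the byte count up to whole pages, only one page instead of four would have its encryption state flipped. Passing size through directly removes the conversion error, which is also why the second hunk can drop the now-unused page_order local.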
5521 |
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c |
5522 |
+index b56833700d23f..c69d82273ce78 100644 |
5523 |
+--- a/kernel/trace/rethook.c |
5524 |
++++ b/kernel/trace/rethook.c |
5525 |
+@@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh) |
5526 |
+ if (unlikely(!handler)) |
5527 |
+ return NULL; |
5528 |
+ |
5529 |
++ /* |
5530 |
++ * This expects the caller will set up a rethook on a function entry. |
5531 |
++ * When the function returns, the rethook will eventually be reclaimed |
5532 |
++ * or released in the rethook_recycle() with call_rcu(). |
5533 |
++ * This means the caller must be run in the RCU-available context. |
5534 |
++ */ |
5535 |
++ if (unlikely(!rcu_is_watching())) |
5536 |
++ return NULL; |
5537 |
++ |
5538 |
+ fn = freelist_try_get(&rh->pool); |
5539 |
+ if (!fn) |
5540 |
+ return NULL; |
5541 |
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c |
5542 |
+index 47cebef78532c..13439743285c5 100644 |
5543 |
+--- a/kernel/trace/trace_kprobe.c |
5544 |
++++ b/kernel/trace/trace_kprobe.c |
5545 |
+@@ -1718,8 +1718,17 @@ static int |
5546 |
+ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) |
5547 |
+ { |
5548 |
+ struct kretprobe *rp = get_kretprobe(ri); |
5549 |
+- struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp); |
5550 |
++ struct trace_kprobe *tk; |
5551 |
++ |
5552 |
++ /* |
5553 |
++ * There is a small chance that get_kretprobe(ri) returns NULL when |
5554 |
++ * the kretprobe is unregistered on another CPU between kretprobe's |
5555 |
++ * trampoline_handler and this function. |
5556 |
++ */ |
5557 |
++ if (unlikely(!rp)) |
5558 |
++ return 0; |
5559 |
+ |
5560 |
++ tk = container_of(rp, struct trace_kprobe, rp); |
5561 |
+ raw_cpu_inc(*tk->nhit); |
5562 |
+ |
5563 |
+ if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) |
5564 |
+diff --git a/mm/filemap.c b/mm/filemap.c |
5565 |
+index 61dd39990fda2..be1859a276e1e 100644 |
5566 |
+--- a/mm/filemap.c |
5567 |
++++ b/mm/filemap.c |
5568 |
+@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping, |
5569 |
+ continue; |
5570 |
+ if (xas.xa_index > max || xa_is_value(folio)) |
5571 |
+ break; |
5572 |
++ if (xa_is_sibling(folio)) |
5573 |
++ break; |
5574 |
+ if (!folio_try_get_rcu(folio)) |
5575 |
+ goto retry; |
5576 |
+ |
5577 |
+diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c |
5578 |
+index bb0cea5468cbf..f483742e9dea8 100644 |
5579 |
+--- a/mm/hwpoison-inject.c |
5580 |
++++ b/mm/hwpoison-inject.c |
5581 |
+@@ -48,7 +48,7 @@ static int hwpoison_inject(void *data, u64 val) |
5582 |
+ |
5583 |
+ inject: |
5584 |
+ pr_info("Injecting memory failure at pfn %#lx\n", pfn); |
5585 |
+- err = memory_failure(pfn, 0); |
5586 |
++ err = memory_failure(pfn, MF_SW_SIMULATED); |
5587 |
+ return (err == -EOPNOTSUPP) ? 0 : err; |
5588 |
+ } |
5589 |
+ |
5590 |
+diff --git a/mm/madvise.c b/mm/madvise.c |
5591 |
+index 1873616a37d2e..4d29a11c18e9e 100644 |
5592 |
+--- a/mm/madvise.c |
5593 |
++++ b/mm/madvise.c |
5594 |
+@@ -1101,7 +1101,7 @@ static int madvise_inject_error(int behavior, |
5595 |
+ } else { |
5596 |
+ pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", |
5597 |
+ pfn, start); |
5598 |
+- ret = memory_failure(pfn, MF_COUNT_INCREASED); |
5599 |
++ ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); |
5600 |
+ if (ret == -EOPNOTSUPP) |
5601 |
+ ret = 0; |
5602 |
+ } |
5603 |
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
5604 |
+index d4a4adcca01f3..94dac77f5ebad 100644 |
5605 |
+--- a/mm/memory-failure.c |
5606 |
++++ b/mm/memory-failure.c |
5607 |
+@@ -68,6 +68,8 @@ int sysctl_memory_failure_recovery __read_mostly = 1; |
5608 |
+ |
5609 |
+ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); |
5610 |
+ |
5611 |
++static bool hw_memory_failure __read_mostly = false; |
5612 |
++ |
5613 |
+ static bool __page_handle_poison(struct page *page) |
5614 |
+ { |
5615 |
+ int ret; |
5616 |
+@@ -1780,6 +1782,9 @@ int memory_failure(unsigned long pfn, int flags) |
5617 |
+ |
5618 |
+ mutex_lock(&mf_mutex); |
5619 |
+ |
5620 |
++ if (!(flags & MF_SW_SIMULATED)) |
5621 |
++ hw_memory_failure = true; |
5622 |
++ |
5623 |
+ p = pfn_to_online_page(pfn); |
5624 |
+ if (!p) { |
5625 |
+ res = arch_memory_failure(pfn, flags); |
5626 |
+@@ -2138,6 +2143,13 @@ int unpoison_memory(unsigned long pfn) |
5627 |
+ |
5628 |
+ mutex_lock(&mf_mutex); |
5629 |
+ |
5630 |
++ if (hw_memory_failure) { |
5631 |
++ unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n", |
5632 |
++ pfn, &unpoison_rs); |
5633 |
++ ret = -EOPNOTSUPP; |
5634 |
++ goto unlock_mutex; |
5635 |
++ } |
5636 |
++ |
5637 |
+ if (!PageHWPoison(p)) { |
5638 |
+ unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", |
5639 |
+ pfn, &unpoison_rs); |
5640 |
+diff --git a/mm/readahead.c b/mm/readahead.c |
5641 |
+index 4a60cdb64262a..38635af5bab75 100644 |
5642 |
+--- a/mm/readahead.c |
5643 |
++++ b/mm/readahead.c |
5644 |
+@@ -508,6 +508,7 @@ void page_cache_ra_order(struct readahead_control *ractl, |
5645 |
+ new_order--; |
5646 |
+ } |
5647 |
+ |
5648 |
++ filemap_invalidate_lock_shared(mapping); |
5649 |
+ while (index <= limit) { |
5650 |
+ unsigned int order = new_order; |
5651 |
+ |
5652 |
+@@ -534,6 +535,7 @@ void page_cache_ra_order(struct readahead_control *ractl, |
5653 |
+ } |
5654 |
+ |
5655 |
+ read_pages(ractl); |
5656 |
++ filemap_invalidate_unlock_shared(mapping); |
5657 |
+ |
5658 |
+ /* |
5659 |
+ * If there were already pages in the page cache, then we may have |
5660 |
+diff --git a/mm/slub.c b/mm/slub.c |
5661 |
+index ed5c2c03a47aa..46de927322fc4 100644 |
5662 |
+--- a/mm/slub.c |
5663 |
++++ b/mm/slub.c |
5664 |
+@@ -2939,6 +2939,7 @@ redo: |
5665 |
+ |
5666 |
+ if (!freelist) { |
5667 |
+ c->slab = NULL; |
5668 |
++ c->tid = next_tid(c->tid); |
5669 |
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
5670 |
+ stat(s, DEACTIVATE_BYPASS); |
5671 |
+ goto new_slab; |
5672 |
+@@ -2971,6 +2972,7 @@ deactivate_slab: |
5673 |
+ freelist = c->freelist; |
5674 |
+ c->slab = NULL; |
5675 |
+ c->freelist = NULL; |
5676 |
++ c->tid = next_tid(c->tid); |
5677 |
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags); |
5678 |
+ deactivate_slab(s, slab, freelist); |
5679 |
+ |
5680 |
+diff --git a/mm/swap.c b/mm/swap.c |
5681 |
+index 7e320ec08c6ae..8a98d21d2786c 100644 |
5682 |
+--- a/mm/swap.c |
5683 |
++++ b/mm/swap.c |
5684 |
+@@ -881,7 +881,7 @@ void lru_cache_disable(void) |
5685 |
+ * lru_disable_count = 0 will have exited the critical |
5686 |
+ * section when synchronize_rcu() returns. |
5687 |
+ */ |
5688 |
+- synchronize_rcu(); |
5689 |
++ synchronize_rcu_expedited(); |
5690 |
+ #ifdef CONFIG_SMP |
5691 |
+ __lru_add_drain_all(true); |
5692 |
+ #else |
5693 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
5694 |
+index 0784c339cd7d8..842917883adb4 100644 |
5695 |
+--- a/net/core/dev.c |
5696 |
++++ b/net/core/dev.c |
5697 |
+@@ -396,16 +396,18 @@ static void list_netdevice(struct net_device *dev) |
5698 |
+ /* Device list removal |
5699 |
+ * caller must respect a RCU grace period before freeing/reusing dev |
5700 |
+ */ |
5701 |
+-static void unlist_netdevice(struct net_device *dev) |
5702 |
++static void unlist_netdevice(struct net_device *dev, bool lock) |
5703 |
+ { |
5704 |
+ ASSERT_RTNL(); |
5705 |
+ |
5706 |
+ /* Unlink dev from the device chain */ |
5707 |
+- write_lock(&dev_base_lock); |
5708 |
++ if (lock) |
5709 |
++ write_lock(&dev_base_lock); |
5710 |
+ list_del_rcu(&dev->dev_list); |
5711 |
+ netdev_name_node_del(dev->name_node); |
5712 |
+ hlist_del_rcu(&dev->index_hlist); |
5713 |
+- write_unlock(&dev_base_lock); |
5714 |
++ if (lock) |
5715 |
++ write_unlock(&dev_base_lock); |
5716 |
+ |
5717 |
+ dev_base_seq_inc(dev_net(dev)); |
5718 |
+ } |
5719 |
+@@ -9963,11 +9965,11 @@ int register_netdevice(struct net_device *dev) |
5720 |
+ goto err_uninit; |
5721 |
+ |
5722 |
+ ret = netdev_register_kobject(dev); |
5723 |
+- if (ret) { |
5724 |
+- dev->reg_state = NETREG_UNREGISTERED; |
5725 |
++ write_lock(&dev_base_lock); |
5726 |
++ dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED; |
5727 |
++ write_unlock(&dev_base_lock); |
5728 |
++ if (ret) |
5729 |
+ goto err_uninit; |
5730 |
+- } |
5731 |
+- dev->reg_state = NETREG_REGISTERED; |
5732 |
+ |
5733 |
+ __netdev_update_features(dev); |
5734 |
+ |
5735 |
+@@ -10249,7 +10251,9 @@ void netdev_run_todo(void) |
5736 |
+ continue; |
5737 |
+ } |
5738 |
+ |
5739 |
++ write_lock(&dev_base_lock); |
5740 |
+ dev->reg_state = NETREG_UNREGISTERED; |
5741 |
++ write_unlock(&dev_base_lock); |
5742 |
+ linkwatch_forget_dev(dev); |
5743 |
+ } |
5744 |
+ |
5745 |
+@@ -10727,9 +10731,10 @@ void unregister_netdevice_many(struct list_head *head) |
5746 |
+ |
5747 |
+ list_for_each_entry(dev, head, unreg_list) { |
5748 |
+ /* And unlink it from device chain. */ |
5749 |
+- unlist_netdevice(dev); |
5750 |
+- |
5751 |
++ write_lock(&dev_base_lock); |
5752 |
++ unlist_netdevice(dev, false); |
5753 |
+ dev->reg_state = NETREG_UNREGISTERING; |
5754 |
++ write_unlock(&dev_base_lock); |
5755 |
+ } |
5756 |
+ flush_all_backlogs(); |
5757 |
+ |
5758 |
+@@ -10876,7 +10881,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, |
5759 |
+ dev_close(dev); |
5760 |
+ |
5761 |
+ /* And unlink it from device chain */ |
5762 |
+- unlist_netdevice(dev); |
5763 |
++ unlist_netdevice(dev, true); |
5764 |
+ |
5765 |
+ synchronize_net(); |
5766 |
+ |
5767 |
+diff --git a/net/core/filter.c b/net/core/filter.c |
5768 |
+index 8847316ee20e0..af1e77f2f24a8 100644 |
5769 |
+--- a/net/core/filter.c |
5770 |
++++ b/net/core/filter.c |
5771 |
+@@ -6506,10 +6506,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, |
5772 |
+ ifindex, proto, netns_id, flags); |
5773 |
+ |
5774 |
+ if (sk) { |
5775 |
+- sk = sk_to_full_sk(sk); |
5776 |
+- if (!sk_fullsock(sk)) { |
5777 |
++ struct sock *sk2 = sk_to_full_sk(sk); |
5778 |
++ |
5779 |
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk |
5780 |
++ * sock refcnt is decremented to prevent a request_sock leak. |
5781 |
++ */ |
5782 |
++ if (!sk_fullsock(sk2)) |
5783 |
++ sk2 = NULL; |
5784 |
++ if (sk2 != sk) { |
5785 |
+ sock_gen_put(sk); |
5786 |
+- return NULL; |
5787 |
++ /* Ensure there is no need to bump sk2 refcnt */ |
5788 |
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { |
5789 |
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); |
5790 |
++ return NULL; |
5791 |
++ } |
5792 |
++ sk = sk2; |
5793 |
+ } |
5794 |
+ } |
5795 |
+ |
5796 |
+@@ -6543,10 +6554,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, |
5797 |
+ flags); |
5798 |
+ |
5799 |
+ if (sk) { |
5800 |
+- sk = sk_to_full_sk(sk); |
5801 |
+- if (!sk_fullsock(sk)) { |
5802 |
++ struct sock *sk2 = sk_to_full_sk(sk); |
5803 |
++ |
5804 |
++ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk |
5805 |
++ * sock refcnt is decremented to prevent a request_sock leak. |
5806 |
++ */ |
5807 |
++ if (!sk_fullsock(sk2)) |
5808 |
++ sk2 = NULL; |
5809 |
++ if (sk2 != sk) { |
5810 |
+ sock_gen_put(sk); |
5811 |
+- return NULL; |
5812 |
++ /* Ensure there is no need to bump sk2 refcnt */ |
5813 |
++ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { |
5814 |
++ WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); |
5815 |
++ return NULL; |
5816 |
++ } |
5817 |
++ sk = sk2; |
5818 |
+ } |
5819 |
+ } |
5820 |
+ |
5821 |
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c |
5822 |
+index 9cbc1c8289bcd..9ee57997354a2 100644 |
5823 |
+--- a/net/core/net-sysfs.c |
5824 |
++++ b/net/core/net-sysfs.c |
5825 |
+@@ -32,6 +32,7 @@ static const char fmt_dec[] = "%d\n"; |
5826 |
+ static const char fmt_ulong[] = "%lu\n"; |
5827 |
+ static const char fmt_u64[] = "%llu\n"; |
5828 |
+ |
5829 |
++/* Caller holds RTNL or dev_base_lock */ |
5830 |
+ static inline int dev_isalive(const struct net_device *dev) |
5831 |
+ { |
5832 |
+ return dev->reg_state <= NETREG_REGISTERED; |
5833 |
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c |
5834 |
+index cc381165ea080..ede0af308f404 100644 |
5835 |
+--- a/net/core/skmsg.c |
5836 |
++++ b/net/core/skmsg.c |
5837 |
+@@ -695,6 +695,11 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) |
5838 |
+ |
5839 |
+ write_lock_bh(&sk->sk_callback_lock); |
5840 |
+ |
5841 |
++ if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) { |
5842 |
++ psock = ERR_PTR(-EINVAL); |
5843 |
++ goto out; |
5844 |
++ } |
5845 |
++ |
5846 |
+ if (sk->sk_user_data) { |
5847 |
+ psock = ERR_PTR(-EBUSY); |
5848 |
+ goto out; |
5849 |
+diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c |
5850 |
+index 7e6b37a54add3..1c94bb8ea03f2 100644 |
5851 |
+--- a/net/ethtool/eeprom.c |
5852 |
++++ b/net/ethtool/eeprom.c |
5853 |
+@@ -36,7 +36,7 @@ static int fallback_set_params(struct eeprom_req_info *request, |
5854 |
+ if (request->page) |
5855 |
+ offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset; |
5856 |
+ |
5857 |
+- if (modinfo->type == ETH_MODULE_SFF_8079 && |
5858 |
++ if (modinfo->type == ETH_MODULE_SFF_8472 && |
5859 |
+ request->i2c_address == 0x51) |
5860 |
+ offset += ETH_MODULE_EEPROM_PAGE_LEN * 2; |
5861 |
+ |
5862 |
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
5863 |
+index bc8dfdf1c48ad..3186735179766 100644 |
5864 |
+--- a/net/ipv4/ip_gre.c |
5865 |
++++ b/net/ipv4/ip_gre.c |
5866 |
+@@ -524,7 +524,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) |
5867 |
+ int tunnel_hlen; |
5868 |
+ int version; |
5869 |
+ int nhoff; |
5870 |
+- int thoff; |
5871 |
+ |
5872 |
+ tun_info = skb_tunnel_info(skb); |
5873 |
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || |
5874 |
+@@ -558,10 +557,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) |
5875 |
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) |
5876 |
+ truncate = true; |
5877 |
+ |
5878 |
+- thoff = skb_transport_header(skb) - skb_mac_header(skb); |
5879 |
+- if (skb->protocol == htons(ETH_P_IPV6) && |
5880 |
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) |
5881 |
+- truncate = true; |
5882 |
++ if (skb->protocol == htons(ETH_P_IPV6)) { |
5883 |
++ int thoff; |
5884 |
++ |
5885 |
++ if (skb_transport_header_was_set(skb)) |
5886 |
++ thoff = skb_transport_header(skb) - skb_mac_header(skb); |
5887 |
++ else |
5888 |
++ thoff = nhoff + sizeof(struct ipv6hdr); |
5889 |
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) |
5890 |
++ truncate = true; |
5891 |
++ } |
5892 |
+ |
5893 |
+ if (version == 1) { |
5894 |
+ erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), |
5895 |
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
5896 |
+index 4e5ceca7ff7f9..9dccbf863f826 100644 |
5897 |
+--- a/net/ipv4/ping.c |
5898 |
++++ b/net/ipv4/ping.c |
5899 |
+@@ -319,12 +319,16 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, |
5900 |
+ pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", |
5901 |
+ sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); |
5902 |
+ |
5903 |
++ if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) |
5904 |
++ return 0; |
5905 |
++ |
5906 |
+ tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; |
5907 |
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); |
5908 |
+ |
5909 |
+- if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk), |
5910 |
+- addr->sin_addr.s_addr, |
5911 |
+- chk_addr_ret)) |
5912 |
++ if (chk_addr_ret == RTN_MULTICAST || |
5913 |
++ chk_addr_ret == RTN_BROADCAST || |
5914 |
++ (chk_addr_ret != RTN_LOCAL && |
5915 |
++ !inet_can_nonlocal_bind(net, isk))) |
5916 |
+ return -EADDRNOTAVAIL; |
5917 |
+ |
5918 |
+ #if IS_ENABLED(CONFIG_IPV6) |
5919 |
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c |
5920 |
+index 1cdcb4df0eb7e..2c597a4e429ab 100644 |
5921 |
+--- a/net/ipv4/tcp_bpf.c |
5922 |
++++ b/net/ipv4/tcp_bpf.c |
5923 |
+@@ -612,9 +612,6 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) |
5924 |
+ return 0; |
5925 |
+ } |
5926 |
+ |
5927 |
+- if (inet_csk_has_ulp(sk)) |
5928 |
+- return -EINVAL; |
5929 |
+- |
5930 |
+ if (sk->sk_family == AF_INET6) { |
5931 |
+ if (tcp_bpf_assert_proto_ops(psock->sk_proto)) |
5932 |
+ return -EINVAL; |
5933 |
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
5934 |
+index 5136959b3dc5d..b996ccaff56e3 100644 |
5935 |
+--- a/net/ipv6/ip6_gre.c |
5936 |
++++ b/net/ipv6/ip6_gre.c |
5937 |
+@@ -944,7 +944,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, |
5938 |
+ __be16 proto; |
5939 |
+ __u32 mtu; |
5940 |
+ int nhoff; |
5941 |
+- int thoff; |
5942 |
+ |
5943 |
+ if (!pskb_inet_may_pull(skb)) |
5944 |
+ goto tx_err; |
5945 |
+@@ -965,10 +964,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, |
5946 |
+ (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) |
5947 |
+ truncate = true; |
5948 |
+ |
5949 |
+- thoff = skb_transport_header(skb) - skb_mac_header(skb); |
5950 |
+- if (skb->protocol == htons(ETH_P_IPV6) && |
5951 |
+- (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) |
5952 |
+- truncate = true; |
5953 |
++ if (skb->protocol == htons(ETH_P_IPV6)) { |
5954 |
++ int thoff; |
5955 |
++ |
5956 |
++ if (skb_transport_header_was_set(skb)) |
5957 |
++ thoff = skb_transport_header(skb) - skb_mac_header(skb); |
5958 |
++ else |
5959 |
++ thoff = nhoff + sizeof(struct ipv6hdr); |
5960 |
++ if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) |
5961 |
++ truncate = true; |
5962 |
++ } |
5963 |
+ |
5964 |
+ if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) |
5965 |
+ goto tx_err; |
5966 |
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c |
5967 |
+index 7873bd1389c36..a8e2425e43b0d 100644 |
5968 |
+--- a/net/netfilter/nf_dup_netdev.c |
5969 |
++++ b/net/netfilter/nf_dup_netdev.c |
5970 |
+@@ -13,14 +13,31 @@ |
5971 |
+ #include <net/netfilter/nf_tables_offload.h> |
5972 |
+ #include <net/netfilter/nf_dup_netdev.h> |
5973 |
+ |
5974 |
+-static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev) |
5975 |
++#define NF_RECURSION_LIMIT 2 |
5976 |
++ |
5977 |
++static DEFINE_PER_CPU(u8, nf_dup_skb_recursion); |
5978 |
++ |
5979 |
++static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev, |
5980 |
++ enum nf_dev_hooks hook) |
5981 |
+ { |
5982 |
+- if (skb_mac_header_was_set(skb)) |
5983 |
++ if (__this_cpu_read(nf_dup_skb_recursion) > NF_RECURSION_LIMIT) |
5984 |
++ goto err; |
5985 |
++ |
5986 |
++ if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) { |
5987 |
++ if (skb_cow_head(skb, skb->mac_len)) |
5988 |
++ goto err; |
5989 |
++ |
5990 |
+ skb_push(skb, skb->mac_len); |
5991 |
++ } |
5992 |
+ |
5993 |
+ skb->dev = dev; |
5994 |
+ skb_clear_tstamp(skb); |
5995 |
++ __this_cpu_inc(nf_dup_skb_recursion); |
5996 |
+ dev_queue_xmit(skb); |
5997 |
++ __this_cpu_dec(nf_dup_skb_recursion); |
5998 |
++ return; |
5999 |
++err: |
6000 |
++ kfree_skb(skb); |
6001 |
+ } |
6002 |
+ |
6003 |
+ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif) |
6004 |
+@@ -33,7 +50,7 @@ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif) |
6005 |
+ return; |
6006 |
+ } |
6007 |
+ |
6008 |
+- nf_do_netdev_egress(pkt->skb, dev); |
6009 |
++ nf_do_netdev_egress(pkt->skb, dev, nft_hook(pkt)); |
6010 |
+ } |
6011 |
+ EXPORT_SYMBOL_GPL(nf_fwd_netdev_egress); |
6012 |
+ |
6013 |
+@@ -48,7 +65,7 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif) |
6014 |
+ |
6015 |
+ skb = skb_clone(pkt->skb, GFP_ATOMIC); |
6016 |
+ if (skb) |
6017 |
+- nf_do_netdev_egress(skb, dev); |
6018 |
++ nf_do_netdev_egress(skb, dev, nft_hook(pkt)); |
6019 |
+ } |
6020 |
+ EXPORT_SYMBOL_GPL(nf_dup_netdev_egress); |
6021 |
+ |
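nf_do_netdev_egress() can now be re-entered when a duplicated packet's egress path feeds back into another dup/fwd rule; the per-CPU counter caps that depth at NF_RECURSION_LIMIT and drops the skb beyond it. The bare pattern, with illustrative demo_* names (netfilter hooks run with preemption disabled, which is what makes the raw __this_cpu_* ops safe here):

#include <linux/percpu.h>

#define DEMO_RECURSION_LIMIT	2

static DEFINE_PER_CPU(u8, demo_recursion);

/* Returns false when the recursion budget is exhausted; the caller is
 * then expected to free the skb, as the hunk above does (sketch). */
static bool demo_enter(void)
{
	if (__this_cpu_read(demo_recursion) > DEMO_RECURSION_LIMIT)
		return false;
	__this_cpu_inc(demo_recursion);
	return true;
}

static void demo_exit(void)
{
	__this_cpu_dec(demo_recursion);
}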
6022 |
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c |
6023 |
+index ac4859241e177..55d2d49c34259 100644 |
6024 |
+--- a/net/netfilter/nft_meta.c |
6025 |
++++ b/net/netfilter/nft_meta.c |
6026 |
+@@ -14,6 +14,7 @@ |
6027 |
+ #include <linux/in.h> |
6028 |
+ #include <linux/ip.h> |
6029 |
+ #include <linux/ipv6.h> |
6030 |
++#include <linux/random.h> |
6031 |
+ #include <linux/smp.h> |
6032 |
+ #include <linux/static_key.h> |
6033 |
+ #include <net/dst.h> |
6034 |
+@@ -32,8 +33,6 @@ |
6035 |
+ #define NFT_META_SECS_PER_DAY 86400 |
6036 |
+ #define NFT_META_DAYS_PER_WEEK 7 |
6037 |
+ |
6038 |
+-static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); |
6039 |
+- |
6040 |
+ static u8 nft_meta_weekday(void) |
6041 |
+ { |
6042 |
+ time64_t secs = ktime_get_real_seconds(); |
6043 |
+@@ -271,13 +270,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, |
6044 |
+ return true; |
6045 |
+ } |
6046 |
+ |
6047 |
+-static noinline u32 nft_prandom_u32(void) |
6048 |
+-{ |
6049 |
+- struct rnd_state *state = this_cpu_ptr(&nft_prandom_state); |
6050 |
+- |
6051 |
+- return prandom_u32_state(state); |
6052 |
+-} |
6053 |
+- |
6054 |
+ #ifdef CONFIG_IP_ROUTE_CLASSID |
6055 |
+ static noinline bool |
6056 |
+ nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest) |
6057 |
+@@ -389,7 +381,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
6058 |
+ break; |
6059 |
+ #endif |
6060 |
+ case NFT_META_PRANDOM: |
6061 |
+- *dest = nft_prandom_u32(); |
6062 |
++ *dest = get_random_u32(); |
6063 |
+ break; |
6064 |
+ #ifdef CONFIG_XFRM |
6065 |
+ case NFT_META_SECPATH: |
6066 |
+@@ -518,7 +510,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx, |
6067 |
+ len = IFNAMSIZ; |
6068 |
+ break; |
6069 |
+ case NFT_META_PRANDOM: |
6070 |
+- prandom_init_once(&nft_prandom_state); |
6071 |
+ len = sizeof(u32); |
6072 |
+ break; |
6073 |
+ #ifdef CONFIG_XFRM |
6074 |
+diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c |
6075 |
+index 81b40c663d86a..45d3dc9e96f2c 100644 |
6076 |
+--- a/net/netfilter/nft_numgen.c |
6077 |
++++ b/net/netfilter/nft_numgen.c |
6078 |
+@@ -9,12 +9,11 @@ |
6079 |
+ #include <linux/netlink.h> |
6080 |
+ #include <linux/netfilter.h> |
6081 |
+ #include <linux/netfilter/nf_tables.h> |
6082 |
++#include <linux/random.h> |
6083 |
+ #include <linux/static_key.h> |
6084 |
+ #include <net/netfilter/nf_tables.h> |
6085 |
+ #include <net/netfilter/nf_tables_core.h> |
6086 |
+ |
6087 |
+-static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state); |
6088 |
+- |
6089 |
+ struct nft_ng_inc { |
6090 |
+ u8 dreg; |
6091 |
+ u32 modulus; |
6092 |
+@@ -135,12 +134,9 @@ struct nft_ng_random { |
6093 |
+ u32 offset; |
6094 |
+ }; |
6095 |
+ |
6096 |
+-static u32 nft_ng_random_gen(struct nft_ng_random *priv) |
6097 |
++static u32 nft_ng_random_gen(const struct nft_ng_random *priv) |
6098 |
+ { |
6099 |
+- struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state); |
6100 |
+- |
6101 |
+- return reciprocal_scale(prandom_u32_state(state), priv->modulus) + |
6102 |
+- priv->offset; |
6103 |
++ return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset; |
6104 |
+ } |
6105 |
+ |
6106 |
+ static void nft_ng_random_eval(const struct nft_expr *expr, |
6107 |
+@@ -168,8 +164,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx, |
6108 |
+ if (priv->offset + priv->modulus - 1 < priv->offset) |
6109 |
+ return -EOVERFLOW; |
6110 |
+ |
6111 |
+- prandom_init_once(&nft_numgen_prandom_state); |
6112 |
+- |
6113 |
+ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg, |
6114 |
+ NULL, NFT_DATA_VALUE, sizeof(u32)); |
6115 |
+ } |
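Both nft_meta and nft_numgen drop their per-CPU prandom state for the core get_random_u32(); nft_ng_random_gen() then maps the full 32-bit value into [offset, offset + modulus) with reciprocal_scale(), a 64-bit multiply and shift rather than a division. Condensed to a sketch (demo_pick is illustrative):

#include <linux/kernel.h>
#include <linux/random.h>

/* Pick a value in [off, off + mod) from a uniform 32-bit source.
 * reciprocal_scale(x, mod) computes (u32)(((u64)x * mod) >> 32): cheap,
 * and carrying the same slight bias for moduli that do not divide 2^32
 * as the prandom-based code it replaces. */
static u32 demo_pick(u32 mod, u32 off)
{
	return reciprocal_scale(get_random_u32(), mod) + off;
}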
6116 |
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c |
6117 |
+index 372bf54a0ca9e..e20d1a9734175 100644 |
6118 |
+--- a/net/openvswitch/flow.c |
6119 |
++++ b/net/openvswitch/flow.c |
6120 |
+@@ -407,7 +407,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) |
6121 |
+ if (flags & IP6_FH_F_FRAG) { |
6122 |
+ if (frag_off) { |
6123 |
+ key->ip.frag = OVS_FRAG_TYPE_LATER; |
6124 |
+- key->ip.proto = nexthdr; |
6125 |
++ key->ip.proto = NEXTHDR_FRAGMENT; |
6126 |
+ return 0; |
6127 |
+ } |
6128 |
+ key->ip.frag = OVS_FRAG_TYPE_FIRST; |
6129 |
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c |
6130 |
+index ed4ccef5d6a82..5449ed114e406 100644 |
6131 |
+--- a/net/sched/sch_netem.c |
6132 |
++++ b/net/sched/sch_netem.c |
6133 |
+@@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) |
6134 |
+ struct tc_netem_rate rate; |
6135 |
+ struct tc_netem_slot slot; |
6136 |
+ |
6137 |
+- qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), |
6138 |
++ qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), |
6139 |
+ UINT_MAX); |
6140 |
+- qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), |
6141 |
++ qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), |
6142 |
+ UINT_MAX); |
6143 |
+ qopt.limit = q->limit; |
6144 |
+ qopt.loss = q->loss; |
6145 |
+diff --git a/net/tipc/core.c b/net/tipc/core.c |
6146 |
+index 3f4542e0f0650..434e70eabe081 100644 |
6147 |
+--- a/net/tipc/core.c |
6148 |
++++ b/net/tipc/core.c |
6149 |
+@@ -109,10 +109,9 @@ static void __net_exit tipc_exit_net(struct net *net) |
6150 |
+ struct tipc_net *tn = tipc_net(net); |
6151 |
+ |
6152 |
+ tipc_detach_loopback(net); |
6153 |
++ tipc_net_stop(net); |
6154 |
+ /* Make sure the tipc_net_finalize_work() finished */ |
6155 |
+ cancel_work_sync(&tn->work); |
6156 |
+- tipc_net_stop(net); |
6157 |
+- |
6158 |
+ tipc_bcast_stop(net); |
6159 |
+ tipc_nametbl_stop(net); |
6160 |
+ tipc_sk_rht_destroy(net); |
6161 |
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c |
6162 |
+index 7b2b0e7ffee4c..5c9697840ef70 100644 |
6163 |
+--- a/net/tls/tls_main.c |
6164 |
++++ b/net/tls/tls_main.c |
6165 |
+@@ -873,6 +873,8 @@ static void tls_update(struct sock *sk, struct proto *p, |
6166 |
+ { |
6167 |
+ struct tls_context *ctx; |
6168 |
+ |
6169 |
++ WARN_ON_ONCE(sk->sk_prot == p); |
6170 |
++ |
6171 |
+ ctx = tls_get_ctx(sk); |
6172 |
+ if (likely(ctx)) { |
6173 |
+ ctx->sk_write_space = write_space; |
6174 |
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c |
6175 |
+index d6bcdbfd0fc58..9b12ea3ab85a7 100644 |
6176 |
+--- a/net/xdp/xsk.c |
6177 |
++++ b/net/xdp/xsk.c |
6178 |
+@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk) |
6179 |
+ goto out; |
6180 |
+ } |
6181 |
+ |
6182 |
+- skb = xsk_build_skb(xs, &desc); |
6183 |
+- if (IS_ERR(skb)) { |
6184 |
+- err = PTR_ERR(skb); |
6185 |
+- goto out; |
6186 |
+- } |
6187 |
+- |
6188 |
+ /* This is the backpressure mechanism for the Tx path. |
6189 |
+ * Reserve space in the completion queue and only proceed |
6190 |
+ * if there is space in it. This avoids having to implement |
6191 |
+@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk) |
6192 |
+ spin_lock_irqsave(&xs->pool->cq_lock, flags); |
6193 |
+ if (xskq_prod_reserve(xs->pool->cq)) { |
6194 |
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
6195 |
+- kfree_skb(skb); |
6196 |
+ goto out; |
6197 |
+ } |
6198 |
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
6199 |
+ |
6200 |
++ skb = xsk_build_skb(xs, &desc); |
6201 |
++ if (IS_ERR(skb)) { |
6202 |
++ err = PTR_ERR(skb); |
6203 |
++ spin_lock_irqsave(&xs->pool->cq_lock, flags); |
6204 |
++ xskq_prod_cancel(xs->pool->cq); |
6205 |
++ spin_unlock_irqrestore(&xs->pool->cq_lock, flags); |
6206 |
++ goto out; |
6207 |
++ } |
6208 |
++ |
6209 |
+ err = __dev_direct_xmit(skb, xs->queue_id); |
6210 |
+ if (err == NETDEV_TX_BUSY) { |
6211 |
+ /* Tell user-space to retry the send */ |
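The xsk.c reorder moves xsk_build_skb() after the completion-queue reservation, so the error path must now cancel the reserved slot instead of freeing a never-reserved skb. As a generic reserve-then-allocate sketch; every demo_* name below is a hypothetical stand-in for the xskq_prod_* internals:

#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_ring;				/* hypothetical completion ring */
int demo_prod_reserve(struct demo_ring *cq);	/* 0 on success */
void demo_prod_cancel(struct demo_ring *cq);
struct sk_buff *demo_build_skb(void);		/* may return ERR_PTR() */
int demo_send(struct sk_buff *skb);

static int demo_xmit_one(struct demo_ring *cq, spinlock_t *lock)
{
	unsigned long flags;
	struct sk_buff *skb;

	/* Reserve the completion slot first: back-pressure is applied
	 * before any allocation happens. */
	spin_lock_irqsave(lock, flags);
	if (demo_prod_reserve(cq)) {
		spin_unlock_irqrestore(lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(lock, flags);

	skb = demo_build_skb();
	if (IS_ERR(skb)) {
		/* Build failed after the reservation: give the slot back
		 * under the same lock. */
		spin_lock_irqsave(lock, flags);
		demo_prod_cancel(cq);
		spin_unlock_irqrestore(lock, flags);
		return PTR_ERR(skb);
	}

	return demo_send(skb);
}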
6212 |
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c |
6213 |
+index b28344fd7408e..0005900b19b08 100644 |
6214 |
+--- a/scripts/mod/modpost.c |
6215 |
++++ b/scripts/mod/modpost.c |
6216 |
+@@ -1115,7 +1115,7 @@ static const struct sectioncheck sectioncheck[] = { |
6217 |
+ }, |
6218 |
+ /* Do not export init/exit functions or data */ |
6219 |
+ { |
6220 |
+- .fromsec = { "__ksymtab*", NULL }, |
6221 |
++ .fromsec = { "___ksymtab*", NULL }, |
6222 |
+ .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, |
6223 |
+ .mismatch = EXPORT_TO_INIT_EXIT, |
6224 |
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, |
6225 |
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c |
6226 |
+index 15dc7160ba34e..8cfdaee779050 100644 |
6227 |
+--- a/sound/core/memalloc.c |
6228 |
++++ b/sound/core/memalloc.c |
6229 |
+@@ -431,33 +431,17 @@ static const struct snd_malloc_ops snd_dma_iram_ops = { |
6230 |
+ */ |
6231 |
+ static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size) |
6232 |
+ { |
6233 |
+- void *p; |
6234 |
+- |
6235 |
+- p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); |
6236 |
+-#ifdef CONFIG_X86 |
6237 |
+- if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) |
6238 |
+- set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT); |
6239 |
+-#endif |
6240 |
+- return p; |
6241 |
++ return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); |
6242 |
+ } |
6243 |
+ |
6244 |
+ static void snd_dma_dev_free(struct snd_dma_buffer *dmab) |
6245 |
+ { |
6246 |
+-#ifdef CONFIG_X86 |
6247 |
+- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) |
6248 |
+- set_memory_wb((unsigned long)dmab->area, |
6249 |
+- PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT); |
6250 |
+-#endif |
6251 |
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); |
6252 |
+ } |
6253 |
+ |
6254 |
+ static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab, |
6255 |
+ struct vm_area_struct *area) |
6256 |
+ { |
6257 |
+-#ifdef CONFIG_X86 |
6258 |
+- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) |
6259 |
+- area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); |
6260 |
+-#endif |
6261 |
+ return dma_mmap_coherent(dmab->dev.dev, area, |
6262 |
+ dmab->area, dmab->addr, dmab->bytes); |
6263 |
+ } |
6264 |
+@@ -471,10 +455,6 @@ static const struct snd_malloc_ops snd_dma_dev_ops = { |
6265 |
+ /* |
6266 |
+ * Write-combined pages |
6267 |
+ */ |
6268 |
+-#ifdef CONFIG_X86 |
6269 |
+-/* On x86, share the same ops as the standard dev ops */ |
6270 |
+-#define snd_dma_wc_ops snd_dma_dev_ops |
6271 |
+-#else /* CONFIG_X86 */ |
6272 |
+ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) |
6273 |
+ { |
6274 |
+ return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); |
6275 |
+@@ -497,7 +477,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = { |
6276 |
+ .free = snd_dma_wc_free, |
6277 |
+ .mmap = snd_dma_wc_mmap, |
6278 |
+ }; |
6279 |
+-#endif /* CONFIG_X86 */ |
6280 |
+ |
6281 |
+ #ifdef CONFIG_SND_DMA_SGBUF |
6282 |
+ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size); |
6283 |
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c |
6284 |
+index 3f35972e1cf75..161a9711cd63e 100644 |
6285 |
+--- a/sound/hda/hdac_i915.c |
6286 |
++++ b/sound/hda/hdac_i915.c |
6287 |
+@@ -119,21 +119,18 @@ static int i915_component_master_match(struct device *dev, int subcomponent, |
6288 |
+ /* check whether Intel graphics is present and reachable */ |
6289 |
+ static int i915_gfx_present(struct pci_dev *hdac_pci) |
6290 |
+ { |
6291 |
+- unsigned int class = PCI_BASE_CLASS_DISPLAY << 16; |
6292 |
+ struct pci_dev *display_dev = NULL; |
6293 |
+- bool match = false; |
6294 |
+ |
6295 |
+- do { |
6296 |
+- display_dev = pci_get_class(class, display_dev); |
6297 |
+- |
6298 |
+- if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL && |
6299 |
++ for_each_pci_dev(display_dev) { |
6300 |
++ if (display_dev->vendor == PCI_VENDOR_ID_INTEL && |
6301 |
++ (display_dev->class >> 16) == PCI_BASE_CLASS_DISPLAY && |
6302 |
+ connectivity_check(display_dev, hdac_pci)) { |
6303 |
+ pci_dev_put(display_dev); |
6304 |
+- match = true; |
6305 |
++ return true; |
6306 |
+ } |
6307 |
+- } while (!match && display_dev); |
6308 |
++ } |
6309 |
+ |
6310 |
+- return match; |
6311 |
++ return false; |
6312 |
+ } |
6313 |
+ |
6314 |
+ /** |
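The rewritten loop relies on for_each_pci_dev(), which iterates via pci_get_device(PCI_ANY_ID, PCI_ANY_ID, ...) and leaves a reference held on each device it yields; returning from inside the loop therefore needs an explicit pci_dev_put(), while running the loop to completion drops the last reference itself. The same idiom in isolation (the predicate is illustrative):

#include <linux/pci.h>

/* Return true if any Intel display-class device is present (sketch
 * mirroring the loop above). */
static bool demo_find_intel_display(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {
		if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
		    (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
			pci_dev_put(pdev);	/* drop the ref the iterator holds */
			return true;
		}
	}
	return false;	/* iterator already released the final reference */
}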
6315 |
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c |
6316 |
+index cd1db943b7e07..7c6b1fe8dfcce 100644 |
6317 |
+--- a/sound/pci/hda/hda_auto_parser.c |
6318 |
++++ b/sound/pci/hda/hda_auto_parser.c |
6319 |
+@@ -819,7 +819,7 @@ static void set_pin_targets(struct hda_codec *codec, |
6320 |
+ snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val); |
6321 |
+ } |
6322 |
+ |
6323 |
+-static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) |
6324 |
++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth) |
6325 |
+ { |
6326 |
+ const char *modelname = codec->fixup_name; |
6327 |
+ |
6328 |
+@@ -829,7 +829,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) |
6329 |
+ if (++depth > 10) |
6330 |
+ break; |
6331 |
+ if (fix->chained_before) |
6332 |
+- apply_fixup(codec, fix->chain_id, action, depth + 1); |
6333 |
++ __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1); |
6334 |
+ |
6335 |
+ switch (fix->type) { |
6336 |
+ case HDA_FIXUP_PINS: |
6337 |
+@@ -870,6 +870,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) |
6338 |
+ id = fix->chain_id; |
6339 |
+ } |
6340 |
+ } |
6341 |
++EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup); |
6342 |
+ |
6343 |
+ /** |
6344 |
+ * snd_hda_apply_fixup - Apply the fixup chain with the given action |
6345 |
+@@ -879,7 +880,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) |
6346 |
+ void snd_hda_apply_fixup(struct hda_codec *codec, int action) |
6347 |
+ { |
6348 |
+ if (codec->fixup_list) |
6349 |
+- apply_fixup(codec, codec->fixup_id, action, 0); |
6350 |
++ __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0); |
6351 |
+ } |
6352 |
+ EXPORT_SYMBOL_GPL(snd_hda_apply_fixup); |
6353 |
+ |
6354 |
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h |
6355 |
+index aca592651870e..682dca2057dbe 100644 |
6356 |
+--- a/sound/pci/hda/hda_local.h |
6357 |
++++ b/sound/pci/hda/hda_local.h |
6358 |
+@@ -348,6 +348,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec); |
6359 |
+ void snd_hda_apply_pincfgs(struct hda_codec *codec, |
6360 |
+ const struct hda_pintbl *cfg); |
6361 |
+ void snd_hda_apply_fixup(struct hda_codec *codec, int action); |
6362 |
++void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth); |
6363 |
+ void snd_hda_pick_fixup(struct hda_codec *codec, |
6364 |
+ const struct hda_model_fixup *models, |
6365 |
+ const struct snd_pci_quirk *quirk, |
6366 |
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
6367 |
+index bce2cef80000b..0b7d500249f6e 100644 |
6368 |
+--- a/sound/pci/hda/patch_conexant.c |
6369 |
++++ b/sound/pci/hda/patch_conexant.c |
6370 |
+@@ -1079,11 +1079,11 @@ static int patch_conexant_auto(struct hda_codec *codec) |
6371 |
+ if (err < 0) |
6372 |
+ goto error; |
6373 |
+ |
6374 |
+- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); |
6375 |
++ err = cx_auto_parse_beep(codec); |
6376 |
+ if (err < 0) |
6377 |
+ goto error; |
6378 |
+ |
6379 |
+- err = cx_auto_parse_beep(codec); |
6380 |
++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); |
6381 |
+ if (err < 0) |
6382 |
+ goto error; |
6383 |
+ |
6384 |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
6385 |
+index 588d4a59c8d92..d3d786de8f4c4 100644 |
6386 |
+--- a/sound/pci/hda/patch_realtek.c |
6387 |
++++ b/sound/pci/hda/patch_realtek.c |
6388 |
+@@ -2634,6 +2634,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
6389 |
+ SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6390 |
+ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6391 |
+ SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6392 |
++ SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6393 |
+ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6394 |
+ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
6395 |
+ SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED), |
6396 |
+@@ -7056,6 +7057,7 @@ enum { |
6397 |
+ ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS, |
6398 |
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, |
6399 |
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, |
6400 |
++ ALC298_FIXUP_LENOVO_C940_DUET7, |
6401 |
+ ALC287_FIXUP_13S_GEN2_SPEAKERS, |
6402 |
+ ALC256_FIXUP_SET_COEF_DEFAULTS, |
6403 |
+ ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, |
6404 |
+@@ -7074,6 +7076,23 @@ enum { |
6405 |
+ ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE, |
6406 |
+ }; |
6407 |
+ |
6408 |
++/* A special fixup for Lenovo C940 and Yoga Duet 7; |
6409 |
++ * both have the very same PCI SSID, and we need to apply different fixups |
6410 |
++ * depending on the codec ID |
6411 |
++ */ |
6412 |
++static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec, |
6413 |
++ const struct hda_fixup *fix, |
6414 |
++ int action) |
6415 |
++{ |
6416 |
++ int id; |
6417 |
++ |
6418 |
++ if (codec->core.vendor_id == 0x10ec0298) |
6419 |
++ id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */ |
6420 |
++ else |
6421 |
++ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */ |
6422 |
++ __snd_hda_apply_fixup(codec, id, action, 0); |
6423 |
++} |
6424 |
++ |
6425 |
+ static const struct hda_fixup alc269_fixups[] = { |
6426 |
+ [ALC269_FIXUP_GPIO2] = { |
6427 |
+ .type = HDA_FIXUP_FUNC, |
6428 |
+@@ -8773,6 +8792,10 @@ static const struct hda_fixup alc269_fixups[] = { |
6429 |
+ .chained = true, |
6430 |
+ .chain_id = ALC269_FIXUP_HEADSET_MODE, |
6431 |
+ }, |
6432 |
++ [ALC298_FIXUP_LENOVO_C940_DUET7] = { |
6433 |
++ .type = HDA_FIXUP_FUNC, |
6434 |
++ .v.func = alc298_fixup_lenovo_c940_duet7, |
6435 |
++ }, |
6436 |
+ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = { |
6437 |
+ .type = HDA_FIXUP_VERBS, |
6438 |
+ .v.verbs = (const struct hda_verb[]) { |
6439 |
+@@ -9074,6 +9097,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6440 |
+ ALC285_FIXUP_HP_GPIO_AMP_INIT), |
6441 |
+ SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation", |
6442 |
+ ALC285_FIXUP_HP_GPIO_AMP_INIT), |
6443 |
++ SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), |
6444 |
+ SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), |
6445 |
+ SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), |
6446 |
+ SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), |
6447 |
+@@ -9239,6 +9263,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6448 |
+ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6449 |
+ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6450 |
+ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6451 |
++ SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6452 |
+ SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6453 |
+ SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6454 |
+ SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), |
6455 |
+@@ -9325,7 +9350,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6456 |
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), |
6457 |
+ SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), |
6458 |
+ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), |
6459 |
+- SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), |
6460 |
++ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7), |
6461 |
+ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), |
6462 |
+ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), |
6463 |
+ SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), |
6464 |
+@@ -10789,6 +10814,7 @@ enum { |
6465 |
+ ALC668_FIXUP_MIC_DET_COEF, |
6466 |
+ ALC897_FIXUP_LENOVO_HEADSET_MIC, |
6467 |
+ ALC897_FIXUP_HEADSET_MIC_PIN, |
6468 |
++ ALC897_FIXUP_HP_HSMIC_VERB, |
6469 |
+ }; |
6470 |
+ |
6471 |
+ static const struct hda_fixup alc662_fixups[] = { |
6472 |
+@@ -11208,6 +11234,13 @@ static const struct hda_fixup alc662_fixups[] = { |
6473 |
+ .chained = true, |
6474 |
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC |
6475 |
+ }, |
6476 |
++ [ALC897_FIXUP_HP_HSMIC_VERB] = { |
6477 |
++ .type = HDA_FIXUP_PINS, |
6478 |
++ .v.pins = (const struct hda_pintbl[]) { |
6479 |
++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ |
6480 |
++ { } |
6481 |
++ }, |
6482 |
++ }, |
6483 |
+ }; |
6484 |
+ |
6485 |
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
6486 |
+@@ -11233,6 +11266,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
6487 |
+ SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
6488 |
+ SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
6489 |
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), |
6490 |
++ SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), |
6491 |
+ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), |
6492 |
+ SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), |
6493 |
+ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), |
6494 |
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c |
6495 |
+index 773a136161f11..a188901a83bbe 100644 |
6496 |
+--- a/sound/pci/hda/patch_via.c |
6497 |
++++ b/sound/pci/hda/patch_via.c |
6498 |
+@@ -520,11 +520,11 @@ static int via_parse_auto_config(struct hda_codec *codec) |
6499 |
+ if (err < 0) |
6500 |
+ return err; |
6501 |
+ |
6502 |
+- err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); |
6503 |
++ err = auto_parse_beep(codec); |
6504 |
+ if (err < 0) |
6505 |
+ return err; |
6506 |
+ |
6507 |
+- err = auto_parse_beep(codec); |
6508 |
++ err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); |
6509 |
+ if (err < 0) |
6510 |
+ return err; |
6511 |
+ |
6512 |
+diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh |
6513 |
+index 6ffbb27afabac..ec108d45d3c61 100755 |
6514 |
+--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh |
6515 |
++++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh |
6516 |
+@@ -43,7 +43,7 @@ CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer" |
6517 |
+ cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1 |
6518 |
+ |
6519 |
+ # Add a 1 second delay to skip samples that are not in the leaf() function |
6520 |
+-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null & |
6521 |
++perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null & |
6522 |
+ PID=$! |
6523 |
+ |
6524 |
+ echo " + Recording (PID=$PID)..." |
6525 |
+diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c |
6526 |
+index d23a9e322ff52..0b4f61b6cc6b8 100644 |
6527 |
+--- a/tools/perf/tests/topology.c |
6528 |
++++ b/tools/perf/tests/topology.c |
6529 |
+@@ -115,7 +115,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map) |
6530 |
+ * physical_package_id will be set to -1. Hence skip this |
6531 |
+ * test if physical_package_id returns -1 for cpu from perf_cpu_map. |
6532 |
+ */ |
6533 |
+- if (strncmp(session->header.env.arch, "powerpc", 7)) { |
6534 |
++ if (!strncmp(session->header.env.arch, "ppc64le", 7)) { |
6535 |
+ if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1) |
6536 |
+ return TEST_SKIP; |
6537 |
+ } |
6538 |
+diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c |
6539 |
+index 1a80151baed96..d040406f3314c 100644 |
6540 |
+--- a/tools/perf/util/arm-spe.c |
6541 |
++++ b/tools/perf/util/arm-spe.c |
6542 |
+@@ -387,26 +387,16 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq, |
6543 |
+ return arm_spe_deliver_synth_event(spe, speq, event, &sample); |
6544 |
+ } |
6545 |
+ |
6546 |
+-#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \ |
6547 |
+- ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \ |
6548 |
+- ARM_SPE_REMOTE_ACCESS) |
6549 |
+- |
6550 |
+-static bool arm_spe__is_memory_event(enum arm_spe_sample_type type) |
6551 |
+-{ |
6552 |
+- if (type & SPE_MEM_TYPE) |
6553 |
+- return true; |
6554 |
+- |
6555 |
+- return false; |
6556 |
+-} |
6557 |
+- |
6558 |
+ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record) |
6559 |
+ { |
6560 |
+ union perf_mem_data_src data_src = { 0 }; |
6561 |
+ |
6562 |
+ if (record->op == ARM_SPE_LD) |
6563 |
+ data_src.mem_op = PERF_MEM_OP_LOAD; |
6564 |
+- else |
6565 |
++ else if (record->op == ARM_SPE_ST) |
6566 |
+ data_src.mem_op = PERF_MEM_OP_STORE; |
6567 |
++ else |
6568 |
++ return 0; |
6569 |
+ |
6570 |
+ if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) { |
6571 |
+ data_src.mem_lvl = PERF_MEM_LVL_L3; |
6572 |
+@@ -510,7 +500,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq) |
6573 |
+ return err; |
6574 |
+ } |
6575 |
+ |
6576 |
+- if (spe->sample_memory && arm_spe__is_memory_event(record->type)) { |
6577 |
++ /* |
6578 |
++ * so skip synthesizing a memory sample in this case. |
6579 |
++ * skip to synthesize memory sample for this case. |
6580 |
++ */ |
6581 |
++ if (spe->sample_memory && data_src) { |
6582 |
+ err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src); |
6583 |
+ if (err) |
6584 |
+ return err; |
6585 |
+diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c |
6586 |
+index 82f3d46bea70e..328668f38c69d 100644 |
6587 |
+--- a/tools/perf/util/build-id.c |
6588 |
++++ b/tools/perf/util/build-id.c |
6589 |
+@@ -872,6 +872,30 @@ out_free: |
6590 |
+ return err; |
6591 |
+ } |
6592 |
+ |
6593 |
++static int filename__read_build_id_ns(const char *filename, |
6594 |
++ struct build_id *bid, |
6595 |
++ struct nsinfo *nsi) |
6596 |
++{ |
6597 |
++ struct nscookie nsc; |
6598 |
++ int ret; |
6599 |
++ |
6600 |
++ nsinfo__mountns_enter(nsi, &nsc); |
6601 |
++ ret = filename__read_build_id(filename, bid); |
6602 |
++ nsinfo__mountns_exit(&nsc); |
6603 |
++ |
6604 |
++ return ret; |
6605 |
++} |
6606 |
++ |
6607 |
++static bool dso__build_id_mismatch(struct dso *dso, const char *name) |
6608 |
++{ |
6609 |
++ struct build_id bid; |
6610 |
++ |
6611 |
++ if (filename__read_build_id_ns(name, &bid, dso->nsinfo) < 0) |
6612 |
++ return false; |
6613 |
++ |
6614 |
++ return !dso__build_id_equal(dso, &bid); |
6615 |
++} |
6616 |
++ |
6617 |
+ static int dso__cache_build_id(struct dso *dso, struct machine *machine, |
6618 |
+ void *priv __maybe_unused) |
6619 |
+ { |
6620 |
+@@ -886,6 +910,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine, |
6621 |
+ is_kallsyms = true; |
6622 |
+ name = machine->mmap_name; |
6623 |
+ } |
6624 |
++ |
6625 |
++ if (!is_kallsyms && dso__build_id_mismatch(dso, name)) |
6626 |
++ return 0; |
6627 |
++ |
6628 |
+ return build_id_cache__add_b(&dso->bid, name, dso->nsinfo, |
6629 |
+ is_kallsyms, is_vdso); |
6630 |
+ } |
6631 |
+diff --git a/tools/testing/selftests/dma/Makefile b/tools/testing/selftests/dma/Makefile |
6632 |
+index aa8e8b5b3864e..cd8c5ece1cba4 100644 |
6633 |
+--- a/tools/testing/selftests/dma/Makefile |
6634 |
++++ b/tools/testing/selftests/dma/Makefile |
6635 |
+@@ -1,5 +1,6 @@ |
6636 |
+ # SPDX-License-Identifier: GPL-2.0 |
6637 |
+ CFLAGS += -I../../../../usr/include/ |
6638 |
++CFLAGS += -I../../../../include/ |
6639 |
+ |
6640 |
+ TEST_GEN_PROGS := dma_map_benchmark |
6641 |
+ |
6642 |
+diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c |
6643 |
+index c3b3c09e995e8..5c997f17fcbdb 100644 |
6644 |
+--- a/tools/testing/selftests/dma/dma_map_benchmark.c |
6645 |
++++ b/tools/testing/selftests/dma/dma_map_benchmark.c |
6646 |
+@@ -10,8 +10,8 @@ |
6647 |
+ #include <unistd.h> |
6648 |
+ #include <sys/ioctl.h> |
6649 |
+ #include <sys/mman.h> |
6650 |
+-#include <linux/map_benchmark.h> |
6651 |
+ #include <linux/types.h> |
6652 |
++#include <linux/map_benchmark.h> |
6653 |
+ |
6654 |
+ #define NSEC_PER_MSEC 1000000L |
6655 |
+ |
6656 |
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh |
6657 |
+index 54701c8b0cd70..03b586760164a 100755 |
6658 |
+--- a/tools/testing/selftests/net/fcnal-test.sh |
6659 |
++++ b/tools/testing/selftests/net/fcnal-test.sh |
6660 |
+@@ -70,6 +70,10 @@ NSB_LO_IP6=2001:db8:2::2 |
6661 |
+ NL_IP=172.17.1.1 |
6662 |
+ NL_IP6=2001:db8:4::1 |
6663 |
+ |
6664 |
++# multicast and broadcast addresses |
6665 |
++MCAST_IP=224.0.0.1 |
6666 |
++BCAST_IP=255.255.255.255 |
6667 |
++ |
6668 |
+ MD5_PW=abc123 |
6669 |
+ MD5_WRONG_PW=abc1234 |
6670 |
+ |
6671 |
+@@ -308,6 +312,9 @@ addr2str() |
6672 |
+ 127.0.0.1) echo "loopback";; |
6673 |
+ ::1) echo "IPv6 loopback";; |
6674 |
+ |
6675 |
++ ${BCAST_IP}) echo "broadcast";; |
6676 |
++ ${MCAST_IP}) echo "multicast";; |
6677 |
++ |
6678 |
+ ${NSA_IP}) echo "ns-A IP";; |
6679 |
+ ${NSA_IP6}) echo "ns-A IPv6";; |
6680 |
+ ${NSA_LO_IP}) echo "ns-A loopback IP";; |
6681 |
+@@ -1793,12 +1800,33 @@ ipv4_addr_bind_novrf() |
6682 |
+ done |
6683 |
+ |
6684 |
+ # |
6685 |
+- # raw socket with nonlocal bind |
6686 |
++ # tests for nonlocal bind |
6687 |
+ # |
6688 |
+ a=${NL_IP} |
6689 |
+ log_start |
6690 |
+- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b |
6691 |
+- log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind" |
6692 |
++ run_cmd nettest -s -R -f -l ${a} -b |
6693 |
++ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address" |
6694 |
++ |
6695 |
++ log_start |
6696 |
++ run_cmd nettest -s -f -l ${a} -b |
6697 |
++ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address" |
6698 |
++ |
6699 |
++ log_start |
6700 |
++ run_cmd nettest -s -D -P icmp -f -l ${a} -b |
6701 |
++ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address" |
6702 |
++ |
6703 |
++ # |
6704 |
++ # check that ICMP sockets cannot bind to broadcast and multicast addresses |
6705 |
++ # |
6706 |
++ a=${BCAST_IP} |
6707 |
++ log_start |
6708 |
++ run_cmd nettest -s -D -P icmp -l ${a} -b |
6709 |
++ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address" |
6710 |
++ |
6711 |
++ a=${MCAST_IP} |
6712 |
++ log_start |
6713 |
++ run_cmd nettest -s -D -P icmp -l ${a} -b |
6714 |
++ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address" |
6715 |
+ |
6716 |
+ # |
6717 |
+ # tcp sockets |
6718 |
+@@ -1850,13 +1878,34 @@ ipv4_addr_bind_vrf() |
6719 |
+ log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind" |
6720 |
+ |
6721 |
+ # |
6722 |
+- # raw socket with nonlocal bind |
6723 |
++ # tests for nonlocal bind |
6724 |
+ # |
6725 |
+ a=${NL_IP} |
6726 |
+ log_start |
6727 |
+- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b |
6728 |
++ run_cmd nettest -s -R -f -l ${a} -I ${VRF} -b |
6729 |
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind" |
6730 |
+ |
6731 |
++ log_start |
6732 |
++ run_cmd nettest -s -f -l ${a} -I ${VRF} -b |
6733 |
++ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address after VRF bind" |
6734 |
++ |
6735 |
++ log_start |
6736 |
++ run_cmd nettest -s -D -P icmp -f -l ${a} -I ${VRF} -b |
6737 |
++ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address after VRF bind" |
6738 |
++ |
6739 |
++ # |
6740 |
++ # check that ICMP sockets cannot bind to broadcast and multicast addresses |
6741 |
++ # |
6742 |
++ a=${BCAST_IP} |
6743 |
++ log_start |
6744 |
++ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b |
6745 |
++ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address after VRF bind" |
6746 |
++ |
6747 |
++ a=${MCAST_IP} |
6748 |
++ log_start |
6749 |
++ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b |
6750 |
++ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address after VRF bind" |
6751 |
++ |
6752 |
+ # |
6753 |
+ # tcp sockets |
6754 |
+ # |
6755 |
+@@ -1889,10 +1938,12 @@ ipv4_addr_bind() |
6756 |
+ |
6757 |
+ log_subsection "No VRF" |
6758 |
+ setup |
6759 |
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null |
6760 |
+ ipv4_addr_bind_novrf |
6761 |
+ |
6762 |
+ log_subsection "With VRF" |
6763 |
+ setup "yes" |
6764 |
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null |
6765 |
+ ipv4_addr_bind_vrf |
6766 |
+ } |
6767 |
+ |
6768 |
+diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh |
6769 |
+index b35010cc7f6ae..a6991877e50cd 100755 |
6770 |
+--- a/tools/testing/selftests/netfilter/nft_concat_range.sh |
6771 |
++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh |
6772 |
+@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload" |
6773 |
+ |
6774 |
+ # List of possible paths to pktgen script from kernel tree for performance tests |
6775 |
+ PKTGEN_SCRIPT_PATHS=" |
6776 |
+- ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh |
6777 |
++ ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh |
6778 |
+ pktgen/pktgen_bench_xmit_mode_netif_receive.sh" |
6779 |
+ |
6780 |
+ # Definition of set types: |