commit:     990a05cd08431dc75fc47455023ef4e99fdfbca2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 31 15:30:27 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 31 15:30:27 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=990a05cd

Linux patch 5.15.86

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1085_linux-5.15.86.patch | 37180 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 37184 insertions(+)

diff --git a/0000_README b/0000_README
index be8eaa68..2eeb88bf 100644
--- a/0000_README
+++ b/0000_README
@@ -383,6 +383,10 @@ Patch: 1084_linux-5.15.85.patch
From: http://www.kernel.org
Desc: Linux 5.15.85

+Patch: 1085_linux-5.15.86.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.86
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1085_linux-5.15.86.patch b/1085_linux-5.15.86.patch
new file mode 100644
index 00000000..36fa572a
--- /dev/null
+++ b/1085_linux-5.15.86.patch
@@ -0,0 +1,37180 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor |
39 |
+index d76cd3946434d..e9ef69aef20b1 100644 |
40 |
+--- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor |
41 |
++++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor |
42 |
+@@ -5,6 +5,9 @@ Contact: linux-mtd@×××××××××××××××.org |
43 |
+ Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the |
44 |
+ flash device. |
45 |
+ |
46 |
++ The attribute is not present if the flash doesn't support |
47 |
++ the "Read JEDEC ID" command (9Fh). This is the case for |
48 |
++ non-JEDEC compliant flashes. |
49 |
+ |
50 |
+ What: /sys/bus/spi/devices/.../spi-nor/manufacturer |
51 |
+ Date: April 2021 |
52 |
+diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml |
53 |
+index acea1cd444fd5..9b0548264a397 100644 |
54 |
+--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml |
55 |
++++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml |
56 |
+@@ -14,9 +14,6 @@ description: |+ |
57 |
+ This PCIe host controller is based on the Synopsys DesignWare PCIe IP |
58 |
+ and thus inherits all the common properties defined in snps,dw-pcie.yaml. |
59 |
+ |
60 |
+-allOf: |
61 |
+- - $ref: /schemas/pci/snps,dw-pcie.yaml# |
62 |
+- |
63 |
+ properties: |
64 |
+ compatible: |
65 |
+ enum: |
66 |
+@@ -59,7 +56,7 @@ properties: |
67 |
+ - const: pcie |
68 |
+ - const: pcie_bus |
69 |
+ - const: pcie_phy |
70 |
+- - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie |
71 |
++ - enum: [ pcie_inbound_axi, pcie_aux ] |
72 |
+ |
73 |
+ num-lanes: |
74 |
+ const: 1 |
75 |
+@@ -166,6 +163,47 @@ required: |
76 |
+ - clocks |
77 |
+ - clock-names |
78 |
+ |
79 |
++allOf: |
80 |
++ - $ref: /schemas/pci/snps,dw-pcie.yaml# |
81 |
++ - if: |
82 |
++ properties: |
83 |
++ compatible: |
84 |
++ contains: |
85 |
++ const: fsl,imx6sx-pcie |
86 |
++ then: |
87 |
++ properties: |
88 |
++ clock-names: |
89 |
++ items: |
90 |
++ - {} |
91 |
++ - {} |
92 |
++ - {} |
93 |
++ - const: pcie_inbound_axi |
94 |
++ - if: |
95 |
++ properties: |
96 |
++ compatible: |
97 |
++ contains: |
98 |
++ const: fsl,imx8mq-pcie |
99 |
++ then: |
100 |
++ properties: |
101 |
++ clock-names: |
102 |
++ items: |
103 |
++ - {} |
104 |
++ - {} |
105 |
++ - {} |
106 |
++ - const: pcie_aux |
107 |
++ - if: |
108 |
++ properties: |
109 |
++ compatible: |
110 |
++ not: |
111 |
++ contains: |
112 |
++ enum: |
113 |
++ - fsl,imx6sx-pcie |
114 |
++ - fsl,imx8mq-pcie |
115 |
++ then: |
116 |
++ properties: |
117 |
++ clock-names: |
118 |
++ maxItems: 3 |
119 |
++ |
120 |
+ unevaluatedProperties: false |
121 |
+ |
122 |
+ examples: |
123 |
+diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml |
124 |
+index 30b6396d83c83..aea0e2bcdd778 100644 |
125 |
+--- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml |
126 |
++++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml |
127 |
+@@ -36,7 +36,7 @@ properties: |
128 |
+ - const: mpu |
129 |
+ |
130 |
+ interrupts: |
131 |
+- maxItems: 1 |
132 |
++ maxItems: 2 |
133 |
+ |
134 |
+ clocks: |
135 |
+ items: |
136 |
+@@ -94,8 +94,9 @@ examples: |
137 |
+ #interrupt-cells = <1>; |
138 |
+ ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>, |
139 |
+ <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>; |
140 |
+- interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>; |
141 |
+- interrupt-names = "intr"; |
142 |
++ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>, |
143 |
++ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>; |
144 |
++ interrupt-names = "msi", "intr"; |
145 |
+ interrupt-map-mask = <0 0 0 7>; |
146 |
+ interrupt-map = |
147 |
+ <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH |
148 |
+diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt |
149 |
+index 5d6ea66a863fe..1f75feec3dec6 100644 |
150 |
+--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt |
151 |
++++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt |
152 |
+@@ -109,7 +109,7 @@ audio-codec@1{ |
153 |
+ reg = <1 0>; |
154 |
+ interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; |
155 |
+ interrupt-names = "intr2" |
156 |
+- reset-gpios = <&msmgpio 64 0>; |
157 |
++ reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>; |
158 |
+ slim-ifc-dev = <&wc9335_ifd>; |
159 |
+ clock-names = "mclk", "native"; |
160 |
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>, |
161 |
+diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst |
162 |
+index f64cb666498aa..f28887045049d 100644 |
163 |
+--- a/Documentation/driver-api/spi.rst |
164 |
++++ b/Documentation/driver-api/spi.rst |
165 |
+@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as |
166 |
+ a pair of FIFOs connected to dual DMA engines on the other side of the |
167 |
+ SPI shift register (maximizing throughput). Such drivers bridge between |
168 |
+ whatever bus they sit on (often the platform bus) and SPI, and expose |
169 |
+-the SPI side of their device as a :c:type:`struct spi_master |
170 |
+-<spi_master>`. SPI devices are children of that master, |
171 |
++the SPI side of their device as a :c:type:`struct spi_controller |
172 |
++<spi_controller>`. SPI devices are children of that master, |
173 |
+ represented as a :c:type:`struct spi_device <spi_device>` and |
174 |
+ manufactured from :c:type:`struct spi_board_info |
175 |
+ <spi_board_info>` descriptors which are usually provided by |
176 |
+diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst |
177 |
+index 4a25c5eb6f072..8c47847755a68 100644 |
178 |
+--- a/Documentation/fault-injection/fault-injection.rst |
179 |
++++ b/Documentation/fault-injection/fault-injection.rst |
180 |
+@@ -83,9 +83,7 @@ configuration of fault-injection capabilities. |
181 |
+ - /sys/kernel/debug/fail*/times: |
182 |
+ |
183 |
+ specifies how many times failures may happen at most. A value of -1 |
184 |
+- means "no limit". Note, though, that this file only accepts unsigned |
185 |
+- values. So, if you want to specify -1, you better use 'printf' instead |
186 |
+- of 'echo', e.g.: $ printf %#x -1 > times |
187 |
++ means "no limit". |
188 |
+ |
189 |
+ - /sys/kernel/debug/fail*/space: |
190 |
+ |
191 |
+@@ -277,7 +275,7 @@ Application Examples |
192 |
+ echo Y > /sys/kernel/debug/$FAILTYPE/task-filter |
193 |
+ echo 10 > /sys/kernel/debug/$FAILTYPE/probability |
194 |
+ echo 100 > /sys/kernel/debug/$FAILTYPE/interval |
195 |
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times |
196 |
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times |
197 |
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space |
198 |
+ echo 2 > /sys/kernel/debug/$FAILTYPE/verbose |
199 |
+ echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait |
200 |
+@@ -331,7 +329,7 @@ Application Examples |
201 |
+ echo N > /sys/kernel/debug/$FAILTYPE/task-filter |
202 |
+ echo 10 > /sys/kernel/debug/$FAILTYPE/probability |
203 |
+ echo 100 > /sys/kernel/debug/$FAILTYPE/interval |
204 |
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times |
205 |
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times |
206 |
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space |
207 |
+ echo 2 > /sys/kernel/debug/$FAILTYPE/verbose |
208 |
+ echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait |
209 |
+@@ -362,7 +360,7 @@ Application Examples |
210 |
+ echo N > /sys/kernel/debug/$FAILTYPE/task-filter |
211 |
+ echo 100 > /sys/kernel/debug/$FAILTYPE/probability |
212 |
+ echo 0 > /sys/kernel/debug/$FAILTYPE/interval |
213 |
+- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times |
214 |
++ echo -1 > /sys/kernel/debug/$FAILTYPE/times |
215 |
+ echo 0 > /sys/kernel/debug/$FAILTYPE/space |
216 |
+ echo 1 > /sys/kernel/debug/$FAILTYPE/verbose |
217 |
+ |
218 |
+diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst |
219 |
+index 8ced754a5a0f6..f3484f60eae59 100644 |
220 |
+--- a/Documentation/process/deprecated.rst |
221 |
++++ b/Documentation/process/deprecated.rst |
222 |
+@@ -70,6 +70,9 @@ Instead, the 2-factor form of the allocator should be used:: |
223 |
+ |
224 |
+ foo = kmalloc_array(count, size, GFP_KERNEL); |
225 |
+ |
226 |
++Specifically, kmalloc() can be replaced with kmalloc_array(), and |
227 |
++kzalloc() can be replaced with kcalloc(). |
228 |
++ |
229 |
+ If no 2-factor form is available, the saturate-on-overflow helpers should |
230 |
+ be used:: |
231 |
+ |
232 |
+@@ -90,9 +93,20 @@ Instead, use the helper:: |
233 |
+ array usage and switch to a `flexible array member |
234 |
+ <#zero-length-and-one-element-arrays>`_ instead. |
235 |
+ |
236 |
+-See array_size(), array3_size(), and struct_size(), |
237 |
+-for more details as well as the related check_add_overflow() and |
238 |
+-check_mul_overflow() family of functions. |
239 |
++For other calculations, please compose the use of the size_mul(), |
240 |
++size_add(), and size_sub() helpers. For example, in the case of:: |
241 |
++ |
242 |
++ foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL); |
243 |
++ |
244 |
++Instead, use the helpers:: |
245 |
++ |
246 |
++ foo = krealloc(size_add(current_size, |
247 |
++ size_mul(chunk_size, |
248 |
++ size_sub(count, 3))), GFP_KERNEL); |
249 |
++ |
250 |
++For more details, also see array3_size() and flex_array_size(), |
251 |
++as well as the related check_mul_overflow(), check_add_overflow(), |
252 |
++check_sub_overflow(), and check_shl_overflow() family of functions. |
253 |
+ |
254 |
+ simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull() |
255 |
+ ---------------------------------------------------------------------- |
256 |
+diff --git a/MAINTAINERS b/MAINTAINERS |
257 |
+index 1cf05aee91afc..4f50a453e18ac 100644 |
258 |
+--- a/MAINTAINERS |
259 |
++++ b/MAINTAINERS |
260 |
+@@ -7746,7 +7746,7 @@ F: Documentation/locking/*futex* |
261 |
+ F: include/asm-generic/futex.h |
262 |
+ F: include/linux/futex.h |
263 |
+ F: include/uapi/linux/futex.h |
264 |
+-F: kernel/futex.c |
265 |
++F: kernel/futex/* |
266 |
+ F: tools/perf/bench/futex* |
267 |
+ F: tools/testing/selftests/futex/ |
268 |
+ |
269 |
+diff --git a/Makefile b/Makefile |
270 |
+index 314864891d499..9f5d2e87150ed 100644 |
271 |
+--- a/Makefile |
272 |
++++ b/Makefile |
273 |
+@@ -1,7 +1,7 @@ |
274 |
+ # SPDX-License-Identifier: GPL-2.0 |
275 |
+ VERSION = 5 |
276 |
+ PATCHLEVEL = 15 |
277 |
+-SUBLEVEL = 85 |
278 |
++SUBLEVEL = 86 |
279 |
+ EXTRAVERSION = |
280 |
+ NAME = Trick or Treat |
281 |
+ |
282 |
+diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h |
283 |
+index 2592356e32154..0ce1eee0924b1 100644 |
284 |
+--- a/arch/alpha/include/asm/thread_info.h |
285 |
++++ b/arch/alpha/include/asm/thread_info.h |
286 |
+@@ -77,7 +77,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); |
287 |
+ |
288 |
+ /* Work to do on interrupt/exception return. */ |
289 |
+ #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
290 |
+- _TIF_NOTIFY_RESUME) |
291 |
++ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL) |
292 |
+ |
293 |
+ /* Work to do on any return to userspace. */ |
294 |
+ #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ |
295 |
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S |
296 |
+index e227f3a29a43c..c41a5a9c3b9f2 100644 |
297 |
+--- a/arch/alpha/kernel/entry.S |
298 |
++++ b/arch/alpha/kernel/entry.S |
299 |
+@@ -469,8 +469,10 @@ entSys: |
300 |
+ #ifdef CONFIG_AUDITSYSCALL |
301 |
+ lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
302 |
+ and $3, $6, $3 |
303 |
+-#endif |
304 |
+ bne $3, strace |
305 |
++#else |
306 |
++ blbs $3, strace /* check for SYSCALL_TRACE in disguise */ |
307 |
++#endif |
308 |
+ beq $4, 1f |
309 |
+ ldq $27, 0($5) |
310 |
+ 1: jsr $26, ($27), sys_ni_syscall |
311 |
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi |
312 |
+index 46e6d3ed8f35a..c042c416a94a3 100644 |
313 |
+--- a/arch/arm/boot/dts/armada-370.dtsi |
314 |
++++ b/arch/arm/boot/dts/armada-370.dtsi |
315 |
+@@ -74,7 +74,7 @@ |
316 |
+ |
317 |
+ pcie2: pcie@2,0 { |
318 |
+ device_type = "pci"; |
319 |
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; |
320 |
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; |
321 |
+ reg = <0x1000 0 0 0 0>; |
322 |
+ #address-cells = <3>; |
323 |
+ #size-cells = <2>; |
324 |
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi |
325 |
+index 7f2f24a29e6c1..352a2f7ba3114 100644 |
326 |
+--- a/arch/arm/boot/dts/armada-375.dtsi |
327 |
++++ b/arch/arm/boot/dts/armada-375.dtsi |
328 |
+@@ -582,7 +582,7 @@ |
329 |
+ |
330 |
+ pcie1: pcie@2,0 { |
331 |
+ device_type = "pci"; |
332 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
333 |
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; |
334 |
+ reg = <0x1000 0 0 0 0>; |
335 |
+ #address-cells = <3>; |
336 |
+ #size-cells = <2>; |
337 |
+diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi |
338 |
+index cff1269f3fbfd..7146cc8f082af 100644 |
339 |
+--- a/arch/arm/boot/dts/armada-380.dtsi |
340 |
++++ b/arch/arm/boot/dts/armada-380.dtsi |
341 |
+@@ -79,7 +79,7 @@ |
342 |
+ /* x1 port */ |
343 |
+ pcie@2,0 { |
344 |
+ device_type = "pci"; |
345 |
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; |
346 |
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; |
347 |
+ reg = <0x1000 0 0 0 0>; |
348 |
+ #address-cells = <3>; |
349 |
+ #size-cells = <2>; |
350 |
+@@ -98,7 +98,7 @@ |
351 |
+ /* x1 port */ |
352 |
+ pcie@3,0 { |
353 |
+ device_type = "pci"; |
354 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
355 |
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; |
356 |
+ reg = <0x1800 0 0 0 0>; |
357 |
+ #address-cells = <3>; |
358 |
+ #size-cells = <2>; |
359 |
+diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts |
360 |
+index 01b0dfd55d703..e7649c795699c 100644 |
361 |
+--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts |
362 |
++++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts |
363 |
+@@ -23,6 +23,12 @@ |
364 |
+ stdout-path = &uart0; |
365 |
+ }; |
366 |
+ |
367 |
++ aliases { |
368 |
++ ethernet0 = ð0; |
369 |
++ ethernet1 = ð1; |
370 |
++ ethernet2 = ð2; |
371 |
++ }; |
372 |
++ |
373 |
+ memory { |
374 |
+ device_type = "memory"; |
375 |
+ reg = <0x00000000 0x40000000>; /* 1024 MB */ |
376 |
+@@ -450,7 +456,17 @@ |
377 |
+ }; |
378 |
+ }; |
379 |
+ |
380 |
+- /* port 6 is connected to eth0 */ |
381 |
++ ports@6 { |
382 |
++ reg = <6>; |
383 |
++ label = "cpu"; |
384 |
++ ethernet = <ð0>; |
385 |
++ phy-mode = "rgmii-id"; |
386 |
++ |
387 |
++ fixed-link { |
388 |
++ speed = <1000>; |
389 |
++ full-duplex; |
390 |
++ }; |
391 |
++ }; |
392 |
+ }; |
393 |
+ }; |
394 |
+ }; |
395 |
+diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi |
396 |
+index f0022d10c7159..f081f7cb66e5f 100644 |
397 |
+--- a/arch/arm/boot/dts/armada-385.dtsi |
398 |
++++ b/arch/arm/boot/dts/armada-385.dtsi |
399 |
+@@ -84,7 +84,7 @@ |
400 |
+ /* x1 port */ |
401 |
+ pcie2: pcie@2,0 { |
402 |
+ device_type = "pci"; |
403 |
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; |
404 |
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; |
405 |
+ reg = <0x1000 0 0 0 0>; |
406 |
+ #address-cells = <3>; |
407 |
+ #size-cells = <2>; |
408 |
+@@ -103,7 +103,7 @@ |
409 |
+ /* x1 port */ |
410 |
+ pcie3: pcie@3,0 { |
411 |
+ device_type = "pci"; |
412 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
413 |
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; |
414 |
+ reg = <0x1800 0 0 0 0>; |
415 |
+ #address-cells = <3>; |
416 |
+ #size-cells = <2>; |
417 |
+@@ -125,7 +125,7 @@ |
418 |
+ */ |
419 |
+ pcie4: pcie@4,0 { |
420 |
+ device_type = "pci"; |
421 |
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; |
422 |
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; |
423 |
+ reg = <0x2000 0 0 0 0>; |
424 |
+ #address-cells = <3>; |
425 |
+ #size-cells = <2>; |
426 |
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi |
427 |
+index e0b7c20998312..9525e7b7f4360 100644 |
428 |
+--- a/arch/arm/boot/dts/armada-39x.dtsi |
429 |
++++ b/arch/arm/boot/dts/armada-39x.dtsi |
430 |
+@@ -453,7 +453,7 @@ |
431 |
+ /* x1 port */ |
432 |
+ pcie@2,0 { |
433 |
+ device_type = "pci"; |
434 |
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; |
435 |
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; |
436 |
+ reg = <0x1000 0 0 0 0>; |
437 |
+ #address-cells = <3>; |
438 |
+ #size-cells = <2>; |
439 |
+@@ -472,7 +472,7 @@ |
440 |
+ /* x1 port */ |
441 |
+ pcie@3,0 { |
442 |
+ device_type = "pci"; |
443 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
444 |
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; |
445 |
+ reg = <0x1800 0 0 0 0>; |
446 |
+ #address-cells = <3>; |
447 |
+ #size-cells = <2>; |
448 |
+@@ -494,7 +494,7 @@ |
449 |
+ */ |
450 |
+ pcie@4,0 { |
451 |
+ device_type = "pci"; |
452 |
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; |
453 |
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; |
454 |
+ reg = <0x2000 0 0 0 0>; |
455 |
+ #address-cells = <3>; |
456 |
+ #size-cells = <2>; |
457 |
+diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi |
458 |
+index 8558bf6bb54c6..d55fe162fc7f0 100644 |
459 |
+--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi |
460 |
++++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi |
461 |
+@@ -97,7 +97,7 @@ |
462 |
+ |
463 |
+ pcie2: pcie@2,0 { |
464 |
+ device_type = "pci"; |
465 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
466 |
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; |
467 |
+ reg = <0x1000 0 0 0 0>; |
468 |
+ #address-cells = <3>; |
469 |
+ #size-cells = <2>; |
470 |
+@@ -115,7 +115,7 @@ |
471 |
+ |
472 |
+ pcie3: pcie@3,0 { |
473 |
+ device_type = "pci"; |
474 |
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; |
475 |
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; |
476 |
+ reg = <0x1800 0 0 0 0>; |
477 |
+ #address-cells = <3>; |
478 |
+ #size-cells = <2>; |
479 |
+@@ -133,7 +133,7 @@ |
480 |
+ |
481 |
+ pcie4: pcie@4,0 { |
482 |
+ device_type = "pci"; |
483 |
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; |
484 |
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; |
485 |
+ reg = <0x2000 0 0 0 0>; |
486 |
+ #address-cells = <3>; |
487 |
+ #size-cells = <2>; |
488 |
+@@ -151,7 +151,7 @@ |
489 |
+ |
490 |
+ pcie5: pcie@5,0 { |
491 |
+ device_type = "pci"; |
492 |
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; |
493 |
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; |
494 |
+ reg = <0x2800 0 0 0 0>; |
495 |
+ #address-cells = <3>; |
496 |
+ #size-cells = <2>; |
497 |
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi |
498 |
+index 2d85fe8ac3272..fdcc818199401 100644 |
499 |
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi |
500 |
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi |
501 |
+@@ -112,7 +112,7 @@ |
502 |
+ |
503 |
+ pcie2: pcie@2,0 { |
504 |
+ device_type = "pci"; |
505 |
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; |
506 |
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; |
507 |
+ reg = <0x1000 0 0 0 0>; |
508 |
+ #address-cells = <3>; |
509 |
+ #size-cells = <2>; |
510 |
+@@ -130,7 +130,7 @@ |
511 |
+ |
512 |
+ pcie3: pcie@3,0 { |
513 |
+ device_type = "pci"; |
514 |
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; |
515 |
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; |
516 |
+ reg = <0x1800 0 0 0 0>; |
517 |
+ #address-cells = <3>; |
518 |
+ #size-cells = <2>; |
519 |
+@@ -148,7 +148,7 @@ |
520 |
+ |
521 |
+ pcie4: pcie@4,0 { |
522 |
+ device_type = "pci"; |
523 |
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; |
524 |
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; |
525 |
+ reg = <0x2000 0 0 0 0>; |
526 |
+ #address-cells = <3>; |
527 |
+ #size-cells = <2>; |
528 |
+@@ -166,7 +166,7 @@ |
529 |
+ |
530 |
+ pcie5: pcie@5,0 { |
531 |
+ device_type = "pci"; |
532 |
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; |
533 |
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; |
534 |
+ reg = <0x2800 0 0 0 0>; |
535 |
+ #address-cells = <3>; |
536 |
+ #size-cells = <2>; |
537 |
+@@ -184,7 +184,7 @@ |
538 |
+ |
539 |
+ pcie6: pcie@6,0 { |
540 |
+ device_type = "pci"; |
541 |
+- assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; |
542 |
++ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>; |
543 |
+ reg = <0x3000 0 0 0 0>; |
544 |
+ #address-cells = <3>; |
545 |
+ #size-cells = <2>; |
546 |
+@@ -202,7 +202,7 @@ |
547 |
+ |
548 |
+ pcie7: pcie@7,0 { |
549 |
+ device_type = "pci"; |
550 |
+- assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; |
551 |
++ assigned-addresses = <0x82003800 0 0x88000 0 0x2000>; |
552 |
+ reg = <0x3800 0 0 0 0>; |
553 |
+ #address-cells = <3>; |
554 |
+ #size-cells = <2>; |
555 |
+@@ -220,7 +220,7 @@ |
556 |
+ |
557 |
+ pcie8: pcie@8,0 { |
558 |
+ device_type = "pci"; |
559 |
+- assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; |
560 |
++ assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>; |
561 |
+ reg = <0x4000 0 0 0 0>; |
562 |
+ #address-cells = <3>; |
563 |
+ #size-cells = <2>; |
564 |
+@@ -238,7 +238,7 @@ |
565 |
+ |
566 |
+ pcie9: pcie@9,0 { |
567 |
+ device_type = "pci"; |
568 |
+- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; |
569 |
++ assigned-addresses = <0x82004800 0 0x42000 0 0x2000>; |
570 |
+ reg = <0x4800 0 0 0 0>; |
571 |
+ #address-cells = <3>; |
572 |
+ #size-cells = <2>; |
573 |
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi |
574 |
+index 89e0bdaf3a85f..726d353eda686 100644 |
575 |
+--- a/arch/arm/boot/dts/dove.dtsi |
576 |
++++ b/arch/arm/boot/dts/dove.dtsi |
577 |
+@@ -129,7 +129,7 @@ |
578 |
+ pcie1: pcie@2 { |
579 |
+ device_type = "pci"; |
580 |
+ status = "disabled"; |
581 |
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; |
582 |
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; |
583 |
+ reg = <0x1000 0 0 0 0>; |
584 |
+ clocks = <&gate_clk 5>; |
585 |
+ marvell,pcie-port = <1>; |
586 |
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts |
587 |
+index eb6eb21cb2a44..33c8d5b3d679a 100644 |
588 |
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts |
589 |
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts |
590 |
+@@ -366,7 +366,7 @@ |
591 |
+ spi-max-frequency = <20000000>; |
592 |
+ spi-rx-bus-width = <2>; |
593 |
+ label = "bmc"; |
594 |
+- partitions@80000000 { |
595 |
++ partitions { |
596 |
+ compatible = "fixed-partitions"; |
597 |
+ #address-cells = <1>; |
598 |
+ #size-cells = <1>; |
599 |
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts |
600 |
+index d4ff49939a3d9..bbe18618f5c56 100644 |
601 |
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts |
602 |
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts |
603 |
+@@ -142,7 +142,7 @@ |
604 |
+ reg = <0>; |
605 |
+ spi-rx-bus-width = <2>; |
606 |
+ |
607 |
+- partitions@80000000 { |
608 |
++ partitions { |
609 |
+ compatible = "fixed-partitions"; |
610 |
+ #address-cells = <1>; |
611 |
+ #size-cells = <1>; |
612 |
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts |
613 |
+index 82a104b2a65f1..8e3425cb8e8b9 100644 |
614 |
+--- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts |
615 |
++++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts |
616 |
+@@ -388,7 +388,7 @@ |
617 |
+ spi-max-frequency = <5000000>; |
618 |
+ spi-rx-bus-width = <2>; |
619 |
+ label = "bmc"; |
620 |
+- partitions@80000000 { |
621 |
++ partitions { |
622 |
+ compatible = "fixed-partitions"; |
623 |
+ #address-cells = <1>; |
624 |
+ #size-cells = <1>; |
625 |
+@@ -422,7 +422,7 @@ |
626 |
+ reg = <1>; |
627 |
+ spi-max-frequency = <5000000>; |
628 |
+ spi-rx-bus-width = <2>; |
629 |
+- partitions@88000000 { |
630 |
++ partitions { |
631 |
+ compatible = "fixed-partitions"; |
632 |
+ #address-cells = <1>; |
633 |
+ #size-cells = <1>; |
634 |
+@@ -447,7 +447,7 @@ |
635 |
+ reg = <0>; |
636 |
+ spi-max-frequency = <5000000>; |
637 |
+ spi-rx-bus-width = <2>; |
638 |
+- partitions@A0000000 { |
639 |
++ partitions { |
640 |
+ compatible = "fixed-partitions"; |
641 |
+ #address-cells = <1>; |
642 |
+ #size-cells = <1>; |
643 |
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts |
644 |
+index 0334641f88292..cf274c926711a 100644 |
645 |
+--- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts |
646 |
++++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts |
647 |
+@@ -74,7 +74,7 @@ |
648 |
+ spi-rx-bus-width = <2>; |
649 |
+ reg = <0>; |
650 |
+ spi-max-frequency = <5000000>; |
651 |
+- partitions@80000000 { |
652 |
++ partitions { |
653 |
+ compatible = "fixed-partitions"; |
654 |
+ #address-cells = <1>; |
655 |
+ #size-cells = <1>; |
656 |
+@@ -135,7 +135,7 @@ |
657 |
+ spi-rx-bus-width = <2>; |
658 |
+ reg = <0>; |
659 |
+ spi-max-frequency = <5000000>; |
660 |
+- partitions@A0000000 { |
661 |
++ partitions { |
662 |
+ compatible = "fixed-partitions"; |
663 |
+ #address-cells = <1>; |
664 |
+ #size-cells = <1>; |
665 |
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts |
666 |
+index 767e0ac0df7c5..7fe7efee28acb 100644 |
667 |
+--- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts |
668 |
++++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts |
669 |
+@@ -107,7 +107,7 @@ |
670 |
+ reg = <0>; |
671 |
+ spi-rx-bus-width = <2>; |
672 |
+ |
673 |
+- partitions@80000000 { |
674 |
++ partitions { |
675 |
+ compatible = "fixed-partitions"; |
676 |
+ #address-cells = <1>; |
677 |
+ #size-cells = <1>; |
678 |
+@@ -146,7 +146,7 @@ |
679 |
+ reg = <1>; |
680 |
+ npcm,fiu-rx-bus-width = <2>; |
681 |
+ |
682 |
+- partitions@88000000 { |
683 |
++ partitions { |
684 |
+ compatible = "fixed-partitions"; |
685 |
+ #address-cells = <1>; |
686 |
+ #size-cells = <1>; |
687 |
+@@ -173,7 +173,7 @@ |
688 |
+ reg = <0>; |
689 |
+ spi-rx-bus-width = <2>; |
690 |
+ |
691 |
+- partitions@A0000000 { |
692 |
++ partitions { |
693 |
+ compatible = "fixed-partitions"; |
694 |
+ #address-cells = <1>; |
695 |
+ #size-cells = <1>; |
696 |
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi |
697 |
+index d1c1c6aab2b87..0e830476fefd2 100644 |
698 |
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi |
699 |
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi |
700 |
+@@ -1571,7 +1571,7 @@ |
701 |
+ }; |
702 |
+ |
703 |
+ etb@1a01000 { |
704 |
+- compatible = "coresight-etb10", "arm,primecell"; |
705 |
++ compatible = "arm,coresight-etb10", "arm,primecell"; |
706 |
+ reg = <0x1a01000 0x1000>; |
707 |
+ |
708 |
+ clocks = <&rpmcc RPM_QDSS_CLK>; |
709 |
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi |
710 |
+index fd41243a0b2c0..9d5a04a46b14e 100644 |
711 |
+--- a/arch/arm/boot/dts/spear600.dtsi |
712 |
++++ b/arch/arm/boot/dts/spear600.dtsi |
713 |
+@@ -47,7 +47,7 @@ |
714 |
+ compatible = "arm,pl110", "arm,primecell"; |
715 |
+ reg = <0xfc200000 0x1000>; |
716 |
+ interrupt-parent = <&vic1>; |
717 |
+- interrupts = <12>; |
718 |
++ interrupts = <13>; |
719 |
+ status = "disabled"; |
720 |
+ }; |
721 |
+ |
722 |
+diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts |
723 |
+index 2e3c9fbb4eb36..275167f26fd9d 100644 |
724 |
+--- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts |
725 |
++++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts |
726 |
+@@ -13,7 +13,6 @@ |
727 |
+ /dts-v1/; |
728 |
+ |
729 |
+ #include "stm32mp157.dtsi" |
730 |
+-#include "stm32mp15xc.dtsi" |
731 |
+ #include "stm32mp15xx-dhcor-som.dtsi" |
732 |
+ #include "stm32mp15xx-dhcor-avenger96.dtsi" |
733 |
+ |
734 |
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi |
735 |
+index 8eb51d84b6988..d3375ad8c91fc 100644 |
736 |
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi |
737 |
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi |
738 |
+@@ -100,7 +100,7 @@ |
739 |
+ regulator-min-microvolt = <3300000>; |
740 |
+ regulator-max-microvolt = <3300000>; |
741 |
+ |
742 |
+- gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>; |
743 |
++ gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>; |
744 |
+ enable-active-high; |
745 |
+ }; |
746 |
+ }; |
747 |
+diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c |
748 |
+index 41b2e8abc9e69..708816caf859c 100644 |
749 |
+--- a/arch/arm/mach-mmp/time.c |
750 |
++++ b/arch/arm/mach-mmp/time.c |
751 |
+@@ -43,18 +43,21 @@ |
752 |
+ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE; |
753 |
+ |
754 |
+ /* |
755 |
+- * FIXME: the timer needs some delay to stablize the counter capture |
756 |
++ * Read the timer through the CVWR register. Delay is required after requesting |
757 |
++ * a read. The CR register cannot be directly read due to metastability issues |
758 |
++ * documented in the PXA168 software manual. |
759 |
+ */ |
760 |
+ static inline uint32_t timer_read(void) |
761 |
+ { |
762 |
+- int delay = 100; |
763 |
++ uint32_t val; |
764 |
++ int delay = 3; |
765 |
+ |
766 |
+ __raw_writel(1, mmp_timer_base + TMR_CVWR(1)); |
767 |
+ |
768 |
+ while (delay--) |
769 |
+- cpu_relax(); |
770 |
++ val = __raw_readl(mmp_timer_base + TMR_CVWR(1)); |
771 |
+ |
772 |
+- return __raw_readl(mmp_timer_base + TMR_CVWR(1)); |
773 |
++ return val; |
774 |
+ } |
775 |
+ |
776 |
+ static u64 notrace mmp_read_sched_clock(void) |
777 |
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts |
778 |
+index 1cee26479bfec..b276dd77df83c 100644 |
779 |
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts |
780 |
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts |
781 |
+@@ -125,9 +125,12 @@ |
782 |
+ /delete-property/ mrvl,i2c-fast-mode; |
783 |
+ status = "okay"; |
784 |
+ |
785 |
++ /* MCP7940MT-I/MNY RTC */ |
786 |
+ rtc@6f { |
787 |
+ compatible = "microchip,mcp7940x"; |
788 |
+ reg = <0x6f>; |
789 |
++ interrupt-parent = <&gpiosb>; |
790 |
++ interrupts = <5 0>; /* GPIO2_5 */ |
791 |
+ }; |
792 |
+ }; |
793 |
+ |
794 |
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts |
795 |
+index 7d369fdd3117f..9d20cabf4f699 100644 |
796 |
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts |
797 |
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts |
798 |
+@@ -26,14 +26,14 @@ |
799 |
+ stdout-path = "serial0:921600n8"; |
800 |
+ }; |
801 |
+ |
802 |
+- cpus_fixed_vproc0: fixedregulator@0 { |
803 |
++ cpus_fixed_vproc0: regulator-vproc-buck0 { |
804 |
+ compatible = "regulator-fixed"; |
805 |
+ regulator-name = "vproc_buck0"; |
806 |
+ regulator-min-microvolt = <1000000>; |
807 |
+ regulator-max-microvolt = <1000000>; |
808 |
+ }; |
809 |
+ |
810 |
+- cpus_fixed_vproc1: fixedregulator@1 { |
811 |
++ cpus_fixed_vproc1: regulator-vproc-buck1 { |
812 |
+ compatible = "regulator-fixed"; |
813 |
+ regulator-name = "vproc_buck1"; |
814 |
+ regulator-min-microvolt = <1000000>; |
815 |
+@@ -50,7 +50,7 @@ |
816 |
+ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; |
817 |
+ }; |
818 |
+ |
819 |
+- usb_p0_vbus: regulator@2 { |
820 |
++ usb_p0_vbus: regulator-usb-p0-vbus { |
821 |
+ compatible = "regulator-fixed"; |
822 |
+ regulator-name = "p0_vbus"; |
823 |
+ regulator-min-microvolt = <5000000>; |
824 |
+@@ -59,7 +59,7 @@ |
825 |
+ enable-active-high; |
826 |
+ }; |
827 |
+ |
828 |
+- usb_p1_vbus: regulator@3 { |
829 |
++ usb_p1_vbus: regulator-usb-p1-vbus { |
830 |
+ compatible = "regulator-fixed"; |
831 |
+ regulator-name = "p1_vbus"; |
832 |
+ regulator-min-microvolt = <5000000>; |
833 |
+@@ -68,7 +68,7 @@ |
834 |
+ enable-active-high; |
835 |
+ }; |
836 |
+ |
837 |
+- usb_p2_vbus: regulator@4 { |
838 |
++ usb_p2_vbus: regulator-usb-p2-vbus { |
839 |
+ compatible = "regulator-fixed"; |
840 |
+ regulator-name = "p2_vbus"; |
841 |
+ regulator-min-microvolt = <5000000>; |
842 |
+@@ -77,7 +77,7 @@ |
843 |
+ enable-active-high; |
844 |
+ }; |
845 |
+ |
846 |
+- usb_p3_vbus: regulator@5 { |
847 |
++ usb_p3_vbus: regulator-usb-p3-vbus { |
848 |
+ compatible = "regulator-fixed"; |
849 |
+ regulator-name = "p3_vbus"; |
850 |
+ regulator-min-microvolt = <5000000>; |
851 |
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi |
852 |
+index a9cca9c146fdc..993a03d7fff14 100644 |
853 |
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi |
854 |
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi |
855 |
+@@ -160,70 +160,70 @@ |
856 |
+ #clock-cells = <0>; |
857 |
+ }; |
858 |
+ |
859 |
+- clk26m: oscillator@0 { |
860 |
++ clk26m: oscillator-26m { |
861 |
+ compatible = "fixed-clock"; |
862 |
+ #clock-cells = <0>; |
863 |
+ clock-frequency = <26000000>; |
864 |
+ clock-output-names = "clk26m"; |
865 |
+ }; |
866 |
+ |
867 |
+- clk32k: oscillator@1 { |
868 |
++ clk32k: oscillator-32k { |
869 |
+ compatible = "fixed-clock"; |
870 |
+ #clock-cells = <0>; |
871 |
+ clock-frequency = <32768>; |
872 |
+ clock-output-names = "clk32k"; |
873 |
+ }; |
874 |
+ |
875 |
+- clkfpc: oscillator@2 { |
876 |
++ clkfpc: oscillator-50m { |
877 |
+ compatible = "fixed-clock"; |
878 |
+ #clock-cells = <0>; |
879 |
+ clock-frequency = <50000000>; |
880 |
+ clock-output-names = "clkfpc"; |
881 |
+ }; |
882 |
+ |
883 |
+- clkaud_ext_i_0: oscillator@3 { |
884 |
++ clkaud_ext_i_0: oscillator-aud0 { |
885 |
+ compatible = "fixed-clock"; |
886 |
+ #clock-cells = <0>; |
887 |
+ clock-frequency = <6500000>; |
888 |
+ clock-output-names = "clkaud_ext_i_0"; |
889 |
+ }; |
890 |
+ |
891 |
+- clkaud_ext_i_1: oscillator@4 { |
892 |
++ clkaud_ext_i_1: oscillator-aud1 { |
893 |
+ compatible = "fixed-clock"; |
894 |
+ #clock-cells = <0>; |
895 |
+ clock-frequency = <196608000>; |
896 |
+ clock-output-names = "clkaud_ext_i_1"; |
897 |
+ }; |
898 |
+ |
899 |
+- clkaud_ext_i_2: oscillator@5 { |
900 |
++ clkaud_ext_i_2: oscillator-aud2 { |
901 |
+ compatible = "fixed-clock"; |
902 |
+ #clock-cells = <0>; |
903 |
+ clock-frequency = <180633600>; |
904 |
+ clock-output-names = "clkaud_ext_i_2"; |
905 |
+ }; |
906 |
+ |
907 |
+- clki2si0_mck_i: oscillator@6 { |
908 |
++ clki2si0_mck_i: oscillator-i2s0 { |
909 |
+ compatible = "fixed-clock"; |
910 |
+ #clock-cells = <0>; |
911 |
+ clock-frequency = <30000000>; |
912 |
+ clock-output-names = "clki2si0_mck_i"; |
913 |
+ }; |
914 |
+ |
915 |
+- clki2si1_mck_i: oscillator@7 { |
916 |
++ clki2si1_mck_i: oscillator-i2s1 { |
917 |
+ compatible = "fixed-clock"; |
918 |
+ #clock-cells = <0>; |
919 |
+ clock-frequency = <30000000>; |
920 |
+ clock-output-names = "clki2si1_mck_i"; |
921 |
+ }; |
922 |
+ |
923 |
+- clki2si2_mck_i: oscillator@8 { |
924 |
++ clki2si2_mck_i: oscillator-i2s2 { |
925 |
+ compatible = "fixed-clock"; |
926 |
+ #clock-cells = <0>; |
927 |
+ clock-frequency = <30000000>; |
928 |
+ clock-output-names = "clki2si2_mck_i"; |
929 |
+ }; |
930 |
+ |
931 |
+- clktdmin_mclk_i: oscillator@9 { |
932 |
++ clktdmin_mclk_i: oscillator-mclk { |
933 |
+ compatible = "fixed-clock"; |
934 |
+ #clock-cells = <0>; |
935 |
+ clock-frequency = <30000000>; |
936 |
+@@ -266,7 +266,7 @@ |
937 |
+ reg = <0 0x10005000 0 0x1000>; |
938 |
+ }; |
939 |
+ |
940 |
+- pio: pinctrl@10005000 { |
941 |
++ pio: pinctrl@1000b000 { |
942 |
+ compatible = "mediatek,mt2712-pinctrl"; |
943 |
+ reg = <0 0x1000b000 0 0x1000>; |
944 |
+ mediatek,pctl-regmap = <&syscfg_pctl_a>; |
945 |
+diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi |
946 |
+index 9bdf5145966c5..dde9ce137b4f1 100644 |
947 |
+--- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi |
948 |
++++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi |
949 |
+@@ -88,14 +88,14 @@ |
950 |
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW 0>; |
951 |
+ }; |
952 |
+ |
953 |
+- clk26m: oscillator@0 { |
954 |
++ clk26m: oscillator-26m { |
955 |
+ compatible = "fixed-clock"; |
956 |
+ #clock-cells = <0>; |
957 |
+ clock-frequency = <26000000>; |
958 |
+ clock-output-names = "clk26m"; |
959 |
+ }; |
960 |
+ |
961 |
+- clk32k: oscillator@1 { |
962 |
++ clk32k: oscillator-32k { |
963 |
+ compatible = "fixed-clock"; |
964 |
+ #clock-cells = <0>; |
965 |
+ clock-frequency = <32768>; |
966 |
+@@ -117,7 +117,7 @@ |
967 |
+ compatible = "simple-bus"; |
968 |
+ ranges; |
969 |
+ |
970 |
+- gic: interrupt-controller@0c000000 { |
971 |
++ gic: interrupt-controller@c000000 { |
972 |
+ compatible = "arm,gic-v3"; |
973 |
+ #interrupt-cells = <4>; |
974 |
+ interrupt-parent = <&gic>; |
975 |
+@@ -138,7 +138,7 @@ |
976 |
+ |
977 |
+ }; |
978 |
+ |
979 |
+- sysirq: intpol-controller@0c53a650 { |
980 |
++ sysirq: intpol-controller@c53a650 { |
981 |
+ compatible = "mediatek,mt6779-sysirq", |
982 |
+ "mediatek,mt6577-sysirq"; |
983 |
+ interrupt-controller; |
984 |
+diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi |
985 |
+index 15616231022a2..c3677d77e0a45 100644 |
986 |
+--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi |
987 |
++++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi |
988 |
+@@ -95,7 +95,7 @@ |
989 |
+ }; |
990 |
+ }; |
991 |
+ |
992 |
+- clk26m: oscillator@0 { |
993 |
++ clk26m: oscillator-26m { |
994 |
+ compatible = "fixed-clock"; |
995 |
+ #clock-cells = <0>; |
996 |
+ clock-frequency = <26000000>; |
997 |
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi |
998 |
+index 409cf827970cf..f4e0bea8ddcb6 100644 |
999 |
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi |
1000 |
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi |
1001 |
+@@ -1212,7 +1212,7 @@ |
1002 |
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>; |
1003 |
+ interrupt-names = "job", "mmu", "gpu"; |
1004 |
+ |
1005 |
+- clocks = <&topckgen CLK_TOP_MFGPLL_CK>; |
1006 |
++ clocks = <&mfgcfg CLK_MFG_BG3D>; |
1007 |
+ |
1008 |
+ power-domains = |
1009 |
+ <&spm MT8183_POWER_DOMAIN_MFG_CORE0>, |
1010 |
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi |
1011 |
+index fcddec14738d8..54514d62398f2 100644 |
1012 |
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi |
1013 |
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi |
1014 |
+@@ -17,7 +17,7 @@ |
1015 |
+ }; |
1016 |
+ |
1017 |
+ firmware { |
1018 |
+- optee: optee@4fd00000 { |
1019 |
++ optee: optee { |
1020 |
+ compatible = "linaro,optee-tz"; |
1021 |
+ method = "smc"; |
1022 |
+ }; |
1023 |
+@@ -210,7 +210,7 @@ |
1024 |
+ }; |
1025 |
+ }; |
1026 |
+ |
1027 |
+- i2c0_pins_a: i2c0@0 { |
1028 |
++ i2c0_pins_a: i2c0 { |
1029 |
+ pins1 { |
1030 |
+ pinmux = <MT8516_PIN_58_SDA0__FUNC_SDA0_0>, |
1031 |
+ <MT8516_PIN_59_SCL0__FUNC_SCL0_0>; |
1032 |
+@@ -218,7 +218,7 @@ |
1033 |
+ }; |
1034 |
+ }; |
1035 |
+ |
1036 |
+- i2c2_pins_a: i2c2@0 { |
1037 |
++ i2c2_pins_a: i2c2 { |
1038 |
+ pins1 { |
1039 |
+ pinmux = <MT8516_PIN_60_SDA2__FUNC_SDA2_0>, |
1040 |
+ <MT8516_PIN_61_SCL2__FUNC_SCL2_0>; |
1041 |
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts |
1042 |
+index 5aec183087128..5310259d03dc5 100644 |
1043 |
+--- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts |
1044 |
++++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts |
1045 |
+@@ -37,6 +37,8 @@ |
1046 |
+ |
1047 |
+ &spi_0 { |
1048 |
+ cs-select = <0>; |
1049 |
++ pinctrl-0 = <&spi_0_pins>; |
1050 |
++ pinctrl-names = "default"; |
1051 |
+ status = "okay"; |
1052 |
+ |
1053 |
+ m25p80@0 { |
1054 |
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi |
1055 |
+index ce4c2b4a5fc07..30ac0b2e8c896 100644 |
1056 |
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi |
1057 |
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi |
1058 |
+@@ -401,7 +401,7 @@ |
1059 |
+ reset-names = "phy", |
1060 |
+ "common"; |
1061 |
+ |
1062 |
+- pcie_phy0: lane@84200 { |
1063 |
++ pcie_phy0: phy@84200 { |
1064 |
+ reg = <0x0 0x84200 0x0 0x16c>, /* Serdes Tx */ |
1065 |
+ <0x0 0x84400 0x0 0x200>, /* Serdes Rx */ |
1066 |
+ <0x0 0x84800 0x0 0x4f4>; /* PCS: Lane0, COM, PCIE */ |
1067 |
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi |
1068 |
+index 6b9ac05504905..9d4019e0949a9 100644 |
1069 |
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi |
1070 |
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi |
1071 |
+@@ -106,7 +106,7 @@ |
1072 |
+ reset-names = "phy","common"; |
1073 |
+ status = "disabled"; |
1074 |
+ |
1075 |
+- usb1_ssphy: lane@58200 { |
1076 |
++ usb1_ssphy: phy@58200 { |
1077 |
+ reg = <0x00058200 0x130>, /* Tx */ |
1078 |
+ <0x00058400 0x200>, /* Rx */ |
1079 |
+ <0x00058800 0x1f8>, /* PCS */ |
1080 |
+@@ -149,7 +149,7 @@ |
1081 |
+ reset-names = "phy","common"; |
1082 |
+ status = "disabled"; |
1083 |
+ |
1084 |
+- usb0_ssphy: lane@78200 { |
1085 |
++ usb0_ssphy: phy@78200 { |
1086 |
+ reg = <0x00078200 0x130>, /* Tx */ |
1087 |
+ <0x00078400 0x200>, /* Rx */ |
1088 |
+ <0x00078800 0x1f8>, /* PCS */ |
1089 |
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi |
1090 |
+index 19e201f52b167..b967dbfba3b84 100644 |
1091 |
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi |
1092 |
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi |
1093 |
+@@ -1307,7 +1307,7 @@ |
1094 |
+ }; |
1095 |
+ |
1096 |
+ mpss: remoteproc@4080000 { |
1097 |
+- compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil"; |
1098 |
++ compatible = "qcom,msm8916-mss-pil"; |
1099 |
+ reg = <0x04080000 0x100>, |
1100 |
+ <0x04020000 0x040>; |
1101 |
+ |
1102 |
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi |
1103 |
+index 6077c36019514..40174220e8e28 100644 |
1104 |
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi |
1105 |
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi |
1106 |
+@@ -142,82 +142,92 @@ |
1107 |
+ /* Nominal fmax for now */ |
1108 |
+ opp-307200000 { |
1109 |
+ opp-hz = /bits/ 64 <307200000>; |
1110 |
+- opp-supported-hw = <0x77>; |
1111 |
++ opp-supported-hw = <0x7>; |
1112 |
+ clock-latency-ns = <200000>; |
1113 |
+ }; |
1114 |
+ opp-422400000 { |
1115 |
+ opp-hz = /bits/ 64 <422400000>; |
1116 |
+- opp-supported-hw = <0x77>; |
1117 |
++ opp-supported-hw = <0x7>; |
1118 |
+ clock-latency-ns = <200000>; |
1119 |
+ }; |
1120 |
+ opp-480000000 { |
1121 |
+ opp-hz = /bits/ 64 <480000000>; |
1122 |
+- opp-supported-hw = <0x77>; |
1123 |
++ opp-supported-hw = <0x7>; |
1124 |
+ clock-latency-ns = <200000>; |
1125 |
+ }; |
1126 |
+ opp-556800000 { |
1127 |
+ opp-hz = /bits/ 64 <556800000>; |
1128 |
+- opp-supported-hw = <0x77>; |
1129 |
++ opp-supported-hw = <0x7>; |
1130 |
+ clock-latency-ns = <200000>; |
1131 |
+ }; |
1132 |
+ opp-652800000 { |
1133 |
+ opp-hz = /bits/ 64 <652800000>; |
1134 |
+- opp-supported-hw = <0x77>; |
1135 |
++ opp-supported-hw = <0x7>; |
1136 |
+ clock-latency-ns = <200000>; |
1137 |
+ }; |
1138 |
+ opp-729600000 { |
1139 |
+ opp-hz = /bits/ 64 <729600000>; |
1140 |
+- opp-supported-hw = <0x77>; |
1141 |
++ opp-supported-hw = <0x7>; |
1142 |
+ clock-latency-ns = <200000>; |
1143 |
+ }; |
1144 |
+ opp-844800000 { |
1145 |
+ opp-hz = /bits/ 64 <844800000>; |
1146 |
+- opp-supported-hw = <0x77>; |
1147 |
++ opp-supported-hw = <0x7>; |
1148 |
+ clock-latency-ns = <200000>; |
1149 |
+ }; |
1150 |
+ opp-960000000 { |
1151 |
+ opp-hz = /bits/ 64 <960000000>; |
1152 |
+- opp-supported-hw = <0x77>; |
1153 |
++ opp-supported-hw = <0x7>; |
1154 |
+ clock-latency-ns = <200000>; |
1155 |
+ }; |
1156 |
+ opp-1036800000 { |
1157 |
+ opp-hz = /bits/ 64 <1036800000>; |
1158 |
+- opp-supported-hw = <0x77>; |
1159 |
++ opp-supported-hw = <0x7>; |
1160 |
+ clock-latency-ns = <200000>; |
1161 |
+ }; |
1162 |
+ opp-1113600000 { |
1163 |
+ opp-hz = /bits/ 64 <1113600000>; |
1164 |
+- opp-supported-hw = <0x77>; |
1165 |
++ opp-supported-hw = <0x7>; |
1166 |
+ clock-latency-ns = <200000>; |
1167 |
+ }; |
1168 |
+ opp-1190400000 { |
1169 |
+ opp-hz = /bits/ 64 <1190400000>; |
1170 |
+- opp-supported-hw = <0x77>; |
1171 |
++ opp-supported-hw = <0x7>; |
1172 |
+ clock-latency-ns = <200000>; |
1173 |
+ }; |
1174 |
+ opp-1228800000 { |
1175 |
+ opp-hz = /bits/ 64 <1228800000>; |
1176 |
+- opp-supported-hw = <0x77>; |
1177 |
++ opp-supported-hw = <0x7>; |
1178 |
+ clock-latency-ns = <200000>; |
1179 |
+ }; |
1180 |
+ opp-1324800000 { |
1181 |
+ opp-hz = /bits/ 64 <1324800000>; |
1182 |
+- opp-supported-hw = <0x77>; |
1183 |
++ opp-supported-hw = <0x5>; |
1184 |
++ clock-latency-ns = <200000>; |
1185 |
++ }; |
1186 |
++ opp-1363200000 { |
1187 |
++ opp-hz = /bits/ 64 <1363200000>; |
1188 |
++ opp-supported-hw = <0x2>; |
1189 |
+ clock-latency-ns = <200000>; |
1190 |
+ }; |
1191 |
+ opp-1401600000 { |
1192 |
+ opp-hz = /bits/ 64 <1401600000>; |
1193 |
+- opp-supported-hw = <0x77>; |
1194 |
++ opp-supported-hw = <0x5>; |
1195 |
+ clock-latency-ns = <200000>; |
1196 |
+ }; |
1197 |
+ opp-1478400000 { |
1198 |
+ opp-hz = /bits/ 64 <1478400000>; |
1199 |
+- opp-supported-hw = <0x77>; |
1200 |
++ opp-supported-hw = <0x1>; |
1201 |
++ clock-latency-ns = <200000>; |
1202 |
++ }; |
1203 |
++ opp-1497600000 { |
1204 |
++ opp-hz = /bits/ 64 <1497600000>; |
1205 |
++ opp-supported-hw = <0x04>; |
1206 |
+ clock-latency-ns = <200000>; |
1207 |
+ }; |
1208 |
+ opp-1593600000 { |
1209 |
+ opp-hz = /bits/ 64 <1593600000>; |
1210 |
+- opp-supported-hw = <0x77>; |
1211 |
++ opp-supported-hw = <0x1>; |
1212 |
+ clock-latency-ns = <200000>; |
1213 |
+ }; |
1214 |
+ }; |
1215 |
+@@ -230,127 +240,137 @@ |
1216 |
+ /* Nominal fmax for now */ |
1217 |
+ opp-307200000 { |
1218 |
+ opp-hz = /bits/ 64 <307200000>; |
1219 |
+- opp-supported-hw = <0x77>; |
1220 |
++ opp-supported-hw = <0x7>; |
1221 |
+ clock-latency-ns = <200000>; |
1222 |
+ }; |
1223 |
+ opp-403200000 { |
1224 |
+ opp-hz = /bits/ 64 <403200000>; |
1225 |
+- opp-supported-hw = <0x77>; |
1226 |
++ opp-supported-hw = <0x7>; |
1227 |
+ clock-latency-ns = <200000>; |
1228 |
+ }; |
1229 |
+ opp-480000000 { |
1230 |
+ opp-hz = /bits/ 64 <480000000>; |
1231 |
+- opp-supported-hw = <0x77>; |
1232 |
++ opp-supported-hw = <0x7>; |
1233 |
+ clock-latency-ns = <200000>; |
1234 |
+ }; |
1235 |
+ opp-556800000 { |
1236 |
+ opp-hz = /bits/ 64 <556800000>; |
1237 |
+- opp-supported-hw = <0x77>; |
1238 |
++ opp-supported-hw = <0x7>; |
1239 |
+ clock-latency-ns = <200000>; |
1240 |
+ }; |
1241 |
+ opp-652800000 { |
1242 |
+ opp-hz = /bits/ 64 <652800000>; |
1243 |
+- opp-supported-hw = <0x77>; |
1244 |
++ opp-supported-hw = <0x7>; |
1245 |
+ clock-latency-ns = <200000>; |
1246 |
+ }; |
1247 |
+ opp-729600000 { |
1248 |
+ opp-hz = /bits/ 64 <729600000>; |
1249 |
+- opp-supported-hw = <0x77>; |
1250 |
++ opp-supported-hw = <0x7>; |
1251 |
+ clock-latency-ns = <200000>; |
1252 |
+ }; |
1253 |
+ opp-806400000 { |
1254 |
+ opp-hz = /bits/ 64 <806400000>; |
1255 |
+- opp-supported-hw = <0x77>; |
1256 |
++ opp-supported-hw = <0x7>; |
1257 |
+ clock-latency-ns = <200000>; |
1258 |
+ }; |
1259 |
+ opp-883200000 { |
1260 |
+ opp-hz = /bits/ 64 <883200000>; |
1261 |
+- opp-supported-hw = <0x77>; |
1262 |
++ opp-supported-hw = <0x7>; |
1263 |
+ clock-latency-ns = <200000>; |
1264 |
+ }; |
1265 |
+ opp-940800000 { |
1266 |
+ opp-hz = /bits/ 64 <940800000>; |
1267 |
+- opp-supported-hw = <0x77>; |
1268 |
++ opp-supported-hw = <0x7>; |
1269 |
+ clock-latency-ns = <200000>; |
1270 |
+ }; |
1271 |
+ opp-1036800000 { |
1272 |
+ opp-hz = /bits/ 64 <1036800000>; |
1273 |
+- opp-supported-hw = <0x77>; |
1274 |
++ opp-supported-hw = <0x7>; |
1275 |
+ clock-latency-ns = <200000>; |
1276 |
+ }; |
1277 |
+ opp-1113600000 { |
1278 |
+ opp-hz = /bits/ 64 <1113600000>; |
1279 |
+- opp-supported-hw = <0x77>; |
1280 |
++ opp-supported-hw = <0x7>; |
1281 |
+ clock-latency-ns = <200000>; |
1282 |
+ }; |
1283 |
+ opp-1190400000 { |
1284 |
+ opp-hz = /bits/ 64 <1190400000>; |
1285 |
+- opp-supported-hw = <0x77>; |
1286 |
++ opp-supported-hw = <0x7>; |
1287 |
+ clock-latency-ns = <200000>; |
1288 |
+ }; |
1289 |
+ opp-1248000000 { |
1290 |
+ opp-hz = /bits/ 64 <1248000000>; |
1291 |
+- opp-supported-hw = <0x77>; |
1292 |
++ opp-supported-hw = <0x7>; |
1293 |
+ clock-latency-ns = <200000>; |
1294 |
+ }; |
1295 |
+ opp-1324800000 { |
1296 |
+ opp-hz = /bits/ 64 <1324800000>; |
1297 |
+- opp-supported-hw = <0x77>; |
1298 |
++ opp-supported-hw = <0x7>; |
1299 |
+ clock-latency-ns = <200000>; |
1300 |
+ }; |
1301 |
+ opp-1401600000 { |
1302 |
+ opp-hz = /bits/ 64 <1401600000>; |
1303 |
+- opp-supported-hw = <0x77>; |
1304 |
++ opp-supported-hw = <0x7>; |
1305 |
+ clock-latency-ns = <200000>; |
1306 |
+ }; |
1307 |
+ opp-1478400000 { |
1308 |
+ opp-hz = /bits/ 64 <1478400000>; |
1309 |
+- opp-supported-hw = <0x77>; |
1310 |
++ opp-supported-hw = <0x7>; |
1311 |
+ clock-latency-ns = <200000>; |
1312 |
+ }; |
1313 |
+ opp-1555200000 { |
1314 |
+ opp-hz = /bits/ 64 <1555200000>; |
1315 |
+- opp-supported-hw = <0x77>; |
1316 |
++ opp-supported-hw = <0x7>; |
1317 |
+ clock-latency-ns = <200000>; |
1318 |
+ }; |
1319 |
+ opp-1632000000 { |
1320 |
+ opp-hz = /bits/ 64 <1632000000>; |
1321 |
+- opp-supported-hw = <0x77>; |
1322 |
++ opp-supported-hw = <0x7>; |
1323 |
+ clock-latency-ns = <200000>; |
1324 |
+ }; |
1325 |
+ opp-1708800000 { |
1326 |
+ opp-hz = /bits/ 64 <1708800000>; |
1327 |
+- opp-supported-hw = <0x77>; |
1328 |
++ opp-supported-hw = <0x7>; |
1329 |
+ clock-latency-ns = <200000>; |
1330 |
+ }; |
1331 |
+ opp-1785600000 { |
1332 |
+ opp-hz = /bits/ 64 <1785600000>; |
1333 |
+- opp-supported-hw = <0x77>; |
1334 |
++ opp-supported-hw = <0x7>; |
1335 |
++ clock-latency-ns = <200000>; |
1336 |
++ }; |
1337 |
++ opp-1804800000 { |
1338 |
++ opp-hz = /bits/ 64 <1804800000>; |
1339 |
++ opp-supported-hw = <0x6>; |
1340 |
+ clock-latency-ns = <200000>; |
1341 |
+ }; |
1342 |
+ opp-1824000000 { |
1343 |
+ opp-hz = /bits/ 64 <1824000000>; |
1344 |
+- opp-supported-hw = <0x77>; |
1345 |
++ opp-supported-hw = <0x1>; |
1346 |
++ clock-latency-ns = <200000>; |
1347 |
++ }; |
1348 |
++ opp-1900800000 { |
1349 |
++ opp-hz = /bits/ 64 <1900800000>; |
1350 |
++ opp-supported-hw = <0x4>; |
1351 |
+ clock-latency-ns = <200000>; |
1352 |
+ }; |
1353 |
+ opp-1920000000 { |
1354 |
+ opp-hz = /bits/ 64 <1920000000>; |
1355 |
+- opp-supported-hw = <0x77>; |
1356 |
++ opp-supported-hw = <0x1>; |
1357 |
+ clock-latency-ns = <200000>; |
1358 |
+ }; |
1359 |
+ opp-1996800000 { |
1360 |
+ opp-hz = /bits/ 64 <1996800000>; |
1361 |
+- opp-supported-hw = <0x77>; |
1362 |
++ opp-supported-hw = <0x1>; |
1363 |
+ clock-latency-ns = <200000>; |
1364 |
+ }; |
1365 |
+ opp-2073600000 { |
1366 |
+ opp-hz = /bits/ 64 <2073600000>; |
1367 |
+- opp-supported-hw = <0x77>; |
1368 |
++ opp-supported-hw = <0x1>; |
1369 |
+ clock-latency-ns = <200000>; |
1370 |
+ }; |
1371 |
+ opp-2150400000 { |
1372 |
+ opp-hz = /bits/ 64 <2150400000>; |
1373 |
+- opp-supported-hw = <0x77>; |
1374 |
++ opp-supported-hw = <0x1>; |
1375 |
+ clock-latency-ns = <200000>; |
1376 |
+ }; |
1377 |
+ }; |
1378 |
+@@ -598,7 +618,7 @@ |
1379 |
+ reset-names = "phy", "common", "cfg"; |
1380 |
+ status = "disabled"; |
1381 |
+ |
1382 |
+- pciephy_0: lane@35000 { |
1383 |
++ pciephy_0: phy@35000 { |
1384 |
+ reg = <0x00035000 0x130>, |
1385 |
+ <0x00035200 0x200>, |
1386 |
+ <0x00035400 0x1dc>; |
1387 |
+@@ -611,7 +631,7 @@ |
1388 |
+ reset-names = "lane0"; |
1389 |
+ }; |
1390 |
+ |
1391 |
+- pciephy_1: lane@36000 { |
1392 |
++ pciephy_1: phy@36000 { |
1393 |
+ reg = <0x00036000 0x130>, |
1394 |
+ <0x00036200 0x200>, |
1395 |
+ <0x00036400 0x1dc>; |
1396 |
+@@ -624,7 +644,7 @@ |
1397 |
+ reset-names = "lane1"; |
1398 |
+ }; |
1399 |
+ |
1400 |
+- pciephy_2: lane@37000 { |
1401 |
++ pciephy_2: phy@37000 { |
1402 |
+ reg = <0x00037000 0x130>, |
1403 |
+ <0x00037200 0x200>, |
1404 |
+ <0x00037400 0x1dc>; |
1405 |
+@@ -975,17 +995,17 @@ |
1406 |
+ compatible ="operating-points-v2"; |
1407 |
+ |
1408 |
+ /* |
1409 |
+- * 624Mhz and 560Mhz are only available on speed |
1410 |
+- * bin (1 << 0). All the rest are available on |
1411 |
+- * all bins of the hardware |
1412 |
++ * 624Mhz is only available on speed bins 0 and 3. |
1413 |
++ * 560Mhz is only available on speed bins 0, 2 and 3. |
1414 |
++ * All the rest are available on all bins of the hardware. |
1415 |
+ */ |
1416 |
+ opp-624000000 { |
1417 |
+ opp-hz = /bits/ 64 <624000000>; |
1418 |
+- opp-supported-hw = <0x01>; |
1419 |
++ opp-supported-hw = <0x09>; |
1420 |
+ }; |
1421 |
+ opp-560000000 { |
1422 |
+ opp-hz = /bits/ 64 <560000000>; |
1423 |
+- opp-supported-hw = <0x01>; |
1424 |
++ opp-supported-hw = <0x0d>; |
1425 |
+ }; |
1426 |
+ opp-510000000 { |
1427 |
+ opp-hz = /bits/ 64 <510000000>; |
1428 |
+@@ -1743,7 +1763,7 @@ |
1429 |
+ reset-names = "ufsphy"; |
1430 |
+ status = "disabled"; |
1431 |
+ |
1432 |
+- ufsphy_lane: lanes@627400 { |
1433 |
++ ufsphy_lane: phy@627400 { |
1434 |
+ reg = <0x627400 0x12c>, |
1435 |
+ <0x627600 0x200>, |
1436 |
+ <0x627c00 0x1b4>; |
1437 |
+@@ -2598,7 +2618,7 @@ |
1438 |
+ reset-names = "phy", "common"; |
1439 |
+ status = "disabled"; |
1440 |
+ |
1441 |
+- ssusb_phy_0: lane@7410200 { |
1442 |
++ ssusb_phy_0: phy@7410200 { |
1443 |
+ reg = <0x07410200 0x200>, |
1444 |
+ <0x07410400 0x130>, |
1445 |
+ <0x07410600 0x1a8>; |
1446 |
+diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi |
1447 |
+new file mode 100644 |
1448 |
+index 0000000000000..63e1b4ec7a360 |
1449 |
+--- /dev/null |
1450 |
++++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi |
1451 |
+@@ -0,0 +1,266 @@ |
1452 |
++// SPDX-License-Identifier: BSD-3-Clause |
1453 |
++/* |
1454 |
++ * Copyright (c) 2022, Linaro Limited |
1455 |
++ */ |
1456 |
++ |
1457 |
++#include "msm8996.dtsi" |
1458 |
++ |
1459 |
++/ { |
1460 |
++ /delete-node/ opp-table-cluster0; |
1461 |
++ /delete-node/ opp-table-cluster1; |
1462 |
++ |
1463 |
++ /* |
1464 |
++ * On MSM8996 Pro the cpufreq driver shifts speed bins into the high |
1465 |
++ * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1 |
1466 |
++ * becomes 0x20, speed 2 becomes 0x40. |
1467 |
++ */ |
1468 |
++ |
1469 |
++ cluster0_opp: opp-table-cluster0 { |
1470 |
++ compatible = "operating-points-v2-kryo-cpu"; |
1471 |
++ nvmem-cells = <&speedbin_efuse>; |
1472 |
++ opp-shared; |
1473 |
++ |
1474 |
++ opp-307200000 { |
1475 |
++ opp-hz = /bits/ 64 <307200000>; |
1476 |
++ opp-supported-hw = <0x70>; |
1477 |
++ clock-latency-ns = <200000>; |
1478 |
++ }; |
1479 |
++ opp-384000000 { |
1480 |
++ opp-hz = /bits/ 64 <384000000>; |
1481 |
++ opp-supported-hw = <0x70>; |
1482 |
++ clock-latency-ns = <200000>; |
1483 |
++ }; |
1484 |
++ opp-460800000 { |
1485 |
++ opp-hz = /bits/ 64 <460800000>; |
1486 |
++ opp-supported-hw = <0x70>; |
1487 |
++ clock-latency-ns = <200000>; |
1488 |
++ }; |
1489 |
++ opp-537600000 { |
1490 |
++ opp-hz = /bits/ 64 <537600000>; |
1491 |
++ opp-supported-hw = <0x70>; |
1492 |
++ clock-latency-ns = <200000>; |
1493 |
++ }; |
1494 |
++ opp-614400000 { |
1495 |
++ opp-hz = /bits/ 64 <614400000>; |
1496 |
++ opp-supported-hw = <0x70>; |
1497 |
++ clock-latency-ns = <200000>; |
1498 |
++ }; |
1499 |
++ opp-691200000 { |
1500 |
++ opp-hz = /bits/ 64 <691200000>; |
1501 |
++ opp-supported-hw = <0x70>; |
1502 |
++ clock-latency-ns = <200000>; |
1503 |
++ }; |
1504 |
++ opp-768000000 { |
1505 |
++ opp-hz = /bits/ 64 <768000000>; |
1506 |
++ opp-supported-hw = <0x70>; |
1507 |
++ clock-latency-ns = <200000>; |
1508 |
++ }; |
1509 |
++ opp-844800000 { |
1510 |
++ opp-hz = /bits/ 64 <844800000>; |
1511 |
++ opp-supported-hw = <0x70>; |
1512 |
++ clock-latency-ns = <200000>; |
1513 |
++ }; |
1514 |
++ opp-902400000 { |
1515 |
++ opp-hz = /bits/ 64 <902400000>; |
1516 |
++ opp-supported-hw = <0x70>; |
1517 |
++ clock-latency-ns = <200000>; |
1518 |
++ }; |
1519 |
++ opp-979200000 { |
1520 |
++ opp-hz = /bits/ 64 <979200000>; |
1521 |
++ opp-supported-hw = <0x70>; |
1522 |
++ clock-latency-ns = <200000>; |
1523 |
++ }; |
1524 |
++ opp-1056000000 { |
1525 |
++ opp-hz = /bits/ 64 <1056000000>; |
1526 |
++ opp-supported-hw = <0x70>; |
1527 |
++ clock-latency-ns = <200000>; |
1528 |
++ }; |
1529 |
++ opp-1132800000 { |
1530 |
++ opp-hz = /bits/ 64 <1132800000>; |
1531 |
++ opp-supported-hw = <0x70>; |
1532 |
++ clock-latency-ns = <200000>; |
1533 |
++ }; |
1534 |
++ opp-1209600000 { |
1535 |
++ opp-hz = /bits/ 64 <1209600000>; |
1536 |
++ opp-supported-hw = <0x70>; |
1537 |
++ clock-latency-ns = <200000>; |
1538 |
++ }; |
1539 |
++ opp-1286400000 { |
1540 |
++ opp-hz = /bits/ 64 <1286400000>; |
1541 |
++ opp-supported-hw = <0x70>; |
1542 |
++ clock-latency-ns = <200000>; |
1543 |
++ }; |
1544 |
++ opp-1363200000 { |
1545 |
++ opp-hz = /bits/ 64 <1363200000>; |
1546 |
++ opp-supported-hw = <0x70>; |
1547 |
++ clock-latency-ns = <200000>; |
1548 |
++ }; |
1549 |
++ opp-1440000000 { |
1550 |
++ opp-hz = /bits/ 64 <1440000000>; |
1551 |
++ opp-supported-hw = <0x70>; |
1552 |
++ clock-latency-ns = <200000>; |
1553 |
++ }; |
1554 |
++ opp-1516800000 { |
1555 |
++ opp-hz = /bits/ 64 <1516800000>; |
1556 |
++ opp-supported-hw = <0x70>; |
1557 |
++ clock-latency-ns = <200000>; |
1558 |
++ }; |
1559 |
++ opp-1593600000 { |
1560 |
++ opp-hz = /bits/ 64 <1593600000>; |
1561 |
++ opp-supported-hw = <0x70>; |
1562 |
++ clock-latency-ns = <200000>; |
1563 |
++ }; |
1564 |
++ opp-1996800000 { |
1565 |
++ opp-hz = /bits/ 64 <1996800000>; |
1566 |
++ opp-supported-hw = <0x20>; |
1567 |
++ clock-latency-ns = <200000>; |
1568 |
++ }; |
1569 |
++ opp-2188800000 { |
1570 |
++ opp-hz = /bits/ 64 <2188800000>; |
1571 |
++ opp-supported-hw = <0x10>; |
1572 |
++ clock-latency-ns = <200000>; |
1573 |
++ }; |
1574 |
++ }; |
1575 |
++ |
1576 |
++ cluster1_opp: opp-table-cluster1 { |
1577 |
++ compatible = "operating-points-v2-kryo-cpu"; |
1578 |
++ nvmem-cells = <&speedbin_efuse>; |
1579 |
++ opp-shared; |
1580 |
++ |
1581 |
++ opp-307200000 { |
1582 |
++ opp-hz = /bits/ 64 <307200000>; |
1583 |
++ opp-supported-hw = <0x70>; |
1584 |
++ clock-latency-ns = <200000>; |
1585 |
++ }; |
1586 |
++ opp-384000000 { |
1587 |
++ opp-hz = /bits/ 64 <384000000>; |
1588 |
++ opp-supported-hw = <0x70>; |
1589 |
++ clock-latency-ns = <200000>; |
1590 |
++ }; |
1591 |
++ opp-460800000 { |
1592 |
++ opp-hz = /bits/ 64 <460800000>; |
1593 |
++ opp-supported-hw = <0x70>; |
1594 |
++ clock-latency-ns = <200000>; |
1595 |
++ }; |
1596 |
++ opp-537600000 { |
1597 |
++ opp-hz = /bits/ 64 <537600000>; |
1598 |
++ opp-supported-hw = <0x70>; |
1599 |
++ clock-latency-ns = <200000>; |
1600 |
++ }; |
1601 |
++ opp-614400000 { |
1602 |
++ opp-hz = /bits/ 64 <614400000>; |
1603 |
++ opp-supported-hw = <0x70>; |
1604 |
++ clock-latency-ns = <200000>; |
1605 |
++ }; |
1606 |
++ opp-691200000 { |
1607 |
++ opp-hz = /bits/ 64 <691200000>; |
1608 |
++ opp-supported-hw = <0x70>; |
1609 |
++ clock-latency-ns = <200000>; |
1610 |
++ }; |
1611 |
++ opp-748800000 { |
1612 |
++ opp-hz = /bits/ 64 <748800000>; |
1613 |
++ opp-supported-hw = <0x70>; |
1614 |
++ clock-latency-ns = <200000>; |
1615 |
++ }; |
1616 |
++ opp-825600000 { |
1617 |
++ opp-hz = /bits/ 64 <825600000>; |
1618 |
++ opp-supported-hw = <0x70>; |
1619 |
++ clock-latency-ns = <200000>; |
1620 |
++ }; |
1621 |
++ opp-902400000 { |
1622 |
++ opp-hz = /bits/ 64 <902400000>; |
1623 |
++ opp-supported-hw = <0x70>; |
1624 |
++ clock-latency-ns = <200000>; |
1625 |
++ }; |
1626 |
++ opp-979200000 { |
1627 |
++ opp-hz = /bits/ 64 <979200000>; |
1628 |
++ opp-supported-hw = <0x70>; |
1629 |
++ clock-latency-ns = <200000>; |
1630 |
++ }; |
1631 |
++ opp-1056000000 { |
1632 |
++ opp-hz = /bits/ 64 <1056000000>; |
1633 |
++ opp-supported-hw = <0x70>; |
1634 |
++ clock-latency-ns = <200000>; |
1635 |
++ }; |
1636 |
++ opp-1132800000 { |
1637 |
++ opp-hz = /bits/ 64 <1132800000>; |
1638 |
++ opp-supported-hw = <0x70>; |
1639 |
++ clock-latency-ns = <200000>; |
1640 |
++ }; |
1641 |
++ opp-1209600000 { |
1642 |
++ opp-hz = /bits/ 64 <1209600000>; |
1643 |
++ opp-supported-hw = <0x70>; |
1644 |
++ clock-latency-ns = <200000>; |
1645 |
++ }; |
1646 |
++ opp-1286400000 { |
1647 |
++ opp-hz = /bits/ 64 <1286400000>; |
1648 |
++ opp-supported-hw = <0x70>; |
1649 |
++ clock-latency-ns = <200000>; |
1650 |
++ }; |
1651 |
++ opp-1363200000 { |
1652 |
++ opp-hz = /bits/ 64 <1363200000>; |
1653 |
++ opp-supported-hw = <0x70>; |
1654 |
++ clock-latency-ns = <200000>; |
1655 |
++ }; |
1656 |
++ opp-1440000000 { |
1657 |
++ opp-hz = /bits/ 64 <1440000000>; |
1658 |
++ opp-supported-hw = <0x70>; |
1659 |
++ clock-latency-ns = <200000>; |
1660 |
++ }; |
1661 |
++ opp-1516800000 { |
1662 |
++ opp-hz = /bits/ 64 <1516800000>; |
1663 |
++ opp-supported-hw = <0x70>; |
1664 |
++ clock-latency-ns = <200000>; |
1665 |
++ }; |
1666 |
++ opp-1593600000 { |
1667 |
++ opp-hz = /bits/ 64 <1593600000>; |
1668 |
++ opp-supported-hw = <0x70>; |
1669 |
++ clock-latency-ns = <200000>; |
1670 |
++ }; |
1671 |
++ opp-1670400000 { |
1672 |
++ opp-hz = /bits/ 64 <1670400000>; |
1673 |
++ opp-supported-hw = <0x70>; |
1674 |
++ clock-latency-ns = <200000>; |
1675 |
++ }; |
1676 |
++ opp-1747200000 { |
1677 |
++ opp-hz = /bits/ 64 <1747200000>; |
1678 |
++ opp-supported-hw = <0x70>; |
1679 |
++ clock-latency-ns = <200000>; |
1680 |
++ }; |
1681 |
++ opp-1824000000 { |
1682 |
++ opp-hz = /bits/ 64 <1824000000>; |
1683 |
++ opp-supported-hw = <0x70>; |
1684 |
++ clock-latency-ns = <200000>; |
1685 |
++ }; |
1686 |
++ opp-1900800000 { |
1687 |
++ opp-hz = /bits/ 64 <1900800000>; |
1688 |
++ opp-supported-hw = <0x70>; |
1689 |
++ clock-latency-ns = <200000>; |
1690 |
++ }; |
1691 |
++ opp-1977600000 { |
1692 |
++ opp-hz = /bits/ 64 <1977600000>; |
1693 |
++ opp-supported-hw = <0x30>; |
1694 |
++ clock-latency-ns = <200000>; |
1695 |
++ }; |
1696 |
++ opp-2054400000 { |
1697 |
++ opp-hz = /bits/ 64 <2054400000>; |
1698 |
++ opp-supported-hw = <0x30>; |
1699 |
++ clock-latency-ns = <200000>; |
1700 |
++ }; |
1701 |
++ opp-2150400000 { |
1702 |
++ opp-hz = /bits/ 64 <2150400000>; |
1703 |
++ opp-supported-hw = <0x30>; |
1704 |
++ clock-latency-ns = <200000>; |
1705 |
++ }; |
1706 |
++ opp-2246400000 { |
1707 |
++ opp-hz = /bits/ 64 <2246400000>; |
1708 |
++ opp-supported-hw = <0x10>; |
1709 |
++ clock-latency-ns = <200000>; |
1710 |
++ }; |
1711 |
++ opp-2342400000 { |
1712 |
++ opp-hz = /bits/ 64 <2342400000>; |
1713 |
++ opp-supported-hw = <0x10>; |
1714 |
++ clock-latency-ns = <200000>; |
1715 |
++ }; |
1716 |
++ }; |
1717 |
++}; |
1718 |
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi |
1719 |
+index 228339f81c327..5350b911f4f6c 100644 |
1720 |
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi |
1721 |
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi |
1722 |
+@@ -994,7 +994,7 @@ |
1723 |
+ vdda-phy-supply = <&vreg_l1a_0p875>; |
1724 |
+ vdda-pll-supply = <&vreg_l2a_1p2>; |
1725 |
+ |
1726 |
+- pciephy: lane@1c06800 { |
1727 |
++ pciephy: phy@1c06800 { |
1728 |
+ reg = <0x01c06200 0x128>, <0x01c06400 0x1fc>, <0x01c06800 0x20c>; |
1729 |
+ #phy-cells = <0>; |
1730 |
+ |
1731 |
+@@ -1066,7 +1066,7 @@ |
1732 |
+ reset-names = "ufsphy"; |
1733 |
+ resets = <&ufshc 0>; |
1734 |
+ |
1735 |
+- ufsphy_lanes: lanes@1da7400 { |
1736 |
++ ufsphy_lanes: phy@1da7400 { |
1737 |
+ reg = <0x01da7400 0x128>, |
1738 |
+ <0x01da7600 0x1fc>, |
1739 |
+ <0x01da7c00 0x1dc>, |
1740 |
+@@ -1999,7 +1999,7 @@ |
1741 |
+ <&gcc GCC_USB3PHY_PHY_BCR>; |
1742 |
+ reset-names = "phy", "common"; |
1743 |
+ |
1744 |
+- usb1_ssphy: lane@c010200 { |
1745 |
++ usb1_ssphy: phy@c010200 { |
1746 |
+ reg = <0xc010200 0x128>, |
1747 |
+ <0xc010400 0x200>, |
1748 |
+ <0xc010c00 0x20c>, |
1749 |
+diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi |
1750 |
+index e847d7209afc6..affc736d154ad 100644 |
1751 |
+--- a/arch/arm64/boot/dts/qcom/pm660.dtsi |
1752 |
++++ b/arch/arm64/boot/dts/qcom/pm660.dtsi |
1753 |
+@@ -152,7 +152,7 @@ |
1754 |
+ qcom,pre-scaling = <1 3>; |
1755 |
+ }; |
1756 |
+ |
1757 |
+- vcoin: vcoin@83 { |
1758 |
++ vcoin: vcoin@85 { |
1759 |
+ reg = <ADC5_VCOIN>; |
1760 |
+ qcom,decimation = <1024>; |
1761 |
+ qcom,pre-scaling = <1 3>; |
1762 |
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi |
1763 |
+index 952bb133914f4..c2e1a0d9a2725 100644 |
1764 |
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi |
1765 |
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi |
1766 |
+@@ -768,7 +768,7 @@ |
1767 |
+ pins = "gpio17", "gpio18", "gpio19"; |
1768 |
+ function = "gpio"; |
1769 |
+ drive-strength = <2>; |
1770 |
+- bias-no-pull; |
1771 |
++ bias-disable; |
1772 |
+ }; |
1773 |
+ }; |
1774 |
+ |
1775 |
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi |
1776 |
+index dfd1b42c07fd5..3566db1d7357e 100644 |
1777 |
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi |
1778 |
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi |
1779 |
+@@ -1299,7 +1299,7 @@ ap_ts_i2c: &i2c14 { |
1780 |
+ config { |
1781 |
+ pins = "gpio126"; |
1782 |
+ function = "gpio"; |
1783 |
+- bias-no-pull; |
1784 |
++ bias-disable; |
1785 |
+ drive-strength = <2>; |
1786 |
+ output-low; |
1787 |
+ }; |
1788 |
+@@ -1309,7 +1309,7 @@ ap_ts_i2c: &i2c14 { |
1789 |
+ config { |
1790 |
+ pins = "gpio126"; |
1791 |
+ function = "gpio"; |
1792 |
+- bias-no-pull; |
1793 |
++ bias-disable; |
1794 |
+ drive-strength = <2>; |
1795 |
+ output-high; |
1796 |
+ }; |
1797 |
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi |
1798 |
+index ea7a272d267a7..ed293f635f145 100644 |
1799 |
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi |
1800 |
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi |
1801 |
+@@ -2064,7 +2064,7 @@ |
1802 |
+ |
1803 |
+ status = "disabled"; |
1804 |
+ |
1805 |
+- pcie0_lane: lanes@1c06200 { |
1806 |
++ pcie0_lane: phy@1c06200 { |
1807 |
+ reg = <0 0x01c06200 0 0x128>, |
1808 |
+ <0 0x01c06400 0 0x1fc>, |
1809 |
+ <0 0x01c06800 0 0x218>, |
1810 |
+@@ -2174,7 +2174,7 @@ |
1811 |
+ |
1812 |
+ status = "disabled"; |
1813 |
+ |
1814 |
+- pcie1_lane: lanes@1c06200 { |
1815 |
++ pcie1_lane: phy@1c06200 { |
1816 |
+ reg = <0 0x01c0a800 0 0x800>, |
1817 |
+ <0 0x01c0a800 0 0x800>, |
1818 |
+ <0 0x01c0b800 0 0x400>; |
1819 |
+@@ -2302,7 +2302,7 @@ |
1820 |
+ reset-names = "ufsphy"; |
1821 |
+ status = "disabled"; |
1822 |
+ |
1823 |
+- ufs_mem_phy_lanes: lanes@1d87400 { |
1824 |
++ ufs_mem_phy_lanes: phy@1d87400 { |
1825 |
+ reg = <0 0x01d87400 0 0x108>, |
1826 |
+ <0 0x01d87600 0 0x1e0>, |
1827 |
+ <0 0x01d87c00 0 0x1dc>, |
1828 |
+@@ -3699,7 +3699,7 @@ |
1829 |
+ <&gcc GCC_USB3_PHY_PRIM_BCR>; |
1830 |
+ reset-names = "phy", "common"; |
1831 |
+ |
1832 |
+- usb_1_ssphy: lanes@88e9200 { |
1833 |
++ usb_1_ssphy: phy@88e9200 { |
1834 |
+ reg = <0 0x088e9200 0 0x128>, |
1835 |
+ <0 0x088e9400 0 0x200>, |
1836 |
+ <0 0x088e9c00 0 0x218>, |
1837 |
+@@ -3732,7 +3732,7 @@ |
1838 |
+ <&gcc GCC_USB3_PHY_SEC_BCR>; |
1839 |
+ reset-names = "phy", "common"; |
1840 |
+ |
1841 |
+- usb_2_ssphy: lane@88eb200 { |
1842 |
++ usb_2_ssphy: phy@88eb200 { |
1843 |
+ reg = <0 0x088eb200 0 0x128>, |
1844 |
+ <0 0x088eb400 0 0x1fc>, |
1845 |
+ <0 0x088eb800 0 0x218>, |
1846 |
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi |
1847 |
+index f89af5e351127..dc3bddc54eb62 100644 |
1848 |
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi |
1849 |
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi |
1850 |
+@@ -408,7 +408,7 @@ |
1851 |
+ sdhc_1: sdhci@4744000 { |
1852 |
+ compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5"; |
1853 |
+ reg = <0x04744000 0x1000>, <0x04745000 0x1000>; |
1854 |
+- reg-names = "hc", "core"; |
1855 |
++ reg-names = "hc", "cqhci"; |
1856 |
+ |
1857 |
+ interrupts = <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>, |
1858 |
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>; |
1859 |
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi |
1860 |
+index f347f752d536d..292e40d6162dd 100644 |
1861 |
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi |
1862 |
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi |
1863 |
+@@ -1692,12 +1692,12 @@ |
1864 |
+ reset-names = "ufsphy"; |
1865 |
+ status = "disabled"; |
1866 |
+ |
1867 |
+- ufs_mem_phy_lanes: lanes@1d87400 { |
1868 |
+- reg = <0 0x01d87400 0 0x108>, |
1869 |
+- <0 0x01d87600 0 0x1e0>, |
1870 |
+- <0 0x01d87c00 0 0x1dc>, |
1871 |
+- <0 0x01d87800 0 0x108>, |
1872 |
+- <0 0x01d87a00 0 0x1e0>; |
1873 |
++ ufs_mem_phy_lanes: phy@1d87400 { |
1874 |
++ reg = <0 0x01d87400 0 0x16c>, |
1875 |
++ <0 0x01d87600 0 0x200>, |
1876 |
++ <0 0x01d87c00 0 0x200>, |
1877 |
++ <0 0x01d87800 0 0x16c>, |
1878 |
++ <0 0x01d87a00 0 0x200>; |
1879 |
+ #phy-cells = <0>; |
1880 |
+ }; |
1881 |
+ }; |
1882 |
+@@ -3010,7 +3010,7 @@ |
1883 |
+ <&gcc GCC_USB3_PHY_PRIM_BCR>; |
1884 |
+ reset-names = "phy", "common"; |
1885 |
+ |
1886 |
+- usb_1_ssphy: lanes@88e9200 { |
1887 |
++ usb_1_ssphy: phy@88e9200 { |
1888 |
+ reg = <0 0x088e9200 0 0x200>, |
1889 |
+ <0 0x088e9400 0 0x200>, |
1890 |
+ <0 0x088e9c00 0 0x218>, |
1891 |
+@@ -3043,7 +3043,7 @@ |
1892 |
+ <&gcc GCC_USB3_PHY_SEC_BCR>; |
1893 |
+ reset-names = "phy", "common"; |
1894 |
+ |
1895 |
+- usb_2_ssphy: lane@88eb200 { |
1896 |
++ usb_2_ssphy: phy@88eb200 { |
1897 |
+ reg = <0 0x088eb200 0 0x200>, |
1898 |
+ <0 0x088eb400 0 0x200>, |
1899 |
+ <0 0x088eb800 0 0x800>, |
1900 |
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi |
1901 |
+index b15d085db05ad..effbd6a9c9891 100644 |
1902 |
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi |
1903 |
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi |
1904 |
+@@ -591,7 +591,7 @@ |
1905 |
+ pins = "gpio39"; |
1906 |
+ function = "gpio"; |
1907 |
+ drive-strength = <2>; |
1908 |
+- bias-disabled; |
1909 |
++ bias-disable; |
1910 |
+ input-enable; |
1911 |
+ }; |
1912 |
+ |
1913 |
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi |
1914 |
+index b710bca456489..4e3b772a8bded 100644 |
1915 |
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi |
1916 |
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi |
1917 |
+@@ -1463,7 +1463,7 @@ |
1918 |
+ |
1919 |
+ status = "disabled"; |
1920 |
+ |
1921 |
+- pcie0_lane: lanes@1c06200 { |
1922 |
++ pcie0_lane: phy@1c06200 { |
1923 |
+ reg = <0 0x1c06200 0 0x170>, /* tx */ |
1924 |
+ <0 0x1c06400 0 0x200>, /* rx */ |
1925 |
+ <0 0x1c06800 0 0x1f0>, /* pcs */ |
1926 |
+@@ -1569,7 +1569,7 @@ |
1927 |
+ |
1928 |
+ status = "disabled"; |
1929 |
+ |
1930 |
+- pcie1_lane: lanes@1c0e200 { |
1931 |
++ pcie1_lane: phy@1c0e200 { |
1932 |
+ reg = <0 0x1c0e200 0 0x170>, /* tx0 */ |
1933 |
+ <0 0x1c0e400 0 0x200>, /* rx0 */ |
1934 |
+ <0 0x1c0ea00 0 0x1f0>, /* pcs */ |
1935 |
+@@ -1677,7 +1677,7 @@ |
1936 |
+ |
1937 |
+ status = "disabled"; |
1938 |
+ |
1939 |
+- pcie2_lane: lanes@1c16200 { |
1940 |
++ pcie2_lane: phy@1c16200 { |
1941 |
+ reg = <0 0x1c16200 0 0x170>, /* tx0 */ |
1942 |
+ <0 0x1c16400 0 0x200>, /* rx0 */ |
1943 |
+ <0 0x1c16a00 0 0x1f0>, /* pcs */ |
1944 |
+@@ -1756,12 +1756,12 @@ |
1945 |
+ reset-names = "ufsphy"; |
1946 |
+ status = "disabled"; |
1947 |
+ |
1948 |
+- ufs_mem_phy_lanes: lanes@1d87400 { |
1949 |
+- reg = <0 0x01d87400 0 0x108>, |
1950 |
+- <0 0x01d87600 0 0x1e0>, |
1951 |
+- <0 0x01d87c00 0 0x1dc>, |
1952 |
+- <0 0x01d87800 0 0x108>, |
1953 |
+- <0 0x01d87a00 0 0x1e0>; |
1954 |
++ ufs_mem_phy_lanes: phy@1d87400 { |
1955 |
++ reg = <0 0x01d87400 0 0x16c>, |
1956 |
++ <0 0x01d87600 0 0x200>, |
1957 |
++ <0 0x01d87c00 0 0x200>, |
1958 |
++ <0 0x01d87800 0 0x16c>, |
1959 |
++ <0 0x01d87a00 0 0x200>; |
1960 |
+ #phy-cells = <0>; |
1961 |
+ }; |
1962 |
+ }; |
1963 |
+@@ -1933,7 +1933,7 @@ |
1964 |
+ pins = "gpio7"; |
1965 |
+ function = "dmic1_data"; |
1966 |
+ drive-strength = <2>; |
1967 |
+- pull-down; |
1968 |
++ bias-pull-down; |
1969 |
+ input-enable; |
1970 |
+ }; |
1971 |
+ }; |
1972 |
+@@ -2306,15 +2306,11 @@ |
1973 |
+ dp_phy: dp-phy@88ea200 { |
1974 |
+ reg = <0 0x088ea200 0 0x200>, |
1975 |
+ <0 0x088ea400 0 0x200>, |
1976 |
+- <0 0x088eac00 0 0x400>, |
1977 |
++ <0 0x088eaa00 0 0x200>, |
1978 |
+ <0 0x088ea600 0 0x200>, |
1979 |
+- <0 0x088ea800 0 0x200>, |
1980 |
+- <0 0x088eaa00 0 0x100>; |
1981 |
++ <0 0x088ea800 0 0x200>; |
1982 |
+ #phy-cells = <0>; |
1983 |
+ #clock-cells = <1>; |
1984 |
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; |
1985 |
+- clock-names = "pipe0"; |
1986 |
+- clock-output-names = "usb3_phy_pipe_clk_src"; |
1987 |
+ }; |
1988 |
+ }; |
1989 |
+ |
1990 |
+@@ -2336,7 +2332,7 @@ |
1991 |
+ <&gcc GCC_USB3_PHY_SEC_BCR>; |
1992 |
+ reset-names = "phy", "common"; |
1993 |
+ |
1994 |
+- usb_2_ssphy: lanes@88eb200 { |
1995 |
++ usb_2_ssphy: phy@88eb200 { |
1996 |
+ reg = <0 0x088eb200 0 0x200>, |
1997 |
+ <0 0x088eb400 0 0x200>, |
1998 |
+ <0 0x088eb800 0 0x800>; |
1999 |
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi |
2000 |
+index c0a3ea47302f4..1ef16975d13a1 100644 |
2001 |
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi |
2002 |
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi |
2003 |
+@@ -1123,12 +1123,12 @@ |
2004 |
+ reset-names = "ufsphy"; |
2005 |
+ status = "disabled"; |
2006 |
+ |
2007 |
+- ufs_mem_phy_lanes: lanes@1d87400 { |
2008 |
+- reg = <0 0x01d87400 0 0x108>, |
2009 |
+- <0 0x01d87600 0 0x1e0>, |
2010 |
+- <0 0x01d87c00 0 0x1dc>, |
2011 |
+- <0 0x01d87800 0 0x108>, |
2012 |
+- <0 0x01d87a00 0 0x1e0>; |
2013 |
++ ufs_mem_phy_lanes: phy@1d87400 { |
2014 |
++ reg = <0 0x01d87400 0 0x188>, |
2015 |
++ <0 0x01d87600 0 0x200>, |
2016 |
++ <0 0x01d87c00 0 0x200>, |
2017 |
++ <0 0x01d87800 0 0x188>, |
2018 |
++ <0 0x01d87a00 0 0x200>; |
2019 |
+ #phy-cells = <0>; |
2020 |
+ #clock-cells = <0>; |
2021 |
+ }; |
2022 |
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi |
2023 |
+index 82be00069bcd5..4f232f575ab2a 100644 |
2024 |
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi |
2025 |
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi |
2026 |
+@@ -120,7 +120,6 @@ |
2027 |
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, |
2028 |
+ <&main_udmap 0x4001>; |
2029 |
+ dma-names = "tx", "rx1", "rx2"; |
2030 |
+- dma-coherent; |
2031 |
+ |
2032 |
+ rng: rng@4e10000 { |
2033 |
+ compatible = "inside-secure,safexcel-eip76"; |
2034 |
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi |
2035 |
+index 6c81997ee28ad..ad21bb1417aa6 100644 |
2036 |
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi |
2037 |
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi |
2038 |
+@@ -336,7 +336,6 @@ |
2039 |
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, |
2040 |
+ <&main_udmap 0x4001>; |
2041 |
+ dma-names = "tx", "rx1", "rx2"; |
2042 |
+- dma-coherent; |
2043 |
+ |
2044 |
+ rng: rng@4e10000 { |
2045 |
+ compatible = "inside-secure,safexcel-eip76"; |
2046 |
+diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h |
2047 |
+index 657c921fd784a..e1e10a24519b2 100644 |
2048 |
+--- a/arch/arm64/include/asm/debug-monitors.h |
2049 |
++++ b/arch/arm64/include/asm/debug-monitors.h |
2050 |
+@@ -76,7 +76,7 @@ struct task_struct; |
2051 |
+ |
2052 |
+ struct step_hook { |
2053 |
+ struct list_head node; |
2054 |
+- int (*fn)(struct pt_regs *regs, unsigned int esr); |
2055 |
++ int (*fn)(struct pt_regs *regs, unsigned long esr); |
2056 |
+ }; |
2057 |
+ |
2058 |
+ void register_user_step_hook(struct step_hook *hook); |
2059 |
+@@ -87,7 +87,7 @@ void unregister_kernel_step_hook(struct step_hook *hook); |
2060 |
+ |
2061 |
+ struct break_hook { |
2062 |
+ struct list_head node; |
2063 |
+- int (*fn)(struct pt_regs *regs, unsigned int esr); |
2064 |
++ int (*fn)(struct pt_regs *regs, unsigned long esr); |
2065 |
+ u16 imm; |
2066 |
+ u16 mask; /* These bits are ignored when comparing with imm */ |
2067 |
+ }; |
2068 |
+diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h |
2069 |
+index 8f59bbeba7a7e..9f91c8906edd9 100644 |
2070 |
+--- a/arch/arm64/include/asm/esr.h |
2071 |
++++ b/arch/arm64/include/asm/esr.h |
2072 |
+@@ -324,14 +324,14 @@ |
2073 |
+ #ifndef __ASSEMBLY__ |
2074 |
+ #include <asm/types.h> |
2075 |
+ |
2076 |
+-static inline bool esr_is_data_abort(u32 esr) |
2077 |
++static inline bool esr_is_data_abort(unsigned long esr) |
2078 |
+ { |
2079 |
+- const u32 ec = ESR_ELx_EC(esr); |
2080 |
++ const unsigned long ec = ESR_ELx_EC(esr); |
2081 |
+ |
2082 |
+ return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR; |
2083 |
+ } |
2084 |
+ |
2085 |
+-const char *esr_get_class_string(u32 esr); |
2086 |
++const char *esr_get_class_string(unsigned long esr); |
2087 |
+ #endif /* __ASSEMBLY */ |
2088 |
+ |
2089 |
+ #endif /* __ASM_ESR_H */ |
2090 |
+diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h |
2091 |
+index 339477dca5513..0e6535aa78c2f 100644 |
2092 |
+--- a/arch/arm64/include/asm/exception.h |
2093 |
++++ b/arch/arm64/include/asm/exception.h |
2094 |
+@@ -19,9 +19,9 @@ |
2095 |
+ #define __exception_irq_entry __kprobes |
2096 |
+ #endif |
2097 |
+ |
2098 |
+-static inline u32 disr_to_esr(u64 disr) |
2099 |
++static inline unsigned long disr_to_esr(u64 disr) |
2100 |
+ { |
2101 |
+- unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; |
2102 |
++ unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; |
2103 |
+ |
2104 |
+ if ((disr & DISR_EL1_IDS) == 0) |
2105 |
+ esr |= (disr & DISR_EL1_ESR_MASK); |
2106 |
+@@ -57,23 +57,23 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, |
2107 |
+ void (*func)(struct pt_regs *)); |
2108 |
+ asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); |
2109 |
+ |
2110 |
+-void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); |
2111 |
++void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); |
2112 |
+ void do_undefinstr(struct pt_regs *regs); |
2113 |
+ void do_bti(struct pt_regs *regs); |
2114 |
+-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, |
2115 |
++void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, |
2116 |
+ struct pt_regs *regs); |
2117 |
+-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); |
2118 |
+-void do_sve_acc(unsigned int esr, struct pt_regs *regs); |
2119 |
+-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); |
2120 |
+-void do_sysinstr(unsigned int esr, struct pt_regs *regs); |
2121 |
+-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); |
2122 |
+-void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); |
2123 |
+-void do_cp15instr(unsigned int esr, struct pt_regs *regs); |
2124 |
++void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs); |
2125 |
++void do_sve_acc(unsigned long esr, struct pt_regs *regs); |
2126 |
++void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs); |
2127 |
++void do_sysinstr(unsigned long esr, struct pt_regs *regs); |
2128 |
++void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs); |
2129 |
++void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); |
2130 |
++void do_cp15instr(unsigned long esr, struct pt_regs *regs); |
2131 |
+ void do_el0_svc(struct pt_regs *regs); |
2132 |
+ void do_el0_svc_compat(struct pt_regs *regs); |
2133 |
+-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); |
2134 |
+-void do_serror(struct pt_regs *regs, unsigned int esr); |
2135 |
++void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr); |
2136 |
++void do_serror(struct pt_regs *regs, unsigned long esr); |
2137 |
+ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); |
2138 |
+ |
2139 |
+-void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); |
2140 |
++void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); |
2141 |
+ #endif /* __ASM_EXCEPTION_H */ |
2142 |
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h |
2143 |
+index d9bf3d12a2b85..7364530de0a77 100644 |
2144 |
+--- a/arch/arm64/include/asm/processor.h |
2145 |
++++ b/arch/arm64/include/asm/processor.h |
2146 |
+@@ -240,13 +240,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, |
2147 |
+ } |
2148 |
+ #endif |
2149 |
+ |
2150 |
+-static inline bool is_ttbr0_addr(unsigned long addr) |
2151 |
++static __always_inline bool is_ttbr0_addr(unsigned long addr) |
2152 |
+ { |
2153 |
+ /* entry assembly clears tags for TTBR0 addrs */ |
2154 |
+ return addr < TASK_SIZE; |
2155 |
+ } |
2156 |
+ |
2157 |
+-static inline bool is_ttbr1_addr(unsigned long addr) |
2158 |
++static __always_inline bool is_ttbr1_addr(unsigned long addr) |
2159 |
+ { |
2160 |
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ |
2161 |
+ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; |
2162 |
+diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h |
2163 |
+index 305a7157c6a6a..0eb7709422e29 100644 |
2164 |
+--- a/arch/arm64/include/asm/system_misc.h |
2165 |
++++ b/arch/arm64/include/asm/system_misc.h |
2166 |
+@@ -23,9 +23,9 @@ void die(const char *msg, struct pt_regs *regs, int err); |
2167 |
+ struct siginfo; |
2168 |
+ void arm64_notify_die(const char *str, struct pt_regs *regs, |
2169 |
+ int signo, int sicode, unsigned long far, |
2170 |
+- int err); |
2171 |
++ unsigned long err); |
2172 |
+ |
2173 |
+-void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, |
2174 |
++void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long, |
2175 |
+ struct pt_regs *), |
2176 |
+ int sig, int code, const char *name); |
2177 |
+ |
2178 |
+diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h |
2179 |
+index 54f32a0675dff..6e5826470bea6 100644 |
2180 |
+--- a/arch/arm64/include/asm/traps.h |
2181 |
++++ b/arch/arm64/include/asm/traps.h |
2182 |
+@@ -24,7 +24,7 @@ struct undef_hook { |
2183 |
+ |
2184 |
+ void register_undef_hook(struct undef_hook *hook); |
2185 |
+ void unregister_undef_hook(struct undef_hook *hook); |
2186 |
+-void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); |
2187 |
++void force_signal_inject(int signal, int code, unsigned long address, unsigned long err); |
2188 |
+ void arm64_notify_segfault(unsigned long addr); |
2189 |
+ void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); |
2190 |
+ void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); |
2191 |
+@@ -57,7 +57,7 @@ static inline int in_entry_text(unsigned long ptr) |
2192 |
+ * errors share the same encoding as an all-zeros encoding from a CPU that |
2193 |
+ * doesn't support RAS. |
2194 |
+ */ |
2195 |
+-static inline bool arm64_is_ras_serror(u32 esr) |
2196 |
++static inline bool arm64_is_ras_serror(unsigned long esr) |
2197 |
+ { |
2198 |
+ WARN_ON(preemptible()); |
2199 |
+ |
2200 |
+@@ -77,9 +77,9 @@ static inline bool arm64_is_ras_serror(u32 esr) |
2201 |
+ * We treat them as Uncontainable. |
2202 |
+ * Non-RAS SError's are reported as Uncontained/Uncategorized. |
2203 |
+ */ |
2204 |
+-static inline u32 arm64_ras_serror_get_severity(u32 esr) |
2205 |
++static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr) |
2206 |
+ { |
2207 |
+- u32 aet = esr & ESR_ELx_AET; |
2208 |
++ unsigned long aet = esr & ESR_ELx_AET; |
2209 |
+ |
2210 |
+ if (!arm64_is_ras_serror(esr)) { |
2211 |
+ /* Not a RAS error, we can't interpret the ESR. */ |
2212 |
+@@ -98,6 +98,6 @@ static inline u32 arm64_ras_serror_get_severity(u32 esr) |
2213 |
+ return aet; |
2214 |
+ } |
2215 |
+ |
2216 |
+-bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr); |
2217 |
+-void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr); |
2218 |
++bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr); |
2219 |
++void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr); |
2220 |
+ #endif |
2221 |
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c |
2222 |
+index 4f3661eeb7ec6..bf9fe71589bca 100644 |
2223 |
+--- a/arch/arm64/kernel/debug-monitors.c |
2224 |
++++ b/arch/arm64/kernel/debug-monitors.c |
2225 |
+@@ -202,7 +202,7 @@ void unregister_kernel_step_hook(struct step_hook *hook) |
2226 |
+ * So we call all the registered handlers, until the right handler is |
2227 |
+ * found which returns zero. |
2228 |
+ */ |
2229 |
+-static int call_step_hook(struct pt_regs *regs, unsigned int esr) |
2230 |
++static int call_step_hook(struct pt_regs *regs, unsigned long esr) |
2231 |
+ { |
2232 |
+ struct step_hook *hook; |
2233 |
+ struct list_head *list; |
2234 |
+@@ -238,7 +238,7 @@ static void send_user_sigtrap(int si_code) |
2235 |
+ "User debug trap"); |
2236 |
+ } |
2237 |
+ |
2238 |
+-static int single_step_handler(unsigned long unused, unsigned int esr, |
2239 |
++static int single_step_handler(unsigned long unused, unsigned long esr, |
2240 |
+ struct pt_regs *regs) |
2241 |
+ { |
2242 |
+ bool handler_found = false; |
2243 |
+@@ -299,11 +299,11 @@ void unregister_kernel_break_hook(struct break_hook *hook) |
2244 |
+ unregister_debug_hook(&hook->node); |
2245 |
+ } |
2246 |
+ |
2247 |
+-static int call_break_hook(struct pt_regs *regs, unsigned int esr) |
2248 |
++static int call_break_hook(struct pt_regs *regs, unsigned long esr) |
2249 |
+ { |
2250 |
+ struct break_hook *hook; |
2251 |
+ struct list_head *list; |
2252 |
+- int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; |
2253 |
++ int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL; |
2254 |
+ |
2255 |
+ list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; |
2256 |
+ |
2257 |
+@@ -312,7 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) |
2258 |
+ * entirely not preemptible, and we can use rcu list safely here. |
2259 |
+ */ |
2260 |
+ list_for_each_entry_rcu(hook, list, node) { |
2261 |
+- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
2262 |
++ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
2263 |
+ |
2264 |
+ if ((comment & ~hook->mask) == hook->imm) |
2265 |
+ fn = hook->fn; |
2266 |
+@@ -322,7 +322,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) |
2267 |
+ } |
2268 |
+ NOKPROBE_SYMBOL(call_break_hook); |
2269 |
+ |
2270 |
+-static int brk_handler(unsigned long unused, unsigned int esr, |
2271 |
++static int brk_handler(unsigned long unused, unsigned long esr, |
2272 |
+ struct pt_regs *regs) |
2273 |
+ { |
2274 |
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) |
2275 |
+diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c |
2276 |
+index 8ecca795aca0b..fc91dad1579ab 100644 |
2277 |
+--- a/arch/arm64/kernel/entry-common.c |
2278 |
++++ b/arch/arm64/kernel/entry-common.c |
2279 |
+@@ -273,13 +273,13 @@ extern void (*handle_arch_irq)(struct pt_regs *); |
2280 |
+ extern void (*handle_arch_fiq)(struct pt_regs *); |
2281 |
+ |
2282 |
+ static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, |
2283 |
+- unsigned int esr) |
2284 |
++ unsigned long esr) |
2285 |
+ { |
2286 |
+ arm64_enter_nmi(regs); |
2287 |
+ |
2288 |
+ console_verbose(); |
2289 |
+ |
2290 |
+- pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", |
2291 |
++ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n", |
2292 |
+ vector, smp_processor_id(), esr, |
2293 |
+ esr_get_class_string(esr)); |
2294 |
+ |
2295 |
+@@ -796,7 +796,7 @@ UNHANDLED(el0t, 32, error) |
2296 |
+ #ifdef CONFIG_VMAP_STACK |
2297 |
+ asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) |
2298 |
+ { |
2299 |
+- unsigned int esr = read_sysreg(esr_el1); |
2300 |
++ unsigned long esr = read_sysreg(esr_el1); |
2301 |
+ unsigned long far = read_sysreg(far_el1); |
2302 |
+ |
2303 |
+ arm64_enter_nmi(regs); |
2304 |
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c |
2305 |
+index ff4962750b3d0..7a3fcf21b18a7 100644 |
2306 |
+--- a/arch/arm64/kernel/fpsimd.c |
2307 |
++++ b/arch/arm64/kernel/fpsimd.c |
2308 |
+@@ -930,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task) |
2309 |
+ * would have disabled the SVE access trap for userspace during |
2310 |
+ * ret_to_user, making an SVE access trap impossible in that case. |
2311 |
+ */ |
2312 |
+-void do_sve_acc(unsigned int esr, struct pt_regs *regs) |
2313 |
++void do_sve_acc(unsigned long esr, struct pt_regs *regs) |
2314 |
+ { |
2315 |
+ /* Even if we chose not to use SVE, the hardware could still trap: */ |
2316 |
+ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { |
2317 |
+@@ -972,7 +972,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) |
2318 |
+ /* |
2319 |
+ * Trapped FP/ASIMD access. |
2320 |
+ */ |
2321 |
+-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) |
2322 |
++void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) |
2323 |
+ { |
2324 |
+ /* TODO: implement lazy context saving/restoring */ |
2325 |
+ WARN_ON(1); |
2326 |
+@@ -981,7 +981,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) |
2327 |
+ /* |
2328 |
+ * Raise a SIGFPE for the current process. |
2329 |
+ */ |
2330 |
+-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) |
2331 |
++void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) |
2332 |
+ { |
2333 |
+ unsigned int si_code = FPE_FLTUNK; |
2334 |
+ |
2335 |
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c |
2336 |
+index 712e97c03e54c..2a7f21314cde6 100644 |
2337 |
+--- a/arch/arm64/kernel/hw_breakpoint.c |
2338 |
++++ b/arch/arm64/kernel/hw_breakpoint.c |
2339 |
+@@ -617,7 +617,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers); |
2340 |
+ /* |
2341 |
+ * Debug exception handlers. |
2342 |
+ */ |
2343 |
+-static int breakpoint_handler(unsigned long unused, unsigned int esr, |
2344 |
++static int breakpoint_handler(unsigned long unused, unsigned long esr, |
2345 |
+ struct pt_regs *regs) |
2346 |
+ { |
2347 |
+ int i, step = 0, *kernel_step; |
2348 |
+@@ -751,7 +751,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr, |
2349 |
+ return step; |
2350 |
+ } |
2351 |
+ |
2352 |
+-static int watchpoint_handler(unsigned long addr, unsigned int esr, |
2353 |
++static int watchpoint_handler(unsigned long addr, unsigned long esr, |
2354 |
+ struct pt_regs *regs) |
2355 |
+ { |
2356 |
+ int i, step = 0, *kernel_step, access, closest_match = 0; |
2357 |
+diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c |
2358 |
+index 2aede780fb80c..cda9c1e9864f7 100644 |
2359 |
+--- a/arch/arm64/kernel/kgdb.c |
2360 |
++++ b/arch/arm64/kernel/kgdb.c |
2361 |
+@@ -232,14 +232,14 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, |
2362 |
+ return err; |
2363 |
+ } |
2364 |
+ |
2365 |
+-static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) |
2366 |
++static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr) |
2367 |
+ { |
2368 |
+ kgdb_handle_exception(1, SIGTRAP, 0, regs); |
2369 |
+ return DBG_HOOK_HANDLED; |
2370 |
+ } |
2371 |
+ NOKPROBE_SYMBOL(kgdb_brk_fn) |
2372 |
+ |
2373 |
+-static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) |
2374 |
++static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr) |
2375 |
+ { |
2376 |
+ compiled_break = 1; |
2377 |
+ kgdb_handle_exception(1, SIGTRAP, 0, regs); |
2378 |
+@@ -248,7 +248,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) |
2379 |
+ } |
2380 |
+ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); |
2381 |
+ |
2382 |
+-static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) |
2383 |
++static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr) |
2384 |
+ { |
2385 |
+ if (!kgdb_single_step) |
2386 |
+ return DBG_HOOK_ERROR; |
2387 |
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c |
2388 |
+index 6dbcc89f66627..b7404dba0d623 100644 |
2389 |
+--- a/arch/arm64/kernel/probes/kprobes.c |
2390 |
++++ b/arch/arm64/kernel/probes/kprobes.c |
2391 |
+@@ -332,7 +332,7 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) |
2392 |
+ } |
2393 |
+ |
2394 |
+ static int __kprobes |
2395 |
+-kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) |
2396 |
++kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr) |
2397 |
+ { |
2398 |
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
2399 |
+ unsigned long addr = instruction_pointer(regs); |
2400 |
+@@ -356,7 +356,7 @@ static struct break_hook kprobes_break_ss_hook = { |
2401 |
+ }; |
2402 |
+ |
2403 |
+ static int __kprobes |
2404 |
+-kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) |
2405 |
++kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) |
2406 |
+ { |
2407 |
+ kprobe_handler(regs); |
2408 |
+ return DBG_HOOK_HANDLED; |
2409 |
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c |
2410 |
+index 9be668f3f0341..d49aef2657cdf 100644 |
2411 |
+--- a/arch/arm64/kernel/probes/uprobes.c |
2412 |
++++ b/arch/arm64/kernel/probes/uprobes.c |
2413 |
+@@ -166,7 +166,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, |
2414 |
+ } |
2415 |
+ |
2416 |
+ static int uprobe_breakpoint_handler(struct pt_regs *regs, |
2417 |
+- unsigned int esr) |
2418 |
++ unsigned long esr) |
2419 |
+ { |
2420 |
+ if (uprobe_pre_sstep_notifier(regs)) |
2421 |
+ return DBG_HOOK_HANDLED; |
2422 |
+@@ -175,7 +175,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs, |
2423 |
+ } |
2424 |
+ |
2425 |
+ static int uprobe_single_step_handler(struct pt_regs *regs, |
2426 |
+- unsigned int esr) |
2427 |
++ unsigned long esr) |
2428 |
+ { |
2429 |
+ struct uprobe_task *utask = current->utask; |
2430 |
+ |
2431 |
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c |
2432 |
+index fe0cd0568813e..f859cc870d5b3 100644 |
2433 |
+--- a/arch/arm64/kernel/traps.c |
2434 |
++++ b/arch/arm64/kernel/traps.c |
2435 |
+@@ -243,7 +243,7 @@ static void arm64_show_signal(int signo, const char *str) |
2436 |
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, |
2437 |
+ DEFAULT_RATELIMIT_BURST); |
2438 |
+ struct task_struct *tsk = current; |
2439 |
+- unsigned int esr = tsk->thread.fault_code; |
2440 |
++ unsigned long esr = tsk->thread.fault_code; |
2441 |
+ struct pt_regs *regs = task_pt_regs(tsk); |
2442 |
+ |
2443 |
+ /* Leave if the signal won't be shown */ |
2444 |
+@@ -254,7 +254,7 @@ static void arm64_show_signal(int signo, const char *str) |
2445 |
+ |
2446 |
+ pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); |
2447 |
+ if (esr) |
2448 |
+- pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr); |
2449 |
++ pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr); |
2450 |
+ |
2451 |
+ pr_cont("%s", str); |
2452 |
+ print_vma_addr(KERN_CONT " in ", regs->pc); |
2453 |
+@@ -288,7 +288,7 @@ void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, |
2454 |
+ |
2455 |
+ void arm64_notify_die(const char *str, struct pt_regs *regs, |
2456 |
+ int signo, int sicode, unsigned long far, |
2457 |
+- int err) |
2458 |
++ unsigned long err) |
2459 |
+ { |
2460 |
+ if (user_mode(regs)) { |
2461 |
+ WARN_ON(regs != current_pt_regs()); |
2462 |
+@@ -440,7 +440,7 @@ exit: |
2463 |
+ return fn ? fn(regs, instr) : 1; |
2464 |
+ } |
2465 |
+ |
2466 |
+-void force_signal_inject(int signal, int code, unsigned long address, unsigned int err) |
2467 |
++void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) |
2468 |
+ { |
2469 |
+ const char *desc; |
2470 |
+ struct pt_regs *regs = current_pt_regs(); |
2471 |
+@@ -507,7 +507,7 @@ void do_bti(struct pt_regs *regs) |
2472 |
+ } |
2473 |
+ NOKPROBE_SYMBOL(do_bti); |
2474 |
+ |
2475 |
+-void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) |
2476 |
++void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr) |
2477 |
+ { |
2478 |
+ /* |
2479 |
+ * Unexpected FPAC exception or pointer authentication failure in |
2480 |
+@@ -538,7 +538,7 @@ NOKPROBE_SYMBOL(do_ptrauth_fault); |
2481 |
+ uaccess_ttbr0_disable(); \ |
2482 |
+ } |
2483 |
+ |
2484 |
+-static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) |
2485 |
++static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) |
2486 |
+ { |
2487 |
+ unsigned long tagged_address, address; |
2488 |
+ int rt = ESR_ELx_SYS64_ISS_RT(esr); |
2489 |
+@@ -578,7 +578,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) |
2490 |
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
2491 |
+ } |
2492 |
+ |
2493 |
+-static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) |
2494 |
++static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) |
2495 |
+ { |
2496 |
+ int rt = ESR_ELx_SYS64_ISS_RT(esr); |
2497 |
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); |
2498 |
+@@ -597,7 +597,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) |
2499 |
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
2500 |
+ } |
2501 |
+ |
2502 |
+-static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) |
2503 |
++static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) |
2504 |
+ { |
2505 |
+ int rt = ESR_ELx_SYS64_ISS_RT(esr); |
2506 |
+ |
2507 |
+@@ -605,7 +605,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) |
2508 |
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
2509 |
+ } |
2510 |
+ |
2511 |
+-static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) |
2512 |
++static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) |
2513 |
+ { |
2514 |
+ int rt = ESR_ELx_SYS64_ISS_RT(esr); |
2515 |
+ |
2516 |
+@@ -613,7 +613,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) |
2517 |
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
2518 |
+ } |
2519 |
+ |
2520 |
+-static void mrs_handler(unsigned int esr, struct pt_regs *regs) |
2521 |
++static void mrs_handler(unsigned long esr, struct pt_regs *regs) |
2522 |
+ { |
2523 |
+ u32 sysreg, rt; |
2524 |
+ |
2525 |
+@@ -624,15 +624,15 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs) |
2526 |
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
2527 |
+ } |
2528 |
+ |
2529 |
+-static void wfi_handler(unsigned int esr, struct pt_regs *regs) |
2530 |
++static void wfi_handler(unsigned long esr, struct pt_regs *regs) |
2531 |
+ { |
2532 |
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
2533 |
+ } |
2534 |
+ |
2535 |
+ struct sys64_hook { |
2536 |
+- unsigned int esr_mask; |
2537 |
+- unsigned int esr_val; |
2538 |
+- void (*handler)(unsigned int esr, struct pt_regs *regs); |
2539 |
++ unsigned long esr_mask; |
2540 |
++ unsigned long esr_val; |
2541 |
++ void (*handler)(unsigned long esr, struct pt_regs *regs); |
2542 |
+ }; |
2543 |
+ |
2544 |
+ static const struct sys64_hook sys64_hooks[] = { |
2545 |
+@@ -675,7 +675,7 @@ static const struct sys64_hook sys64_hooks[] = { |
2546 |
+ }; |
2547 |
+ |
2548 |
+ #ifdef CONFIG_COMPAT |
2549 |
+-static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) |
2550 |
++static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs) |
2551 |
+ { |
2552 |
+ int cond; |
2553 |
+ |
2554 |
+@@ -695,7 +695,7 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) |
2555 |
+ return aarch32_opcode_cond_checks[cond](regs->pstate); |
2556 |
+ } |
2557 |
+ |
2558 |
+-static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) |
2559 |
++static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) |
2560 |
+ { |
2561 |
+ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; |
2562 |
+ |
2563 |
+@@ -712,7 +712,7 @@ static const struct sys64_hook cp15_32_hooks[] = { |
2564 |
+ {}, |
2565 |
+ }; |
2566 |
+ |
2567 |
+-static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) |
2568 |
++static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) |
2569 |
+ { |
2570 |
+ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; |
2571 |
+ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; |
2572 |
+@@ -732,7 +732,7 @@ static const struct sys64_hook cp15_64_hooks[] = { |
2573 |
+ {}, |
2574 |
+ }; |
2575 |
+ |
2576 |
+-void do_cp15instr(unsigned int esr, struct pt_regs *regs) |
2577 |
++void do_cp15instr(unsigned long esr, struct pt_regs *regs) |
2578 |
+ { |
2579 |
+ const struct sys64_hook *hook, *hook_base; |
2580 |
+ |
2581 |
+@@ -773,7 +773,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs) |
2582 |
+ NOKPROBE_SYMBOL(do_cp15instr); |
2583 |
+ #endif |
2584 |
+ |
2585 |
+-void do_sysinstr(unsigned int esr, struct pt_regs *regs) |
2586 |
++void do_sysinstr(unsigned long esr, struct pt_regs *regs) |
2587 |
+ { |
2588 |
+ const struct sys64_hook *hook; |
2589 |
+ |
2590 |
+@@ -837,7 +837,7 @@ static const char *esr_class_str[] = { |
2591 |
+ [ESR_ELx_EC_BRK64] = "BRK (AArch64)", |
2592 |
+ }; |
2593 |
+ |
2594 |
+-const char *esr_get_class_string(u32 esr) |
2595 |
++const char *esr_get_class_string(unsigned long esr) |
2596 |
+ { |
2597 |
+ return esr_class_str[ESR_ELx_EC(esr)]; |
2598 |
+ } |
2599 |
+@@ -846,7 +846,7 @@ const char *esr_get_class_string(u32 esr) |
2600 |
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous |
2601 |
+ * exceptions taken from EL0. |
2602 |
+ */ |
2603 |
+-void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) |
2604 |
++void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) |
2605 |
+ { |
2606 |
+ unsigned long pc = instruction_pointer(regs); |
2607 |
+ |
2608 |
+@@ -862,7 +862,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) |
2609 |
+ DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) |
2610 |
+ __aligned(16); |
2611 |
+ |
2612 |
+-void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) |
2613 |
++void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) |
2614 |
+ { |
2615 |
+ unsigned long tsk_stk = (unsigned long)current->stack; |
2616 |
+ unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); |
2617 |
+@@ -871,7 +871,7 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) |
2618 |
+ console_verbose(); |
2619 |
+ pr_emerg("Insufficient stack space to handle exception!"); |
2620 |
+ |
2621 |
+- pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr)); |
2622 |
++ pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr)); |
2623 |
+ pr_emerg("FAR: 0x%016lx\n", far); |
2624 |
+ |
2625 |
+ pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", |
2626 |
+@@ -892,11 +892,11 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) |
2627 |
+ } |
2628 |
+ #endif |
2629 |
+ |
2630 |
+-void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) |
2631 |
++void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) |
2632 |
+ { |
2633 |
+ console_verbose(); |
2634 |
+ |
2635 |
+- pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n", |
2636 |
++ pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", |
2637 |
+ smp_processor_id(), esr, esr_get_class_string(esr)); |
2638 |
+ if (regs) |
2639 |
+ __show_regs(regs); |
2640 |
+@@ -907,9 +907,9 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) |
2641 |
+ unreachable(); |
2642 |
+ } |
2643 |
+ |
2644 |
+-bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) |
2645 |
++bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) |
2646 |
+ { |
2647 |
+- u32 aet = arm64_ras_serror_get_severity(esr); |
2648 |
++ unsigned long aet = arm64_ras_serror_get_severity(esr); |
2649 |
+ |
2650 |
+ switch (aet) { |
2651 |
+ case ESR_ELx_AET_CE: /* corrected error */ |
2652 |
+@@ -939,7 +939,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) |
2653 |
+ } |
2654 |
+ } |
2655 |
+ |
2656 |
+-void do_serror(struct pt_regs *regs, unsigned int esr) |
2657 |
++void do_serror(struct pt_regs *regs, unsigned long esr) |
2658 |
+ { |
2659 |
+ /* non-RAS errors are not containable */ |
2660 |
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) |
2661 |
+@@ -960,7 +960,7 @@ int is_valid_bugaddr(unsigned long addr) |
2662 |
+ return 1; |
2663 |
+ } |
2664 |
+ |
2665 |
+-static int bug_handler(struct pt_regs *regs, unsigned int esr) |
2666 |
++static int bug_handler(struct pt_regs *regs, unsigned long esr) |
2667 |
+ { |
2668 |
+ switch (report_bug(regs->pc, regs)) { |
2669 |
+ case BUG_TRAP_TYPE_BUG: |
2670 |
+@@ -985,7 +985,7 @@ static struct break_hook bug_break_hook = { |
2671 |
+ .imm = BUG_BRK_IMM, |
2672 |
+ }; |
2673 |
+ |
2674 |
+-static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) |
2675 |
++static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) |
2676 |
+ { |
2677 |
+ pr_err("%s generated an invalid instruction at %pS!\n", |
2678 |
+ "Kernel text patching", |
2679 |
+@@ -1007,7 +1007,7 @@ static struct break_hook fault_break_hook = { |
2680 |
+ #define KASAN_ESR_SIZE_MASK 0x0f |
2681 |
+ #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) |
2682 |
+ |
2683 |
+-static int kasan_handler(struct pt_regs *regs, unsigned int esr) |
2684 |
++static int kasan_handler(struct pt_regs *regs, unsigned long esr) |
2685 |
+ { |
2686 |
+ bool recover = esr & KASAN_ESR_RECOVER; |
2687 |
+ bool write = esr & KASAN_ESR_WRITE; |
2688 |
+@@ -1050,11 +1050,11 @@ static struct break_hook kasan_break_hook = { |
2689 |
+ * Initial handler for AArch64 BRK exceptions |
2690 |
+ * This handler only used until debug_traps_init(). |
2691 |
+ */ |
2692 |
+-int __init early_brk64(unsigned long addr, unsigned int esr, |
2693 |
++int __init early_brk64(unsigned long addr, unsigned long esr, |
2694 |
+ struct pt_regs *regs) |
2695 |
+ { |
2696 |
+ #ifdef CONFIG_KASAN_SW_TAGS |
2697 |
+- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
2698 |
++ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; |
2699 |
+ |
2700 |
+ if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) |
2701 |
+ return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; |
2702 |
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c |
2703 |
+index 9ae24e3b72be1..d09b21faa0b23 100644 |
2704 |
+--- a/arch/arm64/mm/fault.c |
2705 |
++++ b/arch/arm64/mm/fault.c |
2706 |
+@@ -43,7 +43,7 @@ |
2707 |
+ #include <asm/traps.h> |
2708 |
+ |
2709 |
+ struct fault_info { |
2710 |
+- int (*fn)(unsigned long far, unsigned int esr, |
2711 |
++ int (*fn)(unsigned long far, unsigned long esr, |
2712 |
+ struct pt_regs *regs); |
2713 |
+ int sig; |
2714 |
+ int code; |
2715 |
+@@ -53,17 +53,17 @@ struct fault_info { |
2716 |
+ static const struct fault_info fault_info[]; |
2717 |
+ static struct fault_info debug_fault_info[]; |
2718 |
+ |
2719 |
+-static inline const struct fault_info *esr_to_fault_info(unsigned int esr) |
2720 |
++static inline const struct fault_info *esr_to_fault_info(unsigned long esr) |
2721 |
+ { |
2722 |
+ return fault_info + (esr & ESR_ELx_FSC); |
2723 |
+ } |
2724 |
+ |
2725 |
+-static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) |
2726 |
++static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr) |
2727 |
+ { |
2728 |
+ return debug_fault_info + DBG_ESR_EVT(esr); |
2729 |
+ } |
2730 |
+ |
2731 |
+-static void data_abort_decode(unsigned int esr) |
2732 |
++static void data_abort_decode(unsigned long esr) |
2733 |
+ { |
2734 |
+ pr_alert("Data abort info:\n"); |
2735 |
+ |
2736 |
+@@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr) |
2737 |
+ (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); |
2738 |
+ } |
2739 |
+ |
2740 |
+-static void mem_abort_decode(unsigned int esr) |
2741 |
++static void mem_abort_decode(unsigned long esr) |
2742 |
+ { |
2743 |
+ pr_alert("Mem abort info:\n"); |
2744 |
+ |
2745 |
+- pr_alert(" ESR = 0x%08x\n", esr); |
2746 |
++ pr_alert(" ESR = 0x%016lx\n", esr); |
2747 |
+ pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", |
2748 |
+ ESR_ELx_EC(esr), esr_get_class_string(esr), |
2749 |
+ (esr & ESR_ELx_IL) ? 32 : 16); |
2750 |
+@@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr) |
2751 |
+ pr_alert(" EA = %lu, S1PTW = %lu\n", |
2752 |
+ (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, |
2753 |
+ (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); |
2754 |
+- pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), |
2755 |
++ pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC), |
2756 |
+ esr_to_fault_info(esr)->name); |
2757 |
+ |
2758 |
+ if (esr_is_data_abort(esr)) |
2759 |
+@@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma, |
2760 |
+ return 1; |
2761 |
+ } |
2762 |
+ |
2763 |
+-static bool is_el1_instruction_abort(unsigned int esr) |
2764 |
++static bool is_el1_instruction_abort(unsigned long esr) |
2765 |
+ { |
2766 |
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; |
2767 |
+ } |
2768 |
+ |
2769 |
+-static bool is_el1_data_abort(unsigned int esr) |
2770 |
++static bool is_el1_data_abort(unsigned long esr) |
2771 |
+ { |
2772 |
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; |
2773 |
+ } |
2774 |
+ |
2775 |
+-static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, |
2776 |
++static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr, |
2777 |
+ struct pt_regs *regs) |
2778 |
+ { |
2779 |
+- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; |
2780 |
++ unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE; |
2781 |
+ |
2782 |
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) |
2783 |
+ return false; |
2784 |
+@@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, |
2785 |
+ } |
2786 |
+ |
2787 |
+ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, |
2788 |
+- unsigned int esr, |
2789 |
++ unsigned long esr, |
2790 |
+ struct pt_regs *regs) |
2791 |
+ { |
2792 |
+ unsigned long flags; |
2793 |
+@@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, |
2794 |
+ } |
2795 |
+ |
2796 |
+ static void die_kernel_fault(const char *msg, unsigned long addr, |
2797 |
+- unsigned int esr, struct pt_regs *regs) |
2798 |
++ unsigned long esr, struct pt_regs *regs) |
2799 |
+ { |
2800 |
+ bust_spinlocks(1); |
2801 |
+ |
2802 |
+@@ -306,7 +306,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, |
2803 |
+ } |
2804 |
+ |
2805 |
+ #ifdef CONFIG_KASAN_HW_TAGS |
2806 |
+-static void report_tag_fault(unsigned long addr, unsigned int esr, |
2807 |
++static void report_tag_fault(unsigned long addr, unsigned long esr, |
2808 |
+ struct pt_regs *regs) |
2809 |
+ { |
2810 |
+ /* |
2811 |
+@@ -318,11 +318,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr, |
2812 |
+ } |
2813 |
+ #else |
2814 |
+ /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */ |
2815 |
+-static inline void report_tag_fault(unsigned long addr, unsigned int esr, |
2816 |
++static inline void report_tag_fault(unsigned long addr, unsigned long esr, |
2817 |
+ struct pt_regs *regs) { } |
2818 |
+ #endif |
2819 |
+ |
2820 |
+-static void do_tag_recovery(unsigned long addr, unsigned int esr, |
2821 |
++static void do_tag_recovery(unsigned long addr, unsigned long esr, |
2822 |
+ struct pt_regs *regs) |
2823 |
+ { |
2824 |
+ |
2825 |
+@@ -337,9 +337,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, |
2826 |
+ isb(); |
2827 |
+ } |
2828 |
+ |
2829 |
+-static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) |
2830 |
++static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) |
2831 |
+ { |
2832 |
+- unsigned int fsc = esr & ESR_ELx_FSC; |
2833 |
++ unsigned long fsc = esr & ESR_ELx_FSC; |
2834 |
+ |
2835 |
+ if (!is_el1_data_abort(esr)) |
2836 |
+ return false; |
2837 |
+@@ -350,7 +350,12 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) |
2838 |
+ return false; |
2839 |
+ } |
2840 |
+ |
2841 |
+-static void __do_kernel_fault(unsigned long addr, unsigned int esr, |
2842 |
++static bool is_translation_fault(unsigned long esr) |
2843 |
++{ |
2844 |
++ return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT; |
2845 |
++} |
2846 |
++ |
2847 |
++static void __do_kernel_fault(unsigned long addr, unsigned long esr, |
2848 |
+ struct pt_regs *regs) |
2849 |
+ { |
2850 |
+ const char *msg; |
2851 |
+@@ -382,7 +387,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, |
2852 |
+ } else if (addr < PAGE_SIZE) { |
2853 |
+ msg = "NULL pointer dereference"; |
2854 |
+ } else { |
2855 |
+- if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) |
2856 |
++ if (is_translation_fault(esr) && |
2857 |
++ kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) |
2858 |
+ return; |
2859 |
+ |
2860 |
+ msg = "paging request"; |
2861 |
+@@ -391,7 +397,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, |
2862 |
+ die_kernel_fault(msg, addr, esr, regs); |
2863 |
+ } |
2864 |
+ |
2865 |
+-static void set_thread_esr(unsigned long address, unsigned int esr) |
2866 |
++static void set_thread_esr(unsigned long address, unsigned long esr) |
2867 |
+ { |
2868 |
+ current->thread.fault_address = address; |
2869 |
+ |
2870 |
+@@ -439,7 +445,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) |
2871 |
+ * exception level). Fail safe by not providing an ESR |
2872 |
+ * context record at all. |
2873 |
+ */ |
2874 |
+- WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); |
2875 |
++ WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr); |
2876 |
+ esr = 0; |
2877 |
+ break; |
2878 |
+ } |
2879 |
+@@ -448,7 +454,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) |
2880 |
+ current->thread.fault_code = esr; |
2881 |
+ } |
2882 |
+ |
2883 |
+-static void do_bad_area(unsigned long far, unsigned int esr, |
2884 |
++static void do_bad_area(unsigned long far, unsigned long esr, |
2885 |
+ struct pt_regs *regs) |
2886 |
+ { |
2887 |
+ unsigned long addr = untagged_addr(far); |
2888 |
+@@ -499,7 +505,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, |
2889 |
+ return handle_mm_fault(vma, addr, mm_flags, regs); |
2890 |
+ } |
2891 |
+ |
2892 |
+-static bool is_el0_instruction_abort(unsigned int esr) |
2893 |
++static bool is_el0_instruction_abort(unsigned long esr) |
2894 |
+ { |
2895 |
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; |
2896 |
+ } |
2897 |
+@@ -508,12 +514,12 @@ static bool is_el0_instruction_abort(unsigned int esr) |
2898 |
+ * Note: not valid for EL1 DC IVAC, but we never use that such that it |
2899 |
+ * should fault. EL0 cannot issue DC IVAC (undef). |
2900 |
+ */ |
2901 |
+-static bool is_write_abort(unsigned int esr) |
2902 |
++static bool is_write_abort(unsigned long esr) |
2903 |
+ { |
2904 |
+ return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); |
2905 |
+ } |
2906 |
+ |
2907 |
+-static int __kprobes do_page_fault(unsigned long far, unsigned int esr, |
2908 |
++static int __kprobes do_page_fault(unsigned long far, unsigned long esr, |
2909 |
+ struct pt_regs *regs) |
2910 |
+ { |
2911 |
+ const struct fault_info *inf; |
2912 |
+@@ -671,7 +677,7 @@ no_context: |
2913 |
+ } |
2914 |
+ |
2915 |
+ static int __kprobes do_translation_fault(unsigned long far, |
2916 |
+- unsigned int esr, |
2917 |
++ unsigned long esr, |
2918 |
+ struct pt_regs *regs) |
2919 |
+ { |
2920 |
+ unsigned long addr = untagged_addr(far); |
2921 |
+@@ -683,19 +689,19 @@ static int __kprobes do_translation_fault(unsigned long far, |
2922 |
+ return 0; |
2923 |
+ } |
2924 |
+ |
2925 |
+-static int do_alignment_fault(unsigned long far, unsigned int esr, |
2926 |
++static int do_alignment_fault(unsigned long far, unsigned long esr, |
2927 |
+ struct pt_regs *regs) |
2928 |
+ { |
2929 |
+ do_bad_area(far, esr, regs); |
2930 |
+ return 0; |
2931 |
+ } |
2932 |
+ |
2933 |
+-static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) |
2934 |
++static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) |
2935 |
+ { |
2936 |
+ return 1; /* "fault" */ |
2937 |
+ } |
2938 |
+ |
2939 |
+-static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) |
2940 |
++static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) |
2941 |
+ { |
2942 |
+ const struct fault_info *inf; |
2943 |
+ unsigned long siaddr; |
2944 |
+@@ -725,7 +731,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) |
2945 |
+ return 0; |
2946 |
+ } |
2947 |
+ |
2948 |
+-static int do_tag_check_fault(unsigned long far, unsigned int esr, |
2949 |
++static int do_tag_check_fault(unsigned long far, unsigned long esr, |
2950 |
+ struct pt_regs *regs) |
2951 |
+ { |
2952 |
+ /* |
2953 |
+@@ -805,7 +811,7 @@ static const struct fault_info fault_info[] = { |
2954 |
+ { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, |
2955 |
+ }; |
2956 |
+ |
2957 |
+-void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) |
2958 |
++void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) |
2959 |
+ { |
2960 |
+ const struct fault_info *inf = esr_to_fault_info(esr); |
2961 |
+ unsigned long addr = untagged_addr(far); |
2962 |
+@@ -828,14 +834,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) |
2963 |
+ } |
2964 |
+ NOKPROBE_SYMBOL(do_mem_abort); |
2965 |
+ |
2966 |
+-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) |
2967 |
++void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) |
2968 |
+ { |
2969 |
+ arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, |
2970 |
+ addr, esr); |
2971 |
+ } |
2972 |
+ NOKPROBE_SYMBOL(do_sp_pc_abort); |
2973 |
+ |
2974 |
+-int __init early_brk64(unsigned long addr, unsigned int esr, |
2975 |
++int __init early_brk64(unsigned long addr, unsigned long esr, |
2976 |
+ struct pt_regs *regs); |
2977 |
+ |
2978 |
+ /* |
2979 |
+@@ -855,7 +861,7 @@ static struct fault_info __refdata debug_fault_info[] = { |
2980 |
+ }; |
2981 |
+ |
2982 |
+ void __init hook_debug_fault_code(int nr, |
2983 |
+- int (*fn)(unsigned long, unsigned int, struct pt_regs *), |
2984 |
++ int (*fn)(unsigned long, unsigned long, struct pt_regs *), |
2985 |
+ int sig, int code, const char *name) |
2986 |
+ { |
2987 |
+ BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); |
2988 |
+@@ -888,7 +894,7 @@ static void debug_exception_exit(struct pt_regs *regs) |
2989 |
+ } |
2990 |
+ NOKPROBE_SYMBOL(debug_exception_exit); |
2991 |
+ |
2992 |
+-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, |
2993 |
++void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, |
2994 |
+ struct pt_regs *regs) |
2995 |
+ { |
2996 |
+ const struct fault_info *inf = esr_to_debug_fault_info(esr); |
2997 |
+diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c |
2998 |
+index 6e6756e8fa0a9..86a6e25908664 100644 |
2999 |
+--- a/arch/mips/bcm63xx/clk.c |
3000 |
++++ b/arch/mips/bcm63xx/clk.c |
3001 |
+@@ -361,6 +361,8 @@ static struct clk clk_periph = { |
3002 |
+ */ |
3003 |
+ int clk_enable(struct clk *clk) |
3004 |
+ { |
3005 |
++ if (!clk) |
3006 |
++ return 0; |
3007 |
+ mutex_lock(&clocks_mutex); |
3008 |
+ clk_enable_unlocked(clk); |
3009 |
+ mutex_unlock(&clocks_mutex); |
3010 |
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c |
3011 |
+index 1daa0c6b6f4ea..572a053e30ed5 100644 |
3012 |
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c |
3013 |
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c |
3014 |
+@@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port) |
3015 |
+ { |
3016 |
+ union cvmx_helper_link_info result; |
3017 |
+ |
3018 |
+- WARN(!octeon_is_simulation(), |
3019 |
++ WARN_ONCE(!octeon_is_simulation(), |
3020 |
+ "Using deprecated link status - please update your DT"); |
3021 |
+ |
3022 |
+ /* Unless we fix it later, all links are defaulted to down */ |
3023 |
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c |
3024 |
+index 6044ff4710022..a18ad2daf0052 100644 |
3025 |
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c |
3026 |
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c |
3027 |
+@@ -1100,7 +1100,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) |
3028 |
+ if (index == 0) |
3029 |
+ result = __cvmx_helper_rgmii_link_get(ipd_port); |
3030 |
+ else { |
3031 |
+- WARN(1, "Using deprecated link status - please update your DT"); |
3032 |
++ WARN_ONCE(1, "Using deprecated link status - please update your DT"); |
3033 |
+ result.s.full_duplex = 1; |
3034 |
+ result.s.link_up = 1; |
3035 |
+ result.s.speed = 1000; |
3036 |
+diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h |
3037 |
+index 6bbf082dd149e..79d5bb0e06d63 100644 |
3038 |
+--- a/arch/mips/include/asm/mach-ralink/mt7621.h |
3039 |
++++ b/arch/mips/include/asm/mach-ralink/mt7621.h |
3040 |
+@@ -7,10 +7,12 @@ |
3041 |
+ #ifndef _MT7621_REGS_H_ |
3042 |
+ #define _MT7621_REGS_H_ |
3043 |
+ |
3044 |
++#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(x))) |
3045 |
++ |
3046 |
+ #define MT7621_PALMBUS_BASE 0x1C000000 |
3047 |
+ #define MT7621_PALMBUS_SIZE 0x03FFFFFF |
3048 |
+ |
3049 |
+-#define MT7621_SYSC_BASE 0x1E000000 |
3050 |
++#define MT7621_SYSC_BASE IOMEM(0x1E000000) |
3051 |
+ |
3052 |
+ #define SYSC_REG_CHIP_NAME0 0x00 |
3053 |
+ #define SYSC_REG_CHIP_NAME1 0x04 |
3054 |
+diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c |
3055 |
+index e673603e11e5d..92140edb3ce3e 100644 |
3056 |
+--- a/arch/mips/kernel/vpe-cmp.c |
3057 |
++++ b/arch/mips/kernel/vpe-cmp.c |
3058 |
+@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe); |
3059 |
+ |
3060 |
+ static void vpe_device_release(struct device *cd) |
3061 |
+ { |
3062 |
+- kfree(cd); |
3063 |
+ } |
3064 |
+ |
3065 |
+ static struct class vpe_class = { |
3066 |
+@@ -157,6 +156,7 @@ out_dev: |
3067 |
+ device_del(&vpe_device); |
3068 |
+ |
3069 |
+ out_class: |
3070 |
++ put_device(&vpe_device); |
3071 |
+ class_unregister(&vpe_class); |
3072 |
+ |
3073 |
+ out_chrdev: |
3074 |
+@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void) |
3075 |
+ { |
3076 |
+ struct vpe *v, *n; |
3077 |
+ |
3078 |
+- device_del(&vpe_device); |
3079 |
++ device_unregister(&vpe_device); |
3080 |
+ class_unregister(&vpe_class); |
3081 |
+ unregister_chrdev(major, VPE_MODULE_NAME); |
3082 |
+ |
3083 |
+diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c |
3084 |
+index bad6b0891b2b5..84a82b551ec35 100644 |
3085 |
+--- a/arch/mips/kernel/vpe-mt.c |
3086 |
++++ b/arch/mips/kernel/vpe-mt.c |
3087 |
+@@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe); |
3088 |
+ |
3089 |
+ static void vpe_device_release(struct device *cd) |
3090 |
+ { |
3091 |
+- kfree(cd); |
3092 |
+ } |
3093 |
+ |
3094 |
+ static struct class vpe_class = { |
3095 |
+@@ -497,6 +496,7 @@ out_dev: |
3096 |
+ device_del(&vpe_device); |
3097 |
+ |
3098 |
+ out_class: |
3099 |
++ put_device(&vpe_device); |
3100 |
+ class_unregister(&vpe_class); |
3101 |
+ |
3102 |
+ out_chrdev: |
3103 |
+@@ -509,7 +509,7 @@ void __exit vpe_module_exit(void) |
3104 |
+ { |
3105 |
+ struct vpe *v, *n; |
3106 |
+ |
3107 |
+- device_del(&vpe_device); |
3108 |
++ device_unregister(&vpe_device); |
3109 |
+ class_unregister(&vpe_class); |
3110 |
+ unregister_chrdev(major, VPE_MODULE_NAME); |
3111 |
+ |
3112 |
+diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c |
3113 |
+index 4c83786612193..0db23bcf2a970 100644 |
3114 |
+--- a/arch/mips/ralink/mt7621.c |
3115 |
++++ b/arch/mips/ralink/mt7621.c |
3116 |
+@@ -23,6 +23,7 @@ |
3117 |
+ #define MT7621_MEM_TEST_PATTERN 0xaa5555aa |
3118 |
+ |
3119 |
+ static u32 detect_magic __initdata; |
3120 |
++static struct ralink_soc_info *soc_info_ptr; |
3121 |
+ |
3122 |
+ phys_addr_t mips_cpc_default_phys_base(void) |
3123 |
+ { |
3124 |
+@@ -66,41 +67,83 @@ void __init ralink_of_remap(void) |
3125 |
+ panic("Failed to remap core resources"); |
3126 |
+ } |
3127 |
+ |
3128 |
+-static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) |
3129 |
++static unsigned int __init mt7621_get_soc_name0(void) |
3130 |
++{ |
3131 |
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0); |
3132 |
++} |
3133 |
++ |
3134 |
++static unsigned int __init mt7621_get_soc_name1(void) |
3135 |
++{ |
3136 |
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1); |
3137 |
++} |
3138 |
++ |
3139 |
++static bool __init mt7621_soc_valid(void) |
3140 |
++{ |
3141 |
++ if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 && |
3142 |
++ mt7621_get_soc_name1() == MT7621_CHIP_NAME1) |
3143 |
++ return true; |
3144 |
++ else |
3145 |
++ return false; |
3146 |
++} |
3147 |
++ |
3148 |
++static const char __init *mt7621_get_soc_id(void) |
3149 |
++{ |
3150 |
++ if (mt7621_soc_valid()) |
3151 |
++ return "MT7621"; |
3152 |
++ else |
3153 |
++ return "invalid"; |
3154 |
++} |
3155 |
++ |
3156 |
++static unsigned int __init mt7621_get_soc_rev(void) |
3157 |
++{ |
3158 |
++ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV); |
3159 |
++} |
3160 |
++ |
3161 |
++static unsigned int __init mt7621_get_soc_ver(void) |
3162 |
++{ |
3163 |
++ return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK; |
3164 |
++} |
3165 |
++ |
3166 |
++static unsigned int __init mt7621_get_soc_eco(void) |
3167 |
++{ |
3168 |
++ return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK); |
3169 |
++} |
3170 |
++ |
3171 |
++static const char __init *mt7621_get_soc_revision(void) |
3172 |
++{ |
3173 |
++ if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1) |
3174 |
++ return "E2"; |
3175 |
++ else |
3176 |
++ return "E1"; |
3177 |
++} |
3178 |
++ |
3179 |
++static int __init mt7621_soc_dev_init(void) |
3180 |
+ { |
3181 |
+ struct soc_device *soc_dev; |
3182 |
+ struct soc_device_attribute *soc_dev_attr; |
3183 |
+ |
3184 |
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); |
3185 |
+ if (!soc_dev_attr) |
3186 |
+- return; |
3187 |
++ return -ENOMEM; |
3188 |
+ |
3189 |
+ soc_dev_attr->soc_id = "mt7621"; |
3190 |
+ soc_dev_attr->family = "Ralink"; |
3191 |
++ soc_dev_attr->revision = mt7621_get_soc_revision(); |
3192 |
+ |
3193 |
+- if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 && |
3194 |
+- (rev & CHIP_REV_ECO_MASK) == 1) |
3195 |
+- soc_dev_attr->revision = "E2"; |
3196 |
+- else |
3197 |
+- soc_dev_attr->revision = "E1"; |
3198 |
+- |
3199 |
+- soc_dev_attr->data = soc_info; |
3200 |
++ soc_dev_attr->data = soc_info_ptr; |
3201 |
+ |
3202 |
+ soc_dev = soc_device_register(soc_dev_attr); |
3203 |
+ if (IS_ERR(soc_dev)) { |
3204 |
+ kfree(soc_dev_attr); |
3205 |
+- return; |
3206 |
++ return PTR_ERR(soc_dev); |
3207 |
+ } |
3208 |
++ |
3209 |
++ return 0; |
3210 |
+ } |
3211 |
++device_initcall(mt7621_soc_dev_init); |
3212 |
+ |
3213 |
+ void __init prom_soc_init(struct ralink_soc_info *soc_info) |
3214 |
+ { |
3215 |
+- void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); |
3216 |
+- unsigned char *name = NULL; |
3217 |
+- u32 n0; |
3218 |
+- u32 n1; |
3219 |
+- u32 rev; |
3220 |
+- |
3221 |
+ /* Early detection of CMP support */ |
3222 |
+ mips_cm_probe(); |
3223 |
+ mips_cpc_probe(); |
3224 |
+@@ -123,27 +166,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info) |
3225 |
+ __sync(); |
3226 |
+ } |
3227 |
+ |
3228 |
+- n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); |
3229 |
+- n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); |
3230 |
+- |
3231 |
+- if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { |
3232 |
+- name = "MT7621"; |
3233 |
++ if (mt7621_soc_valid()) |
3234 |
+ soc_info->compatible = "mediatek,mt7621-soc"; |
3235 |
+- } else { |
3236 |
+- panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); |
3237 |
+- } |
3238 |
++ else |
3239 |
++ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", |
3240 |
++ mt7621_get_soc_name0(), |
3241 |
++ mt7621_get_soc_name1()); |
3242 |
+ ralink_soc = MT762X_SOC_MT7621AT; |
3243 |
+- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); |
3244 |
+ |
3245 |
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, |
3246 |
+ "MediaTek %s ver:%u eco:%u", |
3247 |
+- name, |
3248 |
+- (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, |
3249 |
+- (rev & CHIP_REV_ECO_MASK)); |
3250 |
++ mt7621_get_soc_id(), |
3251 |
++ mt7621_get_soc_ver(), |
3252 |
++ mt7621_get_soc_eco()); |
3253 |
+ |
3254 |
+ soc_info->mem_detect = mt7621_memory_detect; |
3255 |
+ |
3256 |
+- soc_dev_init(soc_info, rev); |
3257 |
++ soc_info_ptr = soc_info; |
3258 |
+ |
3259 |
+ if (!register_cps_smp_ops()) |
3260 |
+ return; |
3261 |
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c |
3262 |
+index 082f6d0308a47..8718289c051dd 100644 |
3263 |
+--- a/arch/powerpc/perf/callchain.c |
3264 |
++++ b/arch/powerpc/perf/callchain.c |
3265 |
+@@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re |
3266 |
+ next_sp = fp[0]; |
3267 |
+ |
3268 |
+ if (next_sp == sp + STACK_INT_FRAME_SIZE && |
3269 |
++ validate_sp(sp, current, STACK_INT_FRAME_SIZE) && |
3270 |
+ fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { |
3271 |
+ /* |
3272 |
+ * This looks like an interrupt frame for an |
3273 |
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h |
3274 |
+index 8965b4463d433..5e86371a20c78 100644 |
3275 |
+--- a/arch/powerpc/perf/hv-gpci-requests.h |
3276 |
++++ b/arch/powerpc/perf/hv-gpci-requests.h |
3277 |
+@@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) |
3278 |
+ ) |
3279 |
+ #include I(REQUEST_END) |
3280 |
+ |
3281 |
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6 |
3282 |
+ /* |
3283 |
+ * Not available for counter_info_version >= 0x8, use |
3284 |
+ * run_instruction_cycles_by_partition(0x100) instead. |
3285 |
+@@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) |
3286 |
+ __count(0x10, 8, cycles) |
3287 |
+ ) |
3288 |
+ #include I(REQUEST_END) |
3289 |
++#endif |
3290 |
+ |
3291 |
+ #define REQUEST_NAME system_performance_capabilities |
3292 |
+ #define REQUEST_NUM 0x40 |
3293 |
+@@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) |
3294 |
+ ) |
3295 |
+ #include I(REQUEST_END) |
3296 |
+ |
3297 |
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6 |
3298 |
+ #define REQUEST_NAME processor_bus_utilization_abc_links |
3299 |
+ #define REQUEST_NUM 0x50 |
3300 |
+ #define REQUEST_IDX_KIND "hw_chip_id=?" |
3301 |
+@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) |
3302 |
+ __count(0x28, 8, instructions_completed) |
3303 |
+ ) |
3304 |
+ #include I(REQUEST_END) |
3305 |
++#endif |
3306 |
+ |
3307 |
+ /* Processor_core_power_mode (0x95) skipped, no counters */ |
3308 |
+ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, |
3309 |
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c |
3310 |
+index c756228a081fb..28b770bbc10b4 100644 |
3311 |
+--- a/arch/powerpc/perf/hv-gpci.c |
3312 |
++++ b/arch/powerpc/perf/hv-gpci.c |
3313 |
+@@ -72,7 +72,7 @@ static struct attribute_group format_group = { |
3314 |
+ |
3315 |
+ static struct attribute_group event_group = { |
3316 |
+ .name = "events", |
3317 |
+- .attrs = hv_gpci_event_attrs, |
3318 |
++ /* .attrs is set in init */ |
3319 |
+ }; |
3320 |
+ |
3321 |
+ #define HV_CAPS_ATTR(_name, _format) \ |
3322 |
+@@ -330,6 +330,7 @@ static int hv_gpci_init(void) |
3323 |
+ int r; |
3324 |
+ unsigned long hret; |
3325 |
+ struct hv_perf_caps caps; |
3326 |
++ struct hv_gpci_request_buffer *arg; |
3327 |
+ |
3328 |
+ hv_gpci_assert_offsets_correct(); |
3329 |
+ |
3330 |
+@@ -353,6 +354,36 @@ static int hv_gpci_init(void) |
3331 |
+ /* sampling not supported */ |
3332 |
+ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
3333 |
+ |
3334 |
++ arg = (void *)get_cpu_var(hv_gpci_reqb); |
3335 |
++ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); |
3336 |
++ |
3337 |
++ /* |
3338 |
++ * hcall H_GET_PERF_COUNTER_INFO populates the output |
3339 |
++ * counter_info_version value based on the system hypervisor. |
3340 |
++ * Pass the counter request 0x10 corresponds to request type |
3341 |
++ * 'Dispatch_timebase_by_processor', to get the supported |
3342 |
++ * counter_info_version. |
3343 |
++ */ |
3344 |
++ arg->params.counter_request = cpu_to_be32(0x10); |
3345 |
++ |
3346 |
++ r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, |
3347 |
++ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); |
3348 |
++ if (r) { |
3349 |
++ pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); |
3350 |
++ arg->params.counter_info_version_out = 0x8; |
3351 |
++ } |
3352 |
++ |
3353 |
++ /* |
3354 |
++ * Use counter_info_version_out value to assign |
3355 |
++ * required hv-gpci event list. |
3356 |
++ */ |
3357 |
++ if (arg->params.counter_info_version_out >= 0x8) |
3358 |
++ event_group.attrs = hv_gpci_event_attrs; |
3359 |
++ else |
3360 |
++ event_group.attrs = hv_gpci_event_attrs_v6; |
3361 |
++ |
3362 |
++ put_cpu_var(hv_gpci_reqb); |
3363 |
++ |
3364 |
+ r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); |
3365 |
+ if (r) |
3366 |
+ return r; |
3367 |
+diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h |
3368 |
+index 4d108262bed79..c72020912dea5 100644 |
3369 |
+--- a/arch/powerpc/perf/hv-gpci.h |
3370 |
++++ b/arch/powerpc/perf/hv-gpci.h |
3371 |
+@@ -26,6 +26,7 @@ enum { |
3372 |
+ #define REQUEST_FILE "../hv-gpci-requests.h" |
3373 |
+ #define NAME_LOWER hv_gpci |
3374 |
+ #define NAME_UPPER HV_GPCI |
3375 |
++#define ENABLE_EVENTS_COUNTERINFO_V6 |
3376 |
+ #include "req-gen/perf.h" |
3377 |
+ #undef REQUEST_FILE |
3378 |
+ #undef NAME_LOWER |
3379 |
+diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h |
3380 |
+index fa9bc804e67af..6b2a59fefffa7 100644 |
3381 |
+--- a/arch/powerpc/perf/req-gen/perf.h |
3382 |
++++ b/arch/powerpc/perf/req-gen/perf.h |
3383 |
+@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ |
3384 |
+ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ |
3385 |
+ r_fields |
3386 |
+ |
3387 |
++/* Generate event list for platforms with counter_info_version 0x6 or below */ |
3388 |
++static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = { |
3389 |
++#include REQUEST_FILE |
3390 |
++ NULL |
3391 |
++}; |
3392 |
++ |
3393 |
++/* |
3394 |
++ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci |
3395 |
++ * events were deprecated for platform firmware that supports |
3396 |
++ * counter_info_version 0x8 or above. |
3397 |
++ * Those deprecated events are still part of platform firmware that |
3398 |
++ * support counter_info_version 0x6 and below. As per the getPerfCountInfo |
3399 |
++ * v1.018 documentation there is no counter_info_version 0x7. |
3400 |
++ * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of |
3401 |
++ * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms |
3402 |
++ * that supports counter_info_version 0x8 or above. |
3403 |
++ */ |
3404 |
++#undef ENABLE_EVENTS_COUNTERINFO_V6 |
3405 |
++ |
3406 |
++/* Generate event list for platforms with counter_info_version 0x8 or above*/ |
3407 |
+ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { |
3408 |
+ #include REQUEST_FILE |
3409 |
+ NULL |
3410 |
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c |
3411 |
+index b91ebebd9ff20..e0049b7df2125 100644 |
3412 |
+--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c |
3413 |
++++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c |
3414 |
+@@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) |
3415 |
+ err_bcom_rx_irq: |
3416 |
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); |
3417 |
+ err_bcom_rx: |
3418 |
++ free_irq(lpbfifo.irq, &lpbfifo); |
3419 |
+ err_irq: |
3420 |
+ iounmap(lpbfifo.regs); |
3421 |
+ lpbfifo.regs = NULL; |
3422 |
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c |
3423 |
+index b6133a237a709..6e18d07035680 100644 |
3424 |
+--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c |
3425 |
++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c |
3426 |
+@@ -106,7 +106,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, |
3427 |
+ |
3428 |
+ goto next; |
3429 |
+ unreg: |
3430 |
+- platform_device_del(pdev); |
3431 |
++ platform_device_put(pdev); |
3432 |
+ err: |
3433 |
+ pr_err("%pOF: registration failed\n", np); |
3434 |
+ next: |
3435 |
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c |
3436 |
+index 09fafcf2d3a06..f51fd4ac3f0b6 100644 |
3437 |
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c |
3438 |
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c |
3439 |
+@@ -845,18 +845,8 @@ static int __init eeh_pseries_init(void) |
3440 |
+ return -EINVAL; |
3441 |
+ } |
3442 |
+ |
3443 |
+- /* Initialize error log lock and size */ |
3444 |
+- spin_lock_init(&slot_errbuf_lock); |
3445 |
+- eeh_error_buf_size = rtas_token("rtas-error-log-max"); |
3446 |
+- if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { |
3447 |
+- pr_info("%s: unknown EEH error log size\n", |
3448 |
+- __func__); |
3449 |
+- eeh_error_buf_size = 1024; |
3450 |
+- } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { |
3451 |
+- pr_info("%s: EEH error log size %d exceeds the maximal %d\n", |
3452 |
+- __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); |
3453 |
+- eeh_error_buf_size = RTAS_ERROR_LOG_MAX; |
3454 |
+- } |
3455 |
++ /* Initialize error log size */ |
3456 |
++ eeh_error_buf_size = rtas_get_error_log_max(); |
3457 |
+ |
3458 |
+ /* Set EEH probe mode */ |
3459 |
+ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); |
3460 |
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c |
3461 |
+index 2bf78a30238b9..43bd2579d942b 100644 |
3462 |
+--- a/arch/powerpc/sysdev/xive/spapr.c |
3463 |
++++ b/arch/powerpc/sysdev/xive/spapr.c |
3464 |
+@@ -437,6 +437,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) |
3465 |
+ |
3466 |
+ data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); |
3467 |
+ if (!data->trig_mmio) { |
3468 |
++ iounmap(data->eoi_mmio); |
3469 |
+ pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); |
3470 |
+ return -ENOMEM; |
3471 |
+ } |
3472 |
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c |
3473 |
+index 8b28ff9d98d16..3c085e1e5232e 100644 |
3474 |
+--- a/arch/powerpc/xmon/xmon.c |
3475 |
++++ b/arch/powerpc/xmon/xmon.c |
3476 |
+@@ -1528,9 +1528,9 @@ bpt_cmds(void) |
3477 |
+ cmd = inchar(); |
3478 |
+ |
3479 |
+ switch (cmd) { |
3480 |
+- static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; |
3481 |
+- int mode; |
3482 |
+- case 'd': /* bd - hardware data breakpoint */ |
3483 |
++ case 'd': { /* bd - hardware data breakpoint */ |
3484 |
++ static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; |
3485 |
++ int mode; |
3486 |
+ if (xmon_is_ro) { |
3487 |
+ printf(xmon_ro_msg); |
3488 |
+ break; |
3489 |
+@@ -1563,6 +1563,7 @@ bpt_cmds(void) |
3490 |
+ |
3491 |
+ force_enable_xmon(); |
3492 |
+ break; |
3493 |
++ } |
3494 |
+ |
3495 |
+ case 'i': /* bi - hardware instr breakpoint */ |
3496 |
+ if (xmon_is_ro) { |
3497 |
+diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h |
3498 |
+index a5c2ca1d1cd8b..ec19d6afc8965 100644 |
3499 |
+--- a/arch/riscv/include/asm/hugetlb.h |
3500 |
++++ b/arch/riscv/include/asm/hugetlb.h |
3501 |
+@@ -5,4 +5,10 @@ |
3502 |
+ #include <asm-generic/hugetlb.h> |
3503 |
+ #include <asm/page.h> |
3504 |
+ |
3505 |
++static inline void arch_clear_hugepage_flags(struct page *page) |
3506 |
++{ |
3507 |
++ clear_bit(PG_dcache_clean, &page->flags); |
3508 |
++} |
3509 |
++#define arch_clear_hugepage_flags arch_clear_hugepage_flags |
3510 |
++ |
3511 |
+ #endif /* _ASM_RISCV_HUGETLB_H */ |
3512 |
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c |
3513 |
+index 2f4cd85fb6519..4102c97309cc2 100644 |
3514 |
+--- a/arch/riscv/kernel/traps.c |
3515 |
++++ b/arch/riscv/kernel/traps.c |
3516 |
+@@ -211,7 +211,7 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], |
3517 |
+ * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used |
3518 |
+ * to get per-cpu overflow stack(get_overflow_stack). |
3519 |
+ */ |
3520 |
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)]; |
3521 |
++long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); |
3522 |
+ asmlinkage unsigned long get_overflow_stack(void) |
3523 |
+ { |
3524 |
+ return (unsigned long)this_cpu_ptr(overflow_stack) + |
3525 |
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c |
3526 |
+index 3af4131c22c7a..2e3f1a626a3af 100644 |
3527 |
+--- a/arch/riscv/net/bpf_jit_comp64.c |
3528 |
++++ b/arch/riscv/net/bpf_jit_comp64.c |
3529 |
+@@ -120,6 +120,25 @@ static bool in_auipc_jalr_range(s64 val) |
3530 |
+ val < ((1L << 31) - (1L << 11)); |
3531 |
+ } |
3532 |
+ |
3533 |
++/* Emit fixed-length instructions for address */ |
3534 |
++static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) |
3535 |
++{ |
3536 |
++ u64 ip = (u64)(ctx->insns + ctx->ninsns); |
3537 |
++ s64 off = addr - ip; |
3538 |
++ s64 upper = (off + (1 << 11)) >> 12; |
3539 |
++ s64 lower = off & 0xfff; |
3540 |
++ |
3541 |
++ if (extra_pass && !in_auipc_jalr_range(off)) { |
3542 |
++ pr_err("bpf-jit: target offset 0x%llx is out of range\n", off); |
3543 |
++ return -ERANGE; |
3544 |
++ } |
3545 |
++ |
3546 |
++ emit(rv_auipc(rd, upper), ctx); |
3547 |
++ emit(rv_addi(rd, rd, lower), ctx); |
3548 |
++ return 0; |
3549 |
++} |
3550 |
++ |
3551 |
++/* Emit variable-length instructions for 32-bit and 64-bit imm */ |
3552 |
+ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) |
3553 |
+ { |
3554 |
+ /* Note that the immediate from the add is sign-extended, |
3555 |
+@@ -887,7 +906,15 @@ out_be: |
3556 |
+ u64 imm64; |
3557 |
+ |
3558 |
+ imm64 = (u64)insn1.imm << 32 | (u32)imm; |
3559 |
+- emit_imm(rd, imm64, ctx); |
3560 |
++ if (bpf_pseudo_func(insn)) { |
3561 |
++ /* fixed-length insns for extra jit pass */ |
3562 |
++ ret = emit_addr(rd, imm64, extra_pass, ctx); |
3563 |
++ if (ret) |
3564 |
++ return ret; |
3565 |
++ } else { |
3566 |
++ emit_imm(rd, imm64, ctx); |
3567 |
++ } |
3568 |
++ |
3569 |
+ return 1; |
3570 |
+ } |
3571 |
+ |
3572 |
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c |
3573 |
+index dc3ae55f79e08..912fb3821a6bb 100644 |
3574 |
+--- a/arch/x86/events/intel/uncore_snb.c |
3575 |
++++ b/arch/x86/events/intel/uncore_snb.c |
3576 |
+@@ -1423,6 +1423,7 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) |
3577 |
+ /* MCHBAR is disabled */ |
3578 |
+ if (!(mch_bar & BIT(0))) { |
3579 |
+ pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); |
3580 |
++ pci_dev_put(pdev); |
3581 |
+ return; |
3582 |
+ } |
3583 |
+ mch_bar &= ~BIT(0); |
3584 |
+@@ -1436,6 +1437,8 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) |
3585 |
+ box->io_addr = ioremap(addr, type->mmio_map_size); |
3586 |
+ if (!box->io_addr) |
3587 |
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); |
3588 |
++ |
3589 |
++ pci_dev_put(pdev); |
3590 |
+ } |
3591 |
+ |
3592 |
+ static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = { |
3593 |
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c |
3594 |
+index ed869443efb21..fcd95e93f479a 100644 |
3595 |
+--- a/arch/x86/events/intel/uncore_snbep.c |
3596 |
++++ b/arch/x86/events/intel/uncore_snbep.c |
3597 |
+@@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device) |
3598 |
+ return false; |
3599 |
+ |
3600 |
+ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); |
3601 |
++ pci_dev_put(dev); |
3602 |
+ if (!hswep_get_chop(capid4)) |
3603 |
+ return true; |
3604 |
+ |
3605 |
+@@ -4492,6 +4493,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map |
3606 |
+ type->topology = NULL; |
3607 |
+ } |
3608 |
+ |
3609 |
++ pci_dev_put(dev); |
3610 |
++ |
3611 |
+ return ret; |
3612 |
+ } |
3613 |
+ |
3614 |
+@@ -4857,6 +4860,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box, |
3615 |
+ |
3616 |
+ addr += box_ctl; |
3617 |
+ |
3618 |
++ pci_dev_put(pdev); |
3619 |
++ |
3620 |
+ box->io_addr = ioremap(addr, type->mmio_map_size); |
3621 |
+ if (!box->io_addr) { |
3622 |
+ pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); |
3623 |
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c |
3624 |
+index 762f10cdfb7a0..95f98af74fdca 100644 |
3625 |
+--- a/arch/x86/hyperv/hv_init.c |
3626 |
++++ b/arch/x86/hyperv/hv_init.c |
3627 |
+@@ -469,8 +469,6 @@ void hyperv_cleanup(void) |
3628 |
+ { |
3629 |
+ union hv_x64_msr_hypercall_contents hypercall_msr; |
3630 |
+ |
3631 |
+- unregister_syscore_ops(&hv_syscore_ops); |
3632 |
+- |
3633 |
+ /* Reset our OS id */ |
3634 |
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); |
3635 |
+ |
3636 |
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c |
3637 |
+index 19876ebfb5044..fa5777af8da1a 100644 |
3638 |
+--- a/arch/x86/kernel/cpu/sgx/encl.c |
3639 |
++++ b/arch/x86/kernel/cpu/sgx/encl.c |
3640 |
+@@ -533,11 +533,15 @@ const struct vm_operations_struct sgx_vm_ops = { |
3641 |
+ void sgx_encl_release(struct kref *ref) |
3642 |
+ { |
3643 |
+ struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); |
3644 |
++ unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); |
3645 |
+ struct sgx_va_page *va_page; |
3646 |
+ struct sgx_encl_page *entry; |
3647 |
+- unsigned long index; |
3648 |
++ unsigned long count = 0; |
3649 |
++ |
3650 |
++ XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); |
3651 |
+ |
3652 |
+- xa_for_each(&encl->page_array, index, entry) { |
3653 |
++ xas_lock(&xas); |
3654 |
++ xas_for_each(&xas, entry, max_page_index) { |
3655 |
+ if (entry->epc_page) { |
3656 |
+ /* |
3657 |
+ * The page and its radix tree entry cannot be freed |
3658 |
+@@ -552,9 +556,20 @@ void sgx_encl_release(struct kref *ref) |
3659 |
+ } |
3660 |
+ |
3661 |
+ kfree(entry); |
3662 |
+- /* Invoke scheduler to prevent soft lockups. */ |
3663 |
+- cond_resched(); |
3664 |
++ /* |
3665 |
++ * Invoke scheduler on every XA_CHECK_SCHED iteration |
3666 |
++ * to prevent soft lockups. |
3667 |
++ */ |
3668 |
++ if (!(++count % XA_CHECK_SCHED)) { |
3669 |
++ xas_pause(&xas); |
3670 |
++ xas_unlock(&xas); |
3671 |
++ |
3672 |
++ cond_resched(); |
3673 |
++ |
3674 |
++ xas_lock(&xas); |
3675 |
++ } |
3676 |
+ } |
3677 |
++ xas_unlock(&xas); |
3678 |
+ |
3679 |
+ xa_destroy(&encl->page_array); |
3680 |
+ |
3681 |
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c |
3682 |
+index b63cf8f7745ee..6c07f6daaa227 100644 |
3683 |
+--- a/arch/x86/kernel/uprobes.c |
3684 |
++++ b/arch/x86/kernel/uprobes.c |
3685 |
+@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) |
3686 |
+ switch (opc1) { |
3687 |
+ case 0xeb: /* jmp 8 */ |
3688 |
+ case 0xe9: /* jmp 32 */ |
3689 |
+- case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ |
3690 |
+ break; |
3691 |
++ case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ |
3692 |
++ goto setup; |
3693 |
+ |
3694 |
+ case 0xe8: /* call relative */ |
3695 |
+ branch_clear_offset(auprobe, insn); |
3696 |
+@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) |
3697 |
+ return -ENOTSUPP; |
3698 |
+ } |
3699 |
+ |
3700 |
++setup: |
3701 |
+ auprobe->branch.opc1 = opc1; |
3702 |
+ auprobe->branch.ilen = insn->length; |
3703 |
+ auprobe->branch.offs = insn->immediate.value; |
3704 |
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c |
3705 |
+index c1b2f764b29a2..cdec892b28e2e 100644 |
3706 |
+--- a/arch/x86/xen/smp.c |
3707 |
++++ b/arch/x86/xen/smp.c |
3708 |
+@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) |
3709 |
+ |
3710 |
+ void xen_smp_intr_free(unsigned int cpu) |
3711 |
+ { |
3712 |
++ kfree(per_cpu(xen_resched_irq, cpu).name); |
3713 |
++ per_cpu(xen_resched_irq, cpu).name = NULL; |
3714 |
+ if (per_cpu(xen_resched_irq, cpu).irq >= 0) { |
3715 |
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); |
3716 |
+ per_cpu(xen_resched_irq, cpu).irq = -1; |
3717 |
+- kfree(per_cpu(xen_resched_irq, cpu).name); |
3718 |
+- per_cpu(xen_resched_irq, cpu).name = NULL; |
3719 |
+ } |
3720 |
++ kfree(per_cpu(xen_callfunc_irq, cpu).name); |
3721 |
++ per_cpu(xen_callfunc_irq, cpu).name = NULL; |
3722 |
+ if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { |
3723 |
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); |
3724 |
+ per_cpu(xen_callfunc_irq, cpu).irq = -1; |
3725 |
+- kfree(per_cpu(xen_callfunc_irq, cpu).name); |
3726 |
+- per_cpu(xen_callfunc_irq, cpu).name = NULL; |
3727 |
+ } |
3728 |
++ kfree(per_cpu(xen_debug_irq, cpu).name); |
3729 |
++ per_cpu(xen_debug_irq, cpu).name = NULL; |
3730 |
+ if (per_cpu(xen_debug_irq, cpu).irq >= 0) { |
3731 |
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); |
3732 |
+ per_cpu(xen_debug_irq, cpu).irq = -1; |
3733 |
+- kfree(per_cpu(xen_debug_irq, cpu).name); |
3734 |
+- per_cpu(xen_debug_irq, cpu).name = NULL; |
3735 |
+ } |
3736 |
++ kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); |
3737 |
++ per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; |
3738 |
+ if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { |
3739 |
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, |
3740 |
+ NULL); |
3741 |
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; |
3742 |
+- kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); |
3743 |
+- per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; |
3744 |
+ } |
3745 |
+ } |
3746 |
+ |
3747 |
+@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu) |
3748 |
+ char *resched_name, *callfunc_name, *debug_name; |
3749 |
+ |
3750 |
+ resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); |
3751 |
++ per_cpu(xen_resched_irq, cpu).name = resched_name; |
3752 |
+ rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, |
3753 |
+ cpu, |
3754 |
+ xen_reschedule_interrupt, |
3755 |
+@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu) |
3756 |
+ if (rc < 0) |
3757 |
+ goto fail; |
3758 |
+ per_cpu(xen_resched_irq, cpu).irq = rc; |
3759 |
+- per_cpu(xen_resched_irq, cpu).name = resched_name; |
3760 |
+ |
3761 |
+ callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); |
3762 |
++ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; |
3763 |
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, |
3764 |
+ cpu, |
3765 |
+ xen_call_function_interrupt, |
3766 |
+@@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu) |
3767 |
+ if (rc < 0) |
3768 |
+ goto fail; |
3769 |
+ per_cpu(xen_callfunc_irq, cpu).irq = rc; |
3770 |
+- per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; |
3771 |
+ |
3772 |
+ if (!xen_fifo_events) { |
3773 |
+ debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); |
3774 |
++ per_cpu(xen_debug_irq, cpu).name = debug_name; |
3775 |
+ rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, |
3776 |
+ xen_debug_interrupt, |
3777 |
+ IRQF_PERCPU | IRQF_NOBALANCING, |
3778 |
+@@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu) |
3779 |
+ if (rc < 0) |
3780 |
+ goto fail; |
3781 |
+ per_cpu(xen_debug_irq, cpu).irq = rc; |
3782 |
+- per_cpu(xen_debug_irq, cpu).name = debug_name; |
3783 |
+ } |
3784 |
+ |
3785 |
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); |
3786 |
++ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; |
3787 |
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, |
3788 |
+ cpu, |
3789 |
+ xen_call_function_single_interrupt, |
3790 |
+@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu) |
3791 |
+ if (rc < 0) |
3792 |
+ goto fail; |
3793 |
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; |
3794 |
+- per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; |
3795 |
+ |
3796 |
+ return 0; |
3797 |
+ |
3798 |
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c |
3799 |
+index cd5539fc5eb45..b47b5111397a7 100644 |
3800 |
+--- a/arch/x86/xen/smp_pv.c |
3801 |
++++ b/arch/x86/xen/smp_pv.c |
3802 |
+@@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void) |
3803 |
+ |
3804 |
+ void xen_smp_intr_free_pv(unsigned int cpu) |
3805 |
+ { |
3806 |
++ kfree(per_cpu(xen_irq_work, cpu).name); |
3807 |
++ per_cpu(xen_irq_work, cpu).name = NULL; |
3808 |
+ if (per_cpu(xen_irq_work, cpu).irq >= 0) { |
3809 |
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); |
3810 |
+ per_cpu(xen_irq_work, cpu).irq = -1; |
3811 |
+- kfree(per_cpu(xen_irq_work, cpu).name); |
3812 |
+- per_cpu(xen_irq_work, cpu).name = NULL; |
3813 |
+ } |
3814 |
+ |
3815 |
++ kfree(per_cpu(xen_pmu_irq, cpu).name); |
3816 |
++ per_cpu(xen_pmu_irq, cpu).name = NULL; |
3817 |
+ if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { |
3818 |
+ unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); |
3819 |
+ per_cpu(xen_pmu_irq, cpu).irq = -1; |
3820 |
+- kfree(per_cpu(xen_pmu_irq, cpu).name); |
3821 |
+- per_cpu(xen_pmu_irq, cpu).name = NULL; |
3822 |
+ } |
3823 |
+ } |
3824 |
+ |
3825 |
+@@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) |
3826 |
+ char *callfunc_name, *pmu_name; |
3827 |
+ |
3828 |
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); |
3829 |
++ per_cpu(xen_irq_work, cpu).name = callfunc_name; |
3830 |
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, |
3831 |
+ cpu, |
3832 |
+ xen_irq_work_interrupt, |
3833 |
+@@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu) |
3834 |
+ if (rc < 0) |
3835 |
+ goto fail; |
3836 |
+ per_cpu(xen_irq_work, cpu).irq = rc; |
3837 |
+- per_cpu(xen_irq_work, cpu).name = callfunc_name; |
3838 |
+ |
3839 |
+ if (is_xen_pmu) { |
3840 |
+ pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); |
3841 |
++ per_cpu(xen_pmu_irq, cpu).name = pmu_name; |
3842 |
+ rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, |
3843 |
+ xen_pmu_irq_handler, |
3844 |
+ IRQF_PERCPU|IRQF_NOBALANCING, |
3845 |
+@@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu) |
3846 |
+ if (rc < 0) |
3847 |
+ goto fail; |
3848 |
+ per_cpu(xen_pmu_irq, cpu).irq = rc; |
3849 |
+- per_cpu(xen_pmu_irq, cpu).name = pmu_name; |
3850 |
+ } |
3851 |
+ |
3852 |
+ return 0; |
3853 |
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c |
3854 |
+index 043c73dfd2c98..5c6fc16e4b925 100644 |
3855 |
+--- a/arch/x86/xen/spinlock.c |
3856 |
++++ b/arch/x86/xen/spinlock.c |
3857 |
+@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu) |
3858 |
+ cpu, per_cpu(lock_kicker_irq, cpu)); |
3859 |
+ |
3860 |
+ name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); |
3861 |
++ per_cpu(irq_name, cpu) = name; |
3862 |
+ irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, |
3863 |
+ cpu, |
3864 |
+ dummy_handler, |
3865 |
+@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu) |
3866 |
+ if (irq >= 0) { |
3867 |
+ disable_irq(irq); /* make sure it's never delivered */ |
3868 |
+ per_cpu(lock_kicker_irq, cpu) = irq; |
3869 |
+- per_cpu(irq_name, cpu) = name; |
3870 |
+ } |
3871 |
+ |
3872 |
+ printk("cpu %d spinlock event irq %d\n", cpu, irq); |
3873 |
+@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu) |
3874 |
+ if (!xen_pvspin) |
3875 |
+ return; |
3876 |
+ |
3877 |
++ kfree(per_cpu(irq_name, cpu)); |
3878 |
++ per_cpu(irq_name, cpu) = NULL; |
3879 |
+ /* |
3880 |
+ * When booting the kernel with 'mitigations=auto,nosmt', the secondary |
3881 |
+ * CPUs are not activated, and lock_kicker_irq is not initialized. |
3882 |
+@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu) |
3883 |
+ |
3884 |
+ unbind_from_irqhandler(irq, NULL); |
3885 |
+ per_cpu(lock_kicker_irq, cpu) = -1; |
3886 |
+- kfree(per_cpu(irq_name, cpu)); |
3887 |
+- per_cpu(irq_name, cpu) = NULL; |
3888 |
+ } |
3889 |
+ |
3890 |
+ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); |
3891 |
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c |
3892 |
+index 7d8fe13573f64..b8b6e9eae94b7 100644 |
3893 |
+--- a/block/bfq-iosched.c |
3894 |
++++ b/block/bfq-iosched.c |
3895 |
+@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq); |
3896 |
+ |
3897 |
+ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) |
3898 |
+ { |
3899 |
++ struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; |
3900 |
++ |
3901 |
++ /* Clear bic pointer if bfqq is detached from this bic */ |
3902 |
++ if (old_bfqq && old_bfqq->bic == bic) |
3903 |
++ old_bfqq->bic = NULL; |
3904 |
++ |
3905 |
+ /* |
3906 |
+ * If bfqq != NULL, then a non-stable queue merge between |
3907 |
+ * bic->bfqq and bfqq is happening here. This causes troubles |
3908 |
+@@ -5245,7 +5251,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) |
3909 |
+ unsigned long flags; |
3910 |
+ |
3911 |
+ spin_lock_irqsave(&bfqd->lock, flags); |
3912 |
+- bfqq->bic = NULL; |
3913 |
+ bfq_exit_bfqq(bfqd, bfqq); |
3914 |
+ bic_set_bfqq(bic, NULL, is_sync); |
3915 |
+ spin_unlock_irqrestore(&bfqd->lock, flags); |
3916 |
+@@ -6630,6 +6635,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) |
3917 |
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, |
3918 |
+ true, is_sync, |
3919 |
+ NULL); |
3920 |
++ if (unlikely(bfqq == &bfqd->oom_bfqq)) |
3921 |
++ bfqq_already_existing = true; |
3922 |
++ } else |
3923 |
++ bfqq_already_existing = true; |
3924 |
++ |
3925 |
++ if (!bfqq_already_existing) { |
3926 |
+ bfqq->waker_bfqq = old_bfqq->waker_bfqq; |
3927 |
+ bfqq->tentative_waker_bfqq = NULL; |
3928 |
+ |
3929 |
+@@ -6643,8 +6654,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) |
3930 |
+ if (bfqq->waker_bfqq) |
3931 |
+ hlist_add_head(&bfqq->woken_list_node, |
3932 |
+ &bfqq->waker_bfqq->woken_list); |
3933 |
+- } else |
3934 |
+- bfqq_already_existing = true; |
3935 |
++ } |
3936 |
+ } |
3937 |
+ } |
3938 |
+ |
3939 |
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c |
3940 |
+index 253c857cba47c..7074ce8d2d03f 100644 |
3941 |
+--- a/block/blk-mq-sysfs.c |
3942 |
++++ b/block/blk-mq-sysfs.c |
3943 |
+@@ -187,7 +187,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) |
3944 |
+ { |
3945 |
+ struct request_queue *q = hctx->queue; |
3946 |
+ struct blk_mq_ctx *ctx; |
3947 |
+- int i, ret; |
3948 |
++ int i, j, ret; |
3949 |
+ |
3950 |
+ if (!hctx->nr_ctx) |
3951 |
+ return 0; |
3952 |
+@@ -199,9 +199,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) |
3953 |
+ hctx_for_each_ctx(hctx, ctx, i) { |
3954 |
+ ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); |
3955 |
+ if (ret) |
3956 |
+- break; |
3957 |
++ goto out; |
3958 |
+ } |
3959 |
+ |
3960 |
++ return 0; |
3961 |
++out: |
3962 |
++ hctx_for_each_ctx(hctx, ctx, j) { |
3963 |
++ if (j < i) |
3964 |
++ kobject_del(&ctx->kobj); |
3965 |
++ } |
3966 |
++ kobject_del(&hctx->kobj); |
3967 |
+ return ret; |
3968 |
+ } |
3969 |
+ |
3970 |
+diff --git a/block/genhd.c b/block/genhd.c |
3971 |
+index 68065189ca176..a1d9e785dcf70 100644 |
3972 |
+--- a/block/genhd.c |
3973 |
++++ b/block/genhd.c |
3974 |
+@@ -530,6 +530,7 @@ out_unregister_queue: |
3975 |
+ rq_qos_exit(disk->queue); |
3976 |
+ out_put_slave_dir: |
3977 |
+ kobject_put(disk->slave_dir); |
3978 |
++ disk->slave_dir = NULL; |
3979 |
+ out_put_holder_dir: |
3980 |
+ kobject_put(disk->part0->bd_holder_dir); |
3981 |
+ out_del_integrity: |
3982 |
+@@ -624,6 +625,7 @@ void del_gendisk(struct gendisk *disk) |
3983 |
+ |
3984 |
+ kobject_put(disk->part0->bd_holder_dir); |
3985 |
+ kobject_put(disk->slave_dir); |
3986 |
++ disk->slave_dir = NULL; |
3987 |
+ |
3988 |
+ part_stat_set_all(disk->part0, 0); |
3989 |
+ disk->part0->bd_stamp = 0; |
3990 |
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c |
3991 |
+index 668095eca0faf..ca3a40fc7da91 100644 |
3992 |
+--- a/crypto/cryptd.c |
3993 |
++++ b/crypto/cryptd.c |
3994 |
+@@ -68,11 +68,12 @@ struct aead_instance_ctx { |
3995 |
+ |
3996 |
+ struct cryptd_skcipher_ctx { |
3997 |
+ refcount_t refcnt; |
3998 |
+- struct crypto_sync_skcipher *child; |
3999 |
++ struct crypto_skcipher *child; |
4000 |
+ }; |
4001 |
+ |
4002 |
+ struct cryptd_skcipher_request_ctx { |
4003 |
+ crypto_completion_t complete; |
4004 |
++ struct skcipher_request req; |
4005 |
+ }; |
4006 |
+ |
4007 |
+ struct cryptd_hash_ctx { |
4008 |
+@@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, |
4009 |
+ const u8 *key, unsigned int keylen) |
4010 |
+ { |
4011 |
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); |
4012 |
+- struct crypto_sync_skcipher *child = ctx->child; |
4013 |
++ struct crypto_skcipher *child = ctx->child; |
4014 |
+ |
4015 |
+- crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
4016 |
+- crypto_sync_skcipher_set_flags(child, |
4017 |
+- crypto_skcipher_get_flags(parent) & |
4018 |
+- CRYPTO_TFM_REQ_MASK); |
4019 |
+- return crypto_sync_skcipher_setkey(child, key, keylen); |
4020 |
++ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
4021 |
++ crypto_skcipher_set_flags(child, |
4022 |
++ crypto_skcipher_get_flags(parent) & |
4023 |
++ CRYPTO_TFM_REQ_MASK); |
4024 |
++ return crypto_skcipher_setkey(child, key, keylen); |
4025 |
+ } |
4026 |
+ |
4027 |
+ static void cryptd_skcipher_complete(struct skcipher_request *req, int err) |
4028 |
+@@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base, |
4029 |
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); |
4030 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
4031 |
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
4032 |
+- struct crypto_sync_skcipher *child = ctx->child; |
4033 |
+- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); |
4034 |
++ struct skcipher_request *subreq = &rctx->req; |
4035 |
++ struct crypto_skcipher *child = ctx->child; |
4036 |
+ |
4037 |
+ if (unlikely(err == -EINPROGRESS)) |
4038 |
+ goto out; |
4039 |
+ |
4040 |
+- skcipher_request_set_sync_tfm(subreq, child); |
4041 |
++ skcipher_request_set_tfm(subreq, child); |
4042 |
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, |
4043 |
+ NULL, NULL); |
4044 |
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
4045 |
+@@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base, |
4046 |
+ struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); |
4047 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
4048 |
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
4049 |
+- struct crypto_sync_skcipher *child = ctx->child; |
4050 |
+- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); |
4051 |
++ struct skcipher_request *subreq = &rctx->req; |
4052 |
++ struct crypto_skcipher *child = ctx->child; |
4053 |
+ |
4054 |
+ if (unlikely(err == -EINPROGRESS)) |
4055 |
+ goto out; |
4056 |
+ |
4057 |
+- skcipher_request_set_sync_tfm(subreq, child); |
4058 |
++ skcipher_request_set_tfm(subreq, child); |
4059 |
+ skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, |
4060 |
+ NULL, NULL); |
4061 |
+ skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
4062 |
+@@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) |
4063 |
+ if (IS_ERR(cipher)) |
4064 |
+ return PTR_ERR(cipher); |
4065 |
+ |
4066 |
+- ctx->child = (struct crypto_sync_skcipher *)cipher; |
4067 |
++ ctx->child = cipher; |
4068 |
+ crypto_skcipher_set_reqsize( |
4069 |
+- tfm, sizeof(struct cryptd_skcipher_request_ctx)); |
4070 |
++ tfm, sizeof(struct cryptd_skcipher_request_ctx) + |
4071 |
++ crypto_skcipher_reqsize(cipher)); |
4072 |
+ return 0; |
4073 |
+ } |
4074 |
+ |
4075 |
+@@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) |
4076 |
+ { |
4077 |
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
4078 |
+ |
4079 |
+- crypto_free_sync_skcipher(ctx->child); |
4080 |
++ crypto_free_skcipher(ctx->child); |
4081 |
+ } |
4082 |
+ |
4083 |
+ static void cryptd_skcipher_free(struct skcipher_instance *inst) |
4084 |
+@@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) |
4085 |
+ { |
4086 |
+ struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); |
4087 |
+ |
4088 |
+- return &ctx->child->base; |
4089 |
++ return ctx->child; |
4090 |
+ } |
4091 |
+ EXPORT_SYMBOL_GPL(cryptd_skcipher_child); |
4092 |
+ |
4093 |
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c |
4094 |
+index 3362897bf61b9..4ada7e7493904 100644 |
4095 |
+--- a/crypto/tcrypt.c |
4096 |
++++ b/crypto/tcrypt.c |
4097 |
+@@ -1295,15 +1295,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, |
4098 |
+ goto out_free_tfm; |
4099 |
+ } |
4100 |
+ |
4101 |
+- |
4102 |
+- for (i = 0; i < num_mb; ++i) |
4103 |
+- if (testmgr_alloc_buf(data[i].xbuf)) { |
4104 |
+- while (i--) |
4105 |
+- testmgr_free_buf(data[i].xbuf); |
4106 |
+- goto out_free_tfm; |
4107 |
+- } |
4108 |
+- |
4109 |
+- |
4110 |
+ for (i = 0; i < num_mb; ++i) { |
4111 |
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); |
4112 |
+ if (!data[i].req) { |
4113 |
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c |
4114 |
+index 8e011e59b9b48..ee1832ba39a24 100644 |
4115 |
+--- a/drivers/acpi/acpica/dsmethod.c |
4116 |
++++ b/drivers/acpi/acpica/dsmethod.c |
4117 |
+@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, |
4118 |
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); |
4119 |
+ if (!info) { |
4120 |
+ status = AE_NO_MEMORY; |
4121 |
+- goto cleanup; |
4122 |
++ goto pop_walk_state; |
4123 |
+ } |
4124 |
+ |
4125 |
+ info->parameters = &this_walk_state->operands[0]; |
4126 |
+@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, |
4127 |
+ |
4128 |
+ ACPI_FREE(info); |
4129 |
+ if (ACPI_FAILURE(status)) { |
4130 |
+- goto cleanup; |
4131 |
++ goto pop_walk_state; |
4132 |
+ } |
4133 |
+ |
4134 |
+ next_walk_state->method_nesting_depth = |
4135 |
+@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, |
4136 |
+ |
4137 |
+ return_ACPI_STATUS(status); |
4138 |
+ |
4139 |
++pop_walk_state: |
4140 |
++ |
4141 |
++ /* On error, pop the walk state to be deleted from thread */ |
4142 |
++ |
4143 |
++ acpi_ds_pop_walk_state(thread); |
4144 |
++ |
4145 |
+ cleanup: |
4146 |
+ |
4147 |
+ /* On error, we must terminate the method properly */ |
4148 |
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c |
4149 |
+index d9877153f4001..fdd503bb69c47 100644 |
4150 |
+--- a/drivers/acpi/acpica/utcopy.c |
4151 |
++++ b/drivers/acpi/acpica/utcopy.c |
4152 |
+@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, |
4153 |
+ status = acpi_ut_walk_package_tree(source_obj, dest_obj, |
4154 |
+ acpi_ut_copy_ielement_to_ielement, |
4155 |
+ walk_state); |
4156 |
+- if (ACPI_FAILURE(status)) { |
4157 |
+- |
4158 |
+- /* On failure, delete the destination package object */ |
4159 |
+- |
4160 |
+- acpi_ut_remove_reference(dest_obj); |
4161 |
+- } |
4162 |
+- |
4163 |
+ return_ACPI_STATUS(status); |
4164 |
+ } |
4165 |
+ |
4166 |
+diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c |
4167 |
+index 2a04e8abd3977..26e0eb537b4f5 100644 |
4168 |
+--- a/drivers/ata/acard-ahci.c |
4169 |
++++ b/drivers/ata/acard-ahci.c |
4170 |
+@@ -267,7 +267,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) |
4171 |
+ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && |
4172 |
+ !(qc->flags & ATA_QCFLAG_FAILED)) { |
4173 |
+ ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); |
4174 |
+- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; |
4175 |
++ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; |
4176 |
+ } else |
4177 |
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); |
4178 |
+ |
4179 |
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
4180 |
+index 812731e80f8e0..c1bf7117a9fff 100644 |
4181 |
+--- a/drivers/ata/ahci.c |
4182 |
++++ b/drivers/ata/ahci.c |
4183 |
+@@ -735,7 +735,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, |
4184 |
+ |
4185 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4186 |
+ ata_tf_init(link->device, &tf); |
4187 |
+- tf.command = ATA_BUSY; |
4188 |
++ tf.status = ATA_BUSY; |
4189 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4190 |
+ |
4191 |
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), |
4192 |
+@@ -806,7 +806,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, |
4193 |
+ |
4194 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4195 |
+ ata_tf_init(link->device, &tf); |
4196 |
+- tf.command = ATA_BUSY; |
4197 |
++ tf.status = ATA_BUSY; |
4198 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4199 |
+ |
4200 |
+ rc = sata_link_hardreset(link, timing, deadline, &online, |
4201 |
+diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c |
4202 |
+index 5b46fc9aeb4a0..e5ac3d1c214c0 100644 |
4203 |
+--- a/drivers/ata/ahci_qoriq.c |
4204 |
++++ b/drivers/ata/ahci_qoriq.c |
4205 |
+@@ -125,7 +125,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, |
4206 |
+ |
4207 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4208 |
+ ata_tf_init(link->device, &tf); |
4209 |
+- tf.command = ATA_BUSY; |
4210 |
++ tf.status = ATA_BUSY; |
4211 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4212 |
+ |
4213 |
+ rc = sata_link_hardreset(link, timing, deadline, &online, |
4214 |
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c |
4215 |
+index dffc432b9d54a..292099410cf68 100644 |
4216 |
+--- a/drivers/ata/ahci_xgene.c |
4217 |
++++ b/drivers/ata/ahci_xgene.c |
4218 |
+@@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, |
4219 |
+ do { |
4220 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4221 |
+ ata_tf_init(link->device, &tf); |
4222 |
+- tf.command = ATA_BUSY; |
4223 |
++ tf.status = ATA_BUSY; |
4224 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4225 |
+ rc = sata_link_hardreset(link, timing, deadline, online, |
4226 |
+ ahci_check_ready); |
4227 |
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c |
4228 |
+index 395772fa39432..192115a45dd78 100644 |
4229 |
+--- a/drivers/ata/libahci.c |
4230 |
++++ b/drivers/ata/libahci.c |
4231 |
+@@ -1552,7 +1552,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, |
4232 |
+ |
4233 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4234 |
+ ata_tf_init(link->device, &tf); |
4235 |
+- tf.command = ATA_BUSY; |
4236 |
++ tf.status = ATA_BUSY; |
4237 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4238 |
+ |
4239 |
+ rc = sata_link_hardreset(link, timing, deadline, online, |
4240 |
+@@ -2038,7 +2038,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) |
4241 |
+ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && |
4242 |
+ !(qc->flags & ATA_QCFLAG_FAILED)) { |
4243 |
+ ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); |
4244 |
+- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; |
4245 |
++ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; |
4246 |
+ } else |
4247 |
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); |
4248 |
+ |
4249 |
+diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c |
4250 |
+index 7a7d6642edcc5..d15f3e908ea4a 100644 |
4251 |
+--- a/drivers/ata/libata-acpi.c |
4252 |
++++ b/drivers/ata/libata-acpi.c |
4253 |
+@@ -554,13 +554,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev, |
4254 |
+ |
4255 |
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
4256 |
+ tf->protocol = ATA_PROT_NODATA; |
4257 |
+- tf->feature = gtf->tf[0]; /* 0x1f1 */ |
4258 |
++ tf->error = gtf->tf[0]; /* 0x1f1 */ |
4259 |
+ tf->nsect = gtf->tf[1]; /* 0x1f2 */ |
4260 |
+ tf->lbal = gtf->tf[2]; /* 0x1f3 */ |
4261 |
+ tf->lbam = gtf->tf[3]; /* 0x1f4 */ |
4262 |
+ tf->lbah = gtf->tf[4]; /* 0x1f5 */ |
4263 |
+ tf->device = gtf->tf[5]; /* 0x1f6 */ |
4264 |
+- tf->command = gtf->tf[6]; /* 0x1f7 */ |
4265 |
++ tf->status = gtf->tf[6]; /* 0x1f7 */ |
4266 |
+ } |
4267 |
+ |
4268 |
+ static int ata_acpi_filter_tf(struct ata_device *dev, |
4269 |
+@@ -650,9 +650,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, |
4270 |
+ struct ata_taskfile *pptf = NULL; |
4271 |
+ struct ata_taskfile tf, ptf, rtf; |
4272 |
+ unsigned int err_mask; |
4273 |
+- const char *level; |
4274 |
+ const char *descr; |
4275 |
+- char msg[60]; |
4276 |
+ int rc; |
4277 |
+ |
4278 |
+ if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) |
4279 |
+@@ -666,6 +664,10 @@ static int ata_acpi_run_tf(struct ata_device *dev, |
4280 |
+ pptf = &ptf; |
4281 |
+ } |
4282 |
+ |
4283 |
++ descr = ata_get_cmd_descript(tf.command); |
4284 |
++ if (!descr) |
4285 |
++ descr = "unknown"; |
4286 |
++ |
4287 |
+ if (!ata_acpi_filter_tf(dev, &tf, pptf)) { |
4288 |
+ rtf = tf; |
4289 |
+ err_mask = ata_exec_internal(dev, &rtf, NULL, |
4290 |
+@@ -673,40 +675,42 @@ static int ata_acpi_run_tf(struct ata_device *dev, |
4291 |
+ |
4292 |
+ switch (err_mask) { |
4293 |
+ case 0: |
4294 |
+- level = KERN_DEBUG; |
4295 |
+- snprintf(msg, sizeof(msg), "succeeded"); |
4296 |
++ ata_dev_dbg(dev, |
4297 |
++ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" |
4298 |
++ "(%s) succeeded\n", |
4299 |
++ tf.command, tf.feature, tf.nsect, tf.lbal, |
4300 |
++ tf.lbam, tf.lbah, tf.device, descr); |
4301 |
+ rc = 1; |
4302 |
+ break; |
4303 |
+ |
4304 |
+ case AC_ERR_DEV: |
4305 |
+- level = KERN_INFO; |
4306 |
+- snprintf(msg, sizeof(msg), |
4307 |
+- "rejected by device (Stat=0x%02x Err=0x%02x)", |
4308 |
+- rtf.command, rtf.feature); |
4309 |
++ ata_dev_info(dev, |
4310 |
++ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" |
4311 |
++ "(%s) rejected by device (Stat=0x%02x Err=0x%02x)", |
4312 |
++ tf.command, tf.feature, tf.nsect, tf.lbal, |
4313 |
++ tf.lbam, tf.lbah, tf.device, descr, |
4314 |
++ rtf.status, rtf.error); |
4315 |
+ rc = 0; |
4316 |
+ break; |
4317 |
+ |
4318 |
+ default: |
4319 |
+- level = KERN_ERR; |
4320 |
+- snprintf(msg, sizeof(msg), |
4321 |
+- "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", |
4322 |
+- err_mask, rtf.command, rtf.feature); |
4323 |
++ ata_dev_err(dev, |
4324 |
++ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" |
4325 |
++ "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", |
4326 |
++ tf.command, tf.feature, tf.nsect, tf.lbal, |
4327 |
++ tf.lbam, tf.lbah, tf.device, descr, |
4328 |
++ err_mask, rtf.status, rtf.error); |
4329 |
+ rc = -EIO; |
4330 |
+ break; |
4331 |
+ } |
4332 |
+ } else { |
4333 |
+- level = KERN_INFO; |
4334 |
+- snprintf(msg, sizeof(msg), "filtered out"); |
4335 |
++ ata_dev_info(dev, |
4336 |
++ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" |
4337 |
++ "(%s) filtered out\n", |
4338 |
++ tf.command, tf.feature, tf.nsect, tf.lbal, |
4339 |
++ tf.lbam, tf.lbah, tf.device, descr); |
4340 |
+ rc = 0; |
4341 |
+ } |
4342 |
+- descr = ata_get_cmd_descript(tf.command); |
4343 |
+- |
4344 |
+- ata_dev_printk(dev, level, |
4345 |
+- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", |
4346 |
+- tf.command, tf.feature, tf.nsect, tf.lbal, |
4347 |
+- tf.lbam, tf.lbah, tf.device, |
4348 |
+- (descr ? descr : "unknown"), msg); |
4349 |
+- |
4350 |
+ return rc; |
4351 |
+ } |
4352 |
+ |
4353 |
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
4354 |
+index 4d308e3163c39..c430cd3cfa171 100644 |
4355 |
+--- a/drivers/ata/libata-core.c |
4356 |
++++ b/drivers/ata/libata-core.c |
4357 |
+@@ -1185,7 +1185,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) |
4358 |
+ ata_dev_warn(dev, |
4359 |
+ "failed to read native max address (err_mask=0x%x)\n", |
4360 |
+ err_mask); |
4361 |
+- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) |
4362 |
++ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) |
4363 |
+ return -EACCES; |
4364 |
+ return -EIO; |
4365 |
+ } |
4366 |
+@@ -1249,7 +1249,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) |
4367 |
+ "failed to set max address (err_mask=0x%x)\n", |
4368 |
+ err_mask); |
4369 |
+ if (err_mask == AC_ERR_DEV && |
4370 |
+- (tf.feature & (ATA_ABORTED | ATA_IDNF))) |
4371 |
++ (tf.error & (ATA_ABORTED | ATA_IDNF))) |
4372 |
+ return -EACCES; |
4373 |
+ return -EIO; |
4374 |
+ } |
4375 |
+@@ -1616,7 +1616,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, |
4376 |
+ |
4377 |
+ /* perform minimal error analysis */ |
4378 |
+ if (qc->flags & ATA_QCFLAG_FAILED) { |
4379 |
+- if (qc->result_tf.command & (ATA_ERR | ATA_DF)) |
4380 |
++ if (qc->result_tf.status & (ATA_ERR | ATA_DF)) |
4381 |
+ qc->err_mask |= AC_ERR_DEV; |
4382 |
+ |
4383 |
+ if (!qc->err_mask) |
4384 |
+@@ -1625,7 +1625,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, |
4385 |
+ if (qc->err_mask & ~AC_ERR_OTHER) |
4386 |
+ qc->err_mask &= ~AC_ERR_OTHER; |
4387 |
+ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { |
4388 |
+- qc->result_tf.command |= ATA_SENSE; |
4389 |
++ qc->result_tf.status |= ATA_SENSE; |
4390 |
+ } |
4391 |
+ |
4392 |
+ /* finish up */ |
4393 |
+@@ -1848,7 +1848,7 @@ retry: |
4394 |
+ return 0; |
4395 |
+ } |
4396 |
+ |
4397 |
+- if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { |
4398 |
++ if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) { |
4399 |
+ /* Device or controller might have reported |
4400 |
+ * the wrong device class. Give a shot at the |
4401 |
+ * other IDENTIFY if the current one is |
4402 |
+@@ -4371,7 +4371,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, |
4403 |
+ /* A clean abort indicates an original or just out of spec drive |
4404 |
+ and we should continue as we issue the setup based on the |
4405 |
+ drive reported working geometry */ |
4406 |
+- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) |
4407 |
++ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) |
4408 |
+ err_mask = 0; |
4409 |
+ |
4410 |
+ DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4411 |
+@@ -6497,67 +6497,6 @@ const struct ata_port_info ata_dummy_port_info = { |
4412 |
+ }; |
4413 |
+ EXPORT_SYMBOL_GPL(ata_dummy_port_info); |
4414 |
+ |
4415 |
+-/* |
4416 |
+- * Utility print functions |
4417 |
+- */ |
4418 |
+-void ata_port_printk(const struct ata_port *ap, const char *level, |
4419 |
+- const char *fmt, ...) |
4420 |
+-{ |
4421 |
+- struct va_format vaf; |
4422 |
+- va_list args; |
4423 |
+- |
4424 |
+- va_start(args, fmt); |
4425 |
+- |
4426 |
+- vaf.fmt = fmt; |
4427 |
+- vaf.va = &args; |
4428 |
+- |
4429 |
+- printk("%sata%u: %pV", level, ap->print_id, &vaf); |
4430 |
+- |
4431 |
+- va_end(args); |
4432 |
+-} |
4433 |
+-EXPORT_SYMBOL(ata_port_printk); |
4434 |
+- |
4435 |
+-void ata_link_printk(const struct ata_link *link, const char *level, |
4436 |
+- const char *fmt, ...) |
4437 |
+-{ |
4438 |
+- struct va_format vaf; |
4439 |
+- va_list args; |
4440 |
+- |
4441 |
+- va_start(args, fmt); |
4442 |
+- |
4443 |
+- vaf.fmt = fmt; |
4444 |
+- vaf.va = &args; |
4445 |
+- |
4446 |
+- if (sata_pmp_attached(link->ap) || link->ap->slave_link) |
4447 |
+- printk("%sata%u.%02u: %pV", |
4448 |
+- level, link->ap->print_id, link->pmp, &vaf); |
4449 |
+- else |
4450 |
+- printk("%sata%u: %pV", |
4451 |
+- level, link->ap->print_id, &vaf); |
4452 |
+- |
4453 |
+- va_end(args); |
4454 |
+-} |
4455 |
+-EXPORT_SYMBOL(ata_link_printk); |
4456 |
+- |
4457 |
+-void ata_dev_printk(const struct ata_device *dev, const char *level, |
4458 |
+- const char *fmt, ...) |
4459 |
+-{ |
4460 |
+- struct va_format vaf; |
4461 |
+- va_list args; |
4462 |
+- |
4463 |
+- va_start(args, fmt); |
4464 |
+- |
4465 |
+- vaf.fmt = fmt; |
4466 |
+- vaf.va = &args; |
4467 |
+- |
4468 |
+- printk("%sata%u.%02u: %pV", |
4469 |
+- level, dev->link->ap->print_id, dev->link->pmp + dev->devno, |
4470 |
+- &vaf); |
4471 |
+- |
4472 |
+- va_end(args); |
4473 |
+-} |
4474 |
+-EXPORT_SYMBOL(ata_dev_printk); |
4475 |
+- |
4476 |
+ void ata_print_version(const struct device *dev, const char *version) |
4477 |
+ { |
4478 |
+ dev_printk(KERN_DEBUG, dev, "version %s\n", version); |
4479 |
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c |
4480 |
+index 7aea631edb274..8350abc172908 100644 |
4481 |
+--- a/drivers/ata/libata-eh.c |
4482 |
++++ b/drivers/ata/libata-eh.c |
4483 |
+@@ -1386,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) |
4484 |
+ |
4485 |
+ err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); |
4486 |
+ if (err_mask == AC_ERR_DEV) |
4487 |
+- *r_sense_key = tf.feature >> 4; |
4488 |
++ *r_sense_key = tf.error >> 4; |
4489 |
+ return err_mask; |
4490 |
+ } |
4491 |
+ |
4492 |
+@@ -1431,12 +1431,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc, |
4493 |
+ |
4494 |
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4495 |
+ /* Ignore err_mask; ATA_ERR might be set */ |
4496 |
+- if (tf.command & ATA_SENSE) { |
4497 |
++ if (tf.status & ATA_SENSE) { |
4498 |
+ ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); |
4499 |
+ qc->flags |= ATA_QCFLAG_SENSE_VALID; |
4500 |
+ } else { |
4501 |
+ ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", |
4502 |
+- tf.command, err_mask); |
4503 |
++ tf.status, err_mask); |
4504 |
+ } |
4505 |
+ } |
4506 |
+ |
4507 |
+@@ -1561,7 +1561,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, |
4508 |
+ const struct ata_taskfile *tf) |
4509 |
+ { |
4510 |
+ unsigned int tmp, action = 0; |
4511 |
+- u8 stat = tf->command, err = tf->feature; |
4512 |
++ u8 stat = tf->status, err = tf->error; |
4513 |
+ |
4514 |
+ if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { |
4515 |
+ qc->err_mask |= AC_ERR_HSM; |
4516 |
+@@ -1598,7 +1598,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, |
4517 |
+ if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { |
4518 |
+ tmp = atapi_eh_request_sense(qc->dev, |
4519 |
+ qc->scsicmd->sense_buffer, |
4520 |
+- qc->result_tf.feature >> 4); |
4521 |
++ qc->result_tf.error >> 4); |
4522 |
+ if (!tmp) |
4523 |
+ qc->flags |= ATA_QCFLAG_SENSE_VALID; |
4524 |
+ else |
4525 |
+@@ -2372,7 +2372,7 @@ static void ata_eh_link_report(struct ata_link *link) |
4526 |
+ cmd->hob_feature, cmd->hob_nsect, |
4527 |
+ cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, |
4528 |
+ cmd->device, qc->tag, data_buf, cdb_buf, |
4529 |
+- res->command, res->feature, res->nsect, |
4530 |
++ res->status, res->error, res->nsect, |
4531 |
+ res->lbal, res->lbam, res->lbah, |
4532 |
+ res->hob_feature, res->hob_nsect, |
4533 |
+ res->hob_lbal, res->hob_lbam, res->hob_lbah, |
4534 |
+@@ -2380,28 +2380,28 @@ static void ata_eh_link_report(struct ata_link *link) |
4535 |
+ qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); |
4536 |
+ |
4537 |
+ #ifdef CONFIG_ATA_VERBOSE_ERROR |
4538 |
+- if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | |
4539 |
+- ATA_SENSE | ATA_ERR)) { |
4540 |
+- if (res->command & ATA_BUSY) |
4541 |
++ if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | |
4542 |
++ ATA_SENSE | ATA_ERR)) { |
4543 |
++ if (res->status & ATA_BUSY) |
4544 |
+ ata_dev_err(qc->dev, "status: { Busy }\n"); |
4545 |
+ else |
4546 |
+ ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", |
4547 |
+- res->command & ATA_DRDY ? "DRDY " : "", |
4548 |
+- res->command & ATA_DF ? "DF " : "", |
4549 |
+- res->command & ATA_DRQ ? "DRQ " : "", |
4550 |
+- res->command & ATA_SENSE ? "SENSE " : "", |
4551 |
+- res->command & ATA_ERR ? "ERR " : ""); |
4552 |
++ res->status & ATA_DRDY ? "DRDY " : "", |
4553 |
++ res->status & ATA_DF ? "DF " : "", |
4554 |
++ res->status & ATA_DRQ ? "DRQ " : "", |
4555 |
++ res->status & ATA_SENSE ? "SENSE " : "", |
4556 |
++ res->status & ATA_ERR ? "ERR " : ""); |
4557 |
+ } |
4558 |
+ |
4559 |
+ if (cmd->command != ATA_CMD_PACKET && |
4560 |
+- (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | |
4561 |
+- ATA_IDNF | ATA_ABORTED))) |
4562 |
++ (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | |
4563 |
++ ATA_ABORTED))) |
4564 |
+ ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", |
4565 |
+- res->feature & ATA_ICRC ? "ICRC " : "", |
4566 |
+- res->feature & ATA_UNC ? "UNC " : "", |
4567 |
+- res->feature & ATA_AMNF ? "AMNF " : "", |
4568 |
+- res->feature & ATA_IDNF ? "IDNF " : "", |
4569 |
+- res->feature & ATA_ABORTED ? "ABRT " : ""); |
4570 |
++ res->error & ATA_ICRC ? "ICRC " : "", |
4571 |
++ res->error & ATA_UNC ? "UNC " : "", |
4572 |
++ res->error & ATA_AMNF ? "AMNF " : "", |
4573 |
++ res->error & ATA_IDNF ? "IDNF " : "", |
4574 |
++ res->error & ATA_ABORTED ? "ABRT " : ""); |
4575 |
+ #endif |
4576 |
+ } |
4577 |
+ } |
4578 |
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c |
4579 |
+index 8f3ff830ab0c6..b5aa525d87603 100644 |
4580 |
+--- a/drivers/ata/libata-sata.c |
4581 |
++++ b/drivers/ata/libata-sata.c |
4582 |
+@@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis); |
4583 |
+ |
4584 |
+ void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) |
4585 |
+ { |
4586 |
+- tf->command = fis[2]; /* status */ |
4587 |
+- tf->feature = fis[3]; /* error */ |
4588 |
++ tf->status = fis[2]; |
4589 |
++ tf->error = fis[3]; |
4590 |
+ |
4591 |
+ tf->lbal = fis[4]; |
4592 |
+ tf->lbam = fis[5]; |
4593 |
+@@ -1402,8 +1402,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, |
4594 |
+ |
4595 |
+ *tag = buf[0] & 0x1f; |
4596 |
+ |
4597 |
+- tf->command = buf[2]; |
4598 |
+- tf->feature = buf[3]; |
4599 |
++ tf->status = buf[2]; |
4600 |
++ tf->error = buf[3]; |
4601 |
+ tf->lbal = buf[4]; |
4602 |
+ tf->lbam = buf[5]; |
4603 |
+ tf->lbah = buf[6]; |
4604 |
+@@ -1413,7 +1413,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, |
4605 |
+ tf->hob_lbah = buf[10]; |
4606 |
+ tf->nsect = buf[12]; |
4607 |
+ tf->hob_nsect = buf[13]; |
4608 |
+- if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) |
4609 |
++ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) && |
4610 |
++ (tf->status & ATA_SENSE)) |
4611 |
+ tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; |
4612 |
+ |
4613 |
+ return 0; |
4614 |
+@@ -1477,8 +1478,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) |
4615 |
+ memcpy(&qc->result_tf, &tf, sizeof(tf)); |
4616 |
+ qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; |
4617 |
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; |
4618 |
+- if (dev->class == ATA_DEV_ZAC && |
4619 |
+- ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { |
4620 |
++ |
4621 |
++ /* |
4622 |
++ * If the device supports NCQ autosense, ata_eh_read_log_10h() will have |
4623 |
++ * stored the sense data in qc->result_tf.auxiliary. |
4624 |
++ */ |
4625 |
++ if (qc->result_tf.auxiliary) { |
4626 |
+ char sense_key, asc, ascq; |
4627 |
+ |
4628 |
+ sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; |
4629 |
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c |
4630 |
+index 061d2f8feeb53..4d8129640d60e 100644 |
4631 |
+--- a/drivers/ata/libata-scsi.c |
4632 |
++++ b/drivers/ata/libata-scsi.c |
4633 |
+@@ -671,7 +671,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) |
4634 |
+ */ |
4635 |
+ static void ata_dump_status(unsigned id, struct ata_taskfile *tf) |
4636 |
+ { |
4637 |
+- u8 stat = tf->command, err = tf->feature; |
4638 |
++ u8 stat = tf->status, err = tf->error; |
4639 |
+ |
4640 |
+ pr_warn("ata%u: status=0x%02x { ", id, stat); |
4641 |
+ if (stat & ATA_BUSY) { |
4642 |
+@@ -867,8 +867,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) |
4643 |
+ * onto sense key, asc & ascq. |
4644 |
+ */ |
4645 |
+ if (qc->err_mask || |
4646 |
+- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { |
4647 |
+- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, |
4648 |
++ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { |
4649 |
++ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, |
4650 |
+ &sense_key, &asc, &ascq, verbose); |
4651 |
+ ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); |
4652 |
+ } else { |
4653 |
+@@ -897,13 +897,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) |
4654 |
+ * Copy registers into sense buffer. |
4655 |
+ */ |
4656 |
+ desc[2] = 0x00; |
4657 |
+- desc[3] = tf->feature; /* == error reg */ |
4658 |
++ desc[3] = tf->error; |
4659 |
+ desc[5] = tf->nsect; |
4660 |
+ desc[7] = tf->lbal; |
4661 |
+ desc[9] = tf->lbam; |
4662 |
+ desc[11] = tf->lbah; |
4663 |
+ desc[12] = tf->device; |
4664 |
+- desc[13] = tf->command; /* == status reg */ |
4665 |
++ desc[13] = tf->status; |
4666 |
+ |
4667 |
+ /* |
4668 |
+ * Fill in Extend bit, and the high order bytes |
4669 |
+@@ -918,8 +918,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) |
4670 |
+ } |
4671 |
+ } else { |
4672 |
+ /* Fixed sense format */ |
4673 |
+- desc[0] = tf->feature; |
4674 |
+- desc[1] = tf->command; /* status */ |
4675 |
++ desc[0] = tf->error; |
4676 |
++ desc[1] = tf->status; |
4677 |
+ desc[2] = tf->device; |
4678 |
+ desc[3] = tf->nsect; |
4679 |
+ desc[7] = 0; |
4680 |
+@@ -968,14 +968,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) |
4681 |
+ * onto sense key, asc & ascq. |
4682 |
+ */ |
4683 |
+ if (qc->err_mask || |
4684 |
+- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { |
4685 |
+- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, |
4686 |
++ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { |
4687 |
++ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, |
4688 |
+ &sense_key, &asc, &ascq, verbose); |
4689 |
+ ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); |
4690 |
+ } else { |
4691 |
+ /* Could not decode error */ |
4692 |
+ ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", |
4693 |
+- tf->command, qc->err_mask); |
4694 |
++ tf->status, qc->err_mask); |
4695 |
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); |
4696 |
+ return; |
4697 |
+ } |
4698 |
+@@ -2490,7 +2490,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) |
4699 |
+ |
4700 |
+ /* fill these in, for the case where they are -not- overwritten */ |
4701 |
+ cmd->sense_buffer[0] = 0x70; |
4702 |
+- cmd->sense_buffer[2] = qc->tf.feature >> 4; |
4703 |
++ cmd->sense_buffer[2] = qc->tf.error >> 4; |
4704 |
+ |
4705 |
+ ata_qc_reinit(qc); |
4706 |
+ |
4707 |
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c |
4708 |
+index b71ea4a680b01..8409e53b7b7a0 100644 |
4709 |
+--- a/drivers/ata/libata-sff.c |
4710 |
++++ b/drivers/ata/libata-sff.c |
4711 |
+@@ -457,8 +457,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4712 |
+ { |
4713 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4714 |
+ |
4715 |
+- tf->command = ata_sff_check_status(ap); |
4716 |
+- tf->feature = ioread8(ioaddr->error_addr); |
4717 |
++ tf->status = ata_sff_check_status(ap); |
4718 |
++ tf->error = ioread8(ioaddr->error_addr); |
4719 |
+ tf->nsect = ioread8(ioaddr->nsect_addr); |
4720 |
+ tf->lbal = ioread8(ioaddr->lbal_addr); |
4721 |
+ tf->lbam = ioread8(ioaddr->lbam_addr); |
4722 |
+@@ -1837,7 +1837,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, |
4723 |
+ memset(&tf, 0, sizeof(tf)); |
4724 |
+ |
4725 |
+ ap->ops->sff_tf_read(ap, &tf); |
4726 |
+- err = tf.feature; |
4727 |
++ err = tf.error; |
4728 |
+ if (r_err) |
4729 |
+ *r_err = err; |
4730 |
+ |
4731 |
+diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c |
4732 |
+index 46208ececbb6a..3fc26026014e2 100644 |
4733 |
+--- a/drivers/ata/pata_ep93xx.c |
4734 |
++++ b/drivers/ata/pata_ep93xx.c |
4735 |
+@@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4736 |
+ { |
4737 |
+ struct ep93xx_pata_data *drv_data = ap->host->private_data; |
4738 |
+ |
4739 |
+- tf->command = ep93xx_pata_check_status(ap); |
4740 |
+- tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); |
4741 |
++ tf->status = ep93xx_pata_check_status(ap); |
4742 |
++ tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); |
4743 |
+ tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); |
4744 |
+ tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); |
4745 |
+ tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM); |
4746 |
+diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c |
4747 |
+index 99c63087c8ae9..17b557c91e1c7 100644 |
4748 |
+--- a/drivers/ata/pata_ixp4xx_cf.c |
4749 |
++++ b/drivers/ata/pata_ixp4xx_cf.c |
4750 |
+@@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev) |
4751 |
+ { |
4752 |
+ struct ixp4xx_pata *ixpp = ap->host->private_data; |
4753 |
+ |
4754 |
+- ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n", |
4755 |
++ ata_dev_info(adev, "configured for PIO%d 8bit\n", |
4756 |
+ adev->pio_mode - XFER_PIO_0); |
4757 |
+ ixp4xx_set_8bit_timing(ixpp, adev->pio_mode); |
4758 |
+ } |
4759 |
+@@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc, |
4760 |
+ struct ixp4xx_pata *ixpp = ap->host->private_data; |
4761 |
+ unsigned long flags; |
4762 |
+ |
4763 |
+- ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", |
4764 |
+- buflen); |
4765 |
++ ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", |
4766 |
++ buflen); |
4767 |
+ spin_lock_irqsave(ap->lock, flags); |
4768 |
+ |
4769 |
+ /* set the expansion bus in 16bit mode and restore |
4770 |
+diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c |
4771 |
+index f4949e704356e..9dd6bffefb485 100644 |
4772 |
+--- a/drivers/ata/pata_ns87415.c |
4773 |
++++ b/drivers/ata/pata_ns87415.c |
4774 |
+@@ -264,8 +264,8 @@ void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4775 |
+ { |
4776 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4777 |
+ |
4778 |
+- tf->command = ns87560_check_status(ap); |
4779 |
+- tf->feature = ioread8(ioaddr->error_addr); |
4780 |
++ tf->status = ns87560_check_status(ap); |
4781 |
++ tf->error = ioread8(ioaddr->error_addr); |
4782 |
+ tf->nsect = ioread8(ioaddr->nsect_addr); |
4783 |
+ tf->lbal = ioread8(ioaddr->lbal_addr); |
4784 |
+ tf->lbam = ioread8(ioaddr->lbam_addr); |
4785 |
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c |
4786 |
+index 4cc8a1027888a..6c9f2efcedc11 100644 |
4787 |
+--- a/drivers/ata/pata_octeon_cf.c |
4788 |
++++ b/drivers/ata/pata_octeon_cf.c |
4789 |
+@@ -386,7 +386,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) |
4790 |
+ void __iomem *base = ap->ioaddr.data_addr; |
4791 |
+ |
4792 |
+ blob = __raw_readw(base + 0xc); |
4793 |
+- tf->feature = blob >> 8; |
4794 |
++ tf->error = blob >> 8; |
4795 |
+ |
4796 |
+ blob = __raw_readw(base + 2); |
4797 |
+ tf->nsect = blob & 0xff; |
4798 |
+@@ -398,7 +398,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) |
4799 |
+ |
4800 |
+ blob = __raw_readw(base + 6); |
4801 |
+ tf->device = blob & 0xff; |
4802 |
+- tf->command = blob >> 8; |
4803 |
++ tf->status = blob >> 8; |
4804 |
+ |
4805 |
+ if (tf->flags & ATA_TFLAG_LBA48) { |
4806 |
+ if (likely(ap->ioaddr.ctl_addr)) { |
4807 |
+diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c |
4808 |
+index 3da0e8e302861..149d771c61d67 100644 |
4809 |
+--- a/drivers/ata/pata_samsung_cf.c |
4810 |
++++ b/drivers/ata/pata_samsung_cf.c |
4811 |
+@@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4812 |
+ { |
4813 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4814 |
+ |
4815 |
+- tf->feature = ata_inb(ap->host, ioaddr->error_addr); |
4816 |
++ tf->error = ata_inb(ap->host, ioaddr->error_addr); |
4817 |
+ tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); |
4818 |
+ tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); |
4819 |
+ tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); |
4820 |
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c |
4821 |
+index 8440203e835ed..f9bb3be4b939e 100644 |
4822 |
+--- a/drivers/ata/sata_highbank.c |
4823 |
++++ b/drivers/ata/sata_highbank.c |
4824 |
+@@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, |
4825 |
+ |
4826 |
+ /* clear D2H reception area to properly wait for D2H FIS */ |
4827 |
+ ata_tf_init(link->device, &tf); |
4828 |
+- tf.command = ATA_BUSY; |
4829 |
++ tf.status = ATA_BUSY; |
4830 |
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
4831 |
+ |
4832 |
+ do { |
4833 |
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c |
4834 |
+index e517bd8822a5f..659f1a903298f 100644 |
4835 |
+--- a/drivers/ata/sata_inic162x.c |
4836 |
++++ b/drivers/ata/sata_inic162x.c |
4837 |
+@@ -559,13 +559,13 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4838 |
+ { |
4839 |
+ void __iomem *port_base = inic_port_base(ap); |
4840 |
+ |
4841 |
+- tf->feature = readb(port_base + PORT_TF_FEATURE); |
4842 |
++ tf->error = readb(port_base + PORT_TF_FEATURE); |
4843 |
+ tf->nsect = readb(port_base + PORT_TF_NSECT); |
4844 |
+ tf->lbal = readb(port_base + PORT_TF_LBAL); |
4845 |
+ tf->lbam = readb(port_base + PORT_TF_LBAM); |
4846 |
+ tf->lbah = readb(port_base + PORT_TF_LBAH); |
4847 |
+ tf->device = readb(port_base + PORT_TF_DEVICE); |
4848 |
+- tf->command = readb(port_base + PORT_TF_COMMAND); |
4849 |
++ tf->status = readb(port_base + PORT_TF_COMMAND); |
4850 |
+ } |
4851 |
+ |
4852 |
+ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) |
4853 |
+@@ -582,11 +582,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) |
4854 |
+ */ |
4855 |
+ inic_tf_read(qc->ap, &tf); |
4856 |
+ |
4857 |
+- if (!(tf.command & ATA_ERR)) |
4858 |
++ if (!(tf.status & ATA_ERR)) |
4859 |
+ return false; |
4860 |
+ |
4861 |
+- rtf->command = tf.command; |
4862 |
+- rtf->feature = tf.feature; |
4863 |
++ rtf->status = tf.status; |
4864 |
++ rtf->error = tf.error; |
4865 |
+ return true; |
4866 |
+ } |
4867 |
+ |
4868 |
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c |
4869 |
+index 44b0ed8f6bb8a..9759e24f718fc 100644 |
4870 |
+--- a/drivers/ata/sata_rcar.c |
4871 |
++++ b/drivers/ata/sata_rcar.c |
4872 |
+@@ -417,8 +417,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4873 |
+ { |
4874 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4875 |
+ |
4876 |
+- tf->command = sata_rcar_check_status(ap); |
4877 |
+- tf->feature = ioread32(ioaddr->error_addr); |
4878 |
++ tf->status = sata_rcar_check_status(ap); |
4879 |
++ tf->error = ioread32(ioaddr->error_addr); |
4880 |
+ tf->nsect = ioread32(ioaddr->nsect_addr); |
4881 |
+ tf->lbal = ioread32(ioaddr->lbal_addr); |
4882 |
+ tf->lbam = ioread32(ioaddr->lbam_addr); |
4883 |
+diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c |
4884 |
+index f8552559db7f5..2e3418a82b445 100644 |
4885 |
+--- a/drivers/ata/sata_svw.c |
4886 |
++++ b/drivers/ata/sata_svw.c |
4887 |
+@@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
4888 |
+ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4889 |
+ { |
4890 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4891 |
+- u16 nsect, lbal, lbam, lbah, feature; |
4892 |
++ u16 nsect, lbal, lbam, lbah, error; |
4893 |
+ |
4894 |
+- tf->command = k2_stat_check_status(ap); |
4895 |
++ tf->status = k2_stat_check_status(ap); |
4896 |
+ tf->device = readw(ioaddr->device_addr); |
4897 |
+- feature = readw(ioaddr->error_addr); |
4898 |
++ error = readw(ioaddr->error_addr); |
4899 |
+ nsect = readw(ioaddr->nsect_addr); |
4900 |
+ lbal = readw(ioaddr->lbal_addr); |
4901 |
+ lbam = readw(ioaddr->lbam_addr); |
4902 |
+ lbah = readw(ioaddr->lbah_addr); |
4903 |
+ |
4904 |
+- tf->feature = feature; |
4905 |
++ tf->error = error; |
4906 |
+ tf->nsect = nsect; |
4907 |
+ tf->lbal = lbal; |
4908 |
+ tf->lbam = lbam; |
4909 |
+ tf->lbah = lbah; |
4910 |
+ |
4911 |
+ if (tf->flags & ATA_TFLAG_LBA48) { |
4912 |
+- tf->hob_feature = feature >> 8; |
4913 |
++ tf->hob_feature = error >> 8; |
4914 |
+ tf->hob_nsect = nsect >> 8; |
4915 |
+ tf->hob_lbal = lbal >> 8; |
4916 |
+ tf->hob_lbam = lbam >> 8; |
4917 |
+diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c |
4918 |
+index 8fa952cb9f7f4..87e4ed66b3064 100644 |
4919 |
+--- a/drivers/ata/sata_vsc.c |
4920 |
++++ b/drivers/ata/sata_vsc.c |
4921 |
+@@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
4922 |
+ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
4923 |
+ { |
4924 |
+ struct ata_ioports *ioaddr = &ap->ioaddr; |
4925 |
+- u16 nsect, lbal, lbam, lbah, feature; |
4926 |
++ u16 nsect, lbal, lbam, lbah, error; |
4927 |
+ |
4928 |
+- tf->command = ata_sff_check_status(ap); |
4929 |
++ tf->status = ata_sff_check_status(ap); |
4930 |
+ tf->device = readw(ioaddr->device_addr); |
4931 |
+- feature = readw(ioaddr->error_addr); |
4932 |
++ error = readw(ioaddr->error_addr); |
4933 |
+ nsect = readw(ioaddr->nsect_addr); |
4934 |
+ lbal = readw(ioaddr->lbal_addr); |
4935 |
+ lbam = readw(ioaddr->lbam_addr); |
4936 |
+ lbah = readw(ioaddr->lbah_addr); |
4937 |
+ |
4938 |
+- tf->feature = feature; |
4939 |
++ tf->error = error; |
4940 |
+ tf->nsect = nsect; |
4941 |
+ tf->lbal = lbal; |
4942 |
+ tf->lbam = lbam; |
4943 |
+ tf->lbah = lbah; |
4944 |
+ |
4945 |
+ if (tf->flags & ATA_TFLAG_LBA48) { |
4946 |
+- tf->hob_feature = feature >> 8; |
4947 |
++ tf->hob_feature = error >> 8; |
4948 |
+ tf->hob_nsect = nsect >> 8; |
4949 |
+ tf->hob_lbal = lbal >> 8; |
4950 |
+ tf->hob_lbam = lbam >> 8; |
4951 |
+diff --git a/drivers/base/class.c b/drivers/base/class.c |
4952 |
+index 7476f393df977..0e44a68e90a02 100644 |
4953 |
+--- a/drivers/base/class.c |
4954 |
++++ b/drivers/base/class.c |
4955 |
+@@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) |
4956 |
+ } |
4957 |
+ error = class_add_groups(class_get(cls), cls->class_groups); |
4958 |
+ class_put(cls); |
4959 |
++ if (error) { |
4960 |
++ kobject_del(&cp->subsys.kobj); |
4961 |
++ kfree_const(cp->subsys.kobj.name); |
4962 |
++ kfree(cp); |
4963 |
++ } |
4964 |
+ return error; |
4965 |
+ } |
4966 |
+ EXPORT_SYMBOL_GPL(__class_register); |
4967 |
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c |
4968 |
+index 3179c9265471b..c1142a7a4fe65 100644 |
4969 |
+--- a/drivers/base/power/runtime.c |
4970 |
++++ b/drivers/base/power/runtime.c |
4971 |
+@@ -484,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags) |
4972 |
+ |
4973 |
+ dev->power.idle_notification = true; |
4974 |
+ |
4975 |
+- retval = __rpm_callback(callback, dev); |
4976 |
++ if (dev->power.irq_safe) |
4977 |
++ spin_unlock(&dev->power.lock); |
4978 |
++ else |
4979 |
++ spin_unlock_irq(&dev->power.lock); |
4980 |
++ |
4981 |
++ retval = callback(dev); |
4982 |
++ |
4983 |
++ if (dev->power.irq_safe) |
4984 |
++ spin_lock(&dev->power.lock); |
4985 |
++ else |
4986 |
++ spin_lock_irq(&dev->power.lock); |
4987 |
+ |
4988 |
+ dev->power.idle_notification = false; |
4989 |
+ wake_up_all(&dev->power.wait_queue); |
4990 |
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c |
4991 |
+index f4e38c208b9fd..eaf20a3324018 100644 |
4992 |
+--- a/drivers/block/drbd/drbd_main.c |
4993 |
++++ b/drivers/block/drbd/drbd_main.c |
4994 |
+@@ -2244,7 +2244,8 @@ void drbd_destroy_device(struct kref *kref) |
4995 |
+ kref_put(&peer_device->connection->kref, drbd_destroy_connection); |
4996 |
+ kfree(peer_device); |
4997 |
+ } |
4998 |
+- memset(device, 0xfd, sizeof(*device)); |
4999 |
++ if (device->submit.wq) |
5000 |
++ destroy_workqueue(device->submit.wq); |
5001 |
+ kfree(device); |
5002 |
+ kref_put(&resource->kref, drbd_destroy_resource); |
5003 |
+ } |
5004 |
+@@ -2336,7 +2337,6 @@ void drbd_destroy_resource(struct kref *kref) |
5005 |
+ idr_destroy(&resource->devices); |
5006 |
+ free_cpumask_var(resource->cpu_mask); |
5007 |
+ kfree(resource->name); |
5008 |
+- memset(resource, 0xf2, sizeof(*resource)); |
5009 |
+ kfree(resource); |
5010 |
+ } |
5011 |
+ |
5012 |
+@@ -2677,7 +2677,6 @@ void drbd_destroy_connection(struct kref *kref) |
5013 |
+ drbd_free_socket(&connection->data); |
5014 |
+ kfree(connection->int_dig_in); |
5015 |
+ kfree(connection->int_dig_vv); |
5016 |
+- memset(connection, 0xfc, sizeof(*connection)); |
5017 |
+ kfree(connection); |
5018 |
+ kref_put(&resource->kref, drbd_destroy_resource); |
5019 |
+ } |
5020 |
+@@ -2800,7 +2799,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig |
5021 |
+ |
5022 |
+ err = add_disk(disk); |
5023 |
+ if (err) |
5024 |
+- goto out_idr_remove_from_resource; |
5025 |
++ goto out_destroy_workqueue; |
5026 |
+ |
5027 |
+ /* inherit the connection state */ |
5028 |
+ device->state.conn = first_connection(resource)->cstate; |
5029 |
+@@ -2814,6 +2813,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig |
5030 |
+ drbd_debugfs_device_add(device); |
5031 |
+ return NO_ERROR; |
5032 |
+ |
5033 |
++out_destroy_workqueue: |
5034 |
++ destroy_workqueue(device->submit.wq); |
5035 |
+ out_idr_remove_from_resource: |
5036 |
+ for_each_connection_safe(connection, n, resource) { |
5037 |
+ peer_device = idr_remove(&connection->peer_devices, vnr); |
5038 |
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c |
5039 |
+index db0b3e8982fe5..4dc25a123d946 100644 |
5040 |
+--- a/drivers/block/floppy.c |
5041 |
++++ b/drivers/block/floppy.c |
5042 |
+@@ -4587,8 +4587,10 @@ static int __init do_floppy_init(void) |
5043 |
+ goto out_put_disk; |
5044 |
+ |
5045 |
+ err = floppy_alloc_disk(drive, 0); |
5046 |
+- if (err) |
5047 |
++ if (err) { |
5048 |
++ blk_mq_free_tag_set(&tag_sets[drive]); |
5049 |
+ goto out_put_disk; |
5050 |
++ } |
5051 |
+ |
5052 |
+ timer_setup(&motor_off_timer[drive], motor_off_callback, 0); |
5053 |
+ } |
5054 |
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c |
5055 |
+index 79e485949b60d..68a0c0fe64dd8 100644 |
5056 |
+--- a/drivers/block/loop.c |
5057 |
++++ b/drivers/block/loop.c |
5058 |
+@@ -2091,7 +2091,16 @@ static const struct block_device_operations lo_fops = { |
5059 |
+ /* |
5060 |
+ * And now the modules code and kernel interface. |
5061 |
+ */ |
5062 |
+-static int max_loop; |
5063 |
++ |
5064 |
++/* |
5065 |
++ * If max_loop is specified, create that many devices upfront. |
5066 |
++ * This also becomes a hard limit. If max_loop is not specified, |
5067 |
++ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module |
5068 |
++ * init time. Loop devices can be requested on-demand with the |
5069 |
++ * /dev/loop-control interface, or be instantiated by accessing |
5070 |
++ * a 'dead' device node. |
5071 |
++ */ |
5072 |
++static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; |
5073 |
+ module_param(max_loop, int, 0444); |
5074 |
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); |
5075 |
+ module_param(max_part, int, 0444); |
5076 |
+@@ -2536,7 +2545,7 @@ MODULE_ALIAS("devname:loop-control"); |
5077 |
+ |
5078 |
+ static int __init loop_init(void) |
5079 |
+ { |
5080 |
+- int i, nr; |
5081 |
++ int i; |
5082 |
+ int err; |
5083 |
+ |
5084 |
+ part_shift = 0; |
5085 |
+@@ -2564,19 +2573,6 @@ static int __init loop_init(void) |
5086 |
+ goto err_out; |
5087 |
+ } |
5088 |
+ |
5089 |
+- /* |
5090 |
+- * If max_loop is specified, create that many devices upfront. |
5091 |
+- * This also becomes a hard limit. If max_loop is not specified, |
5092 |
+- * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module |
5093 |
+- * init time. Loop devices can be requested on-demand with the |
5094 |
+- * /dev/loop-control interface, or be instantiated by accessing |
5095 |
+- * a 'dead' device node. |
5096 |
+- */ |
5097 |
+- if (max_loop) |
5098 |
+- nr = max_loop; |
5099 |
+- else |
5100 |
+- nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; |
5101 |
+- |
5102 |
+ err = misc_register(&loop_misc); |
5103 |
+ if (err < 0) |
5104 |
+ goto err_out; |
5105 |
+@@ -2588,7 +2584,7 @@ static int __init loop_init(void) |
5106 |
+ } |
5107 |
+ |
5108 |
+ /* pre-create number of devices given by config or max_loop */ |
5109 |
+- for (i = 0; i < nr; i++) |
5110 |
++ for (i = 0; i < max_loop; i++) |
5111 |
+ loop_add(i); |
5112 |
+ |
5113 |
+ printk(KERN_INFO "loop: module loaded\n"); |
5114 |
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c |
5115 |
+index de3d851d85e7b..d707aa63e9441 100644 |
5116 |
+--- a/drivers/bluetooth/btintel.c |
5117 |
++++ b/drivers/bluetooth/btintel.c |
5118 |
+@@ -2353,7 +2353,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) |
5119 |
+ */ |
5120 |
+ err = btintel_read_version(hdev, &ver); |
5121 |
+ if (err) |
5122 |
+- return err; |
5123 |
++ break; |
5124 |
+ |
5125 |
+ /* Apply the device specific HCI quirks |
5126 |
+ * |
5127 |
+@@ -2394,7 +2394,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) |
5128 |
+ default: |
5129 |
+ bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", |
5130 |
+ INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); |
5131 |
+- return -EINVAL; |
5132 |
++ err = -EINVAL; |
5133 |
++ break; |
5134 |
+ } |
5135 |
+ |
5136 |
+ exit_error: |
5137 |
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
5138 |
+index 69380cb03dd32..9c32263f872b9 100644 |
5139 |
+--- a/drivers/bluetooth/btusb.c |
5140 |
++++ b/drivers/bluetooth/btusb.c |
5141 |
+@@ -735,13 +735,13 @@ static inline void btusb_free_frags(struct btusb_data *data) |
5142 |
+ |
5143 |
+ spin_lock_irqsave(&data->rxlock, flags); |
5144 |
+ |
5145 |
+- kfree_skb(data->evt_skb); |
5146 |
++ dev_kfree_skb_irq(data->evt_skb); |
5147 |
+ data->evt_skb = NULL; |
5148 |
+ |
5149 |
+- kfree_skb(data->acl_skb); |
5150 |
++ dev_kfree_skb_irq(data->acl_skb); |
5151 |
+ data->acl_skb = NULL; |
5152 |
+ |
5153 |
+- kfree_skb(data->sco_skb); |
5154 |
++ dev_kfree_skb_irq(data->sco_skb); |
5155 |
+ data->sco_skb = NULL; |
5156 |
+ |
5157 |
+ spin_unlock_irqrestore(&data->rxlock, flags); |
5158 |
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c |
5159 |
+index cf4a560958173..8055f63603f45 100644 |
5160 |
+--- a/drivers/bluetooth/hci_bcsp.c |
5161 |
++++ b/drivers/bluetooth/hci_bcsp.c |
5162 |
+@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) |
5163 |
+ i++; |
5164 |
+ |
5165 |
+ __skb_unlink(skb, &bcsp->unack); |
5166 |
+- kfree_skb(skb); |
5167 |
++ dev_kfree_skb_irq(skb); |
5168 |
+ } |
5169 |
+ |
5170 |
+ if (skb_queue_empty(&bcsp->unack)) |
5171 |
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c |
5172 |
+index e0ea9d25bb393..1363b21c81b73 100644 |
5173 |
+--- a/drivers/bluetooth/hci_h5.c |
5174 |
++++ b/drivers/bluetooth/hci_h5.c |
5175 |
+@@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5) |
5176 |
+ break; |
5177 |
+ |
5178 |
+ __skb_unlink(skb, &h5->unack); |
5179 |
+- kfree_skb(skb); |
5180 |
++ dev_kfree_skb_irq(skb); |
5181 |
+ } |
5182 |
+ |
5183 |
+ if (skb_queue_empty(&h5->unack)) |
5184 |
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c |
5185 |
+index eb1e736efeebb..e4e5b26e2c33b 100644 |
5186 |
+--- a/drivers/bluetooth/hci_ll.c |
5187 |
++++ b/drivers/bluetooth/hci_ll.c |
5188 |
+@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) |
5189 |
+ default: |
5190 |
+ BT_ERR("illegal hcill state: %ld (losing packet)", |
5191 |
+ ll->hcill_state); |
5192 |
+- kfree_skb(skb); |
5193 |
++ dev_kfree_skb_irq(skb); |
5194 |
+ break; |
5195 |
+ } |
5196 |
+ |
5197 |
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c |
5198 |
+index ed91af4319b5b..5d0428fc854fa 100644 |
5199 |
+--- a/drivers/bluetooth/hci_qca.c |
5200 |
++++ b/drivers/bluetooth/hci_qca.c |
5201 |
+@@ -912,7 +912,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) |
5202 |
+ default: |
5203 |
+ BT_ERR("Illegal tx state: %d (losing packet)", |
5204 |
+ qca->tx_ibs_state); |
5205 |
+- kfree_skb(skb); |
5206 |
++ dev_kfree_skb_irq(skb); |
5207 |
+ break; |
5208 |
+ } |
5209 |
+ |
5210 |
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c |
5211 |
+index c22d4184bb612..0555e3838bce1 100644 |
5212 |
+--- a/drivers/char/hw_random/amd-rng.c |
5213 |
++++ b/drivers/char/hw_random/amd-rng.c |
5214 |
+@@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void) |
5215 |
+ found: |
5216 |
+ err = pci_read_config_dword(pdev, 0x58, &pmbase); |
5217 |
+ if (err) |
5218 |
+- return err; |
5219 |
++ goto put_dev; |
5220 |
+ |
5221 |
+ pmbase &= 0x0000FF00; |
5222 |
+- if (pmbase == 0) |
5223 |
+- return -EIO; |
5224 |
++ if (pmbase == 0) { |
5225 |
++ err = -EIO; |
5226 |
++ goto put_dev; |
5227 |
++ } |
5228 |
+ |
5229 |
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
5230 |
+- if (!priv) |
5231 |
+- return -ENOMEM; |
5232 |
++ if (!priv) { |
5233 |
++ err = -ENOMEM; |
5234 |
++ goto put_dev; |
5235 |
++ } |
5236 |
+ |
5237 |
+ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { |
5238 |
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", |
5239 |
+@@ -185,6 +189,8 @@ err_iomap: |
5240 |
+ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); |
5241 |
+ out: |
5242 |
+ kfree(priv); |
5243 |
++put_dev: |
5244 |
++ pci_dev_put(pdev); |
5245 |
+ return err; |
5246 |
+ } |
5247 |
+ |
5248 |
+@@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void) |
5249 |
+ |
5250 |
+ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); |
5251 |
+ |
5252 |
++ pci_dev_put(priv->pcidev); |
5253 |
++ |
5254 |
+ kfree(priv); |
5255 |
+ } |
5256 |
+ |
5257 |
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c |
5258 |
+index 138ce434f86b2..12fbe80918319 100644 |
5259 |
+--- a/drivers/char/hw_random/geode-rng.c |
5260 |
++++ b/drivers/char/hw_random/geode-rng.c |
5261 |
+@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = { |
5262 |
+ }; |
5263 |
+ MODULE_DEVICE_TABLE(pci, pci_tbl); |
5264 |
+ |
5265 |
++struct amd_geode_priv { |
5266 |
++ struct pci_dev *pcidev; |
5267 |
++ void __iomem *membase; |
5268 |
++}; |
5269 |
+ |
5270 |
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data) |
5271 |
+ { |
5272 |
+@@ -90,6 +94,7 @@ static int __init geode_rng_init(void) |
5273 |
+ const struct pci_device_id *ent; |
5274 |
+ void __iomem *mem; |
5275 |
+ unsigned long rng_base; |
5276 |
++ struct amd_geode_priv *priv; |
5277 |
+ |
5278 |
+ for_each_pci_dev(pdev) { |
5279 |
+ ent = pci_match_id(pci_tbl, pdev); |
5280 |
+@@ -97,17 +102,26 @@ static int __init geode_rng_init(void) |
5281 |
+ goto found; |
5282 |
+ } |
5283 |
+ /* Device not found. */ |
5284 |
+- goto out; |
5285 |
++ return err; |
5286 |
+ |
5287 |
+ found: |
5288 |
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
5289 |
++ if (!priv) { |
5290 |
++ err = -ENOMEM; |
5291 |
++ goto put_dev; |
5292 |
++ } |
5293 |
++ |
5294 |
+ rng_base = pci_resource_start(pdev, 0); |
5295 |
+ if (rng_base == 0) |
5296 |
+- goto out; |
5297 |
++ goto free_priv; |
5298 |
+ err = -ENOMEM; |
5299 |
+ mem = ioremap(rng_base, 0x58); |
5300 |
+ if (!mem) |
5301 |
+- goto out; |
5302 |
+- geode_rng.priv = (unsigned long)mem; |
5303 |
++ goto free_priv; |
5304 |
++ |
5305 |
++ geode_rng.priv = (unsigned long)priv; |
5306 |
++ priv->membase = mem; |
5307 |
++ priv->pcidev = pdev; |
5308 |
+ |
5309 |
+ pr_info("AMD Geode RNG detected\n"); |
5310 |
+ err = hwrng_register(&geode_rng); |
5311 |
+@@ -116,20 +130,26 @@ found: |
5312 |
+ err); |
5313 |
+ goto err_unmap; |
5314 |
+ } |
5315 |
+-out: |
5316 |
+ return err; |
5317 |
+ |
5318 |
+ err_unmap: |
5319 |
+ iounmap(mem); |
5320 |
+- goto out; |
5321 |
++free_priv: |
5322 |
++ kfree(priv); |
5323 |
++put_dev: |
5324 |
++ pci_dev_put(pdev); |
5325 |
++ return err; |
5326 |
+ } |
5327 |
+ |
5328 |
+ static void __exit geode_rng_exit(void) |
5329 |
+ { |
5330 |
+- void __iomem *mem = (void __iomem *)geode_rng.priv; |
5331 |
++ struct amd_geode_priv *priv; |
5332 |
+ |
5333 |
++ priv = (struct amd_geode_priv *)geode_rng.priv; |
5334 |
+ hwrng_unregister(&geode_rng); |
5335 |
+- iounmap(mem); |
5336 |
++ iounmap(priv->membase); |
5337 |
++ pci_dev_put(priv->pcidev); |
5338 |
++ kfree(priv); |
5339 |
+ } |
5340 |
+ |
5341 |
+ module_init(geode_rng_init); |
5342 |
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c |
5343 |
+index 2badf36d4816c..8dbc349a2edd7 100644 |
5344 |
+--- a/drivers/char/ipmi/ipmi_msghandler.c |
5345 |
++++ b/drivers/char/ipmi/ipmi_msghandler.c |
5346 |
+@@ -3527,12 +3527,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf, |
5347 |
+ struct ipmi_smi_msg *msg, |
5348 |
+ unsigned char err) |
5349 |
+ { |
5350 |
++ int rv; |
5351 |
+ msg->rsp[0] = msg->data[0] | 4; |
5352 |
+ msg->rsp[1] = msg->data[1]; |
5353 |
+ msg->rsp[2] = err; |
5354 |
+ msg->rsp_size = 3; |
5355 |
+- /* It's an error, so it will never requeue, no need to check return. */ |
5356 |
+- handle_one_recv_msg(intf, msg); |
5357 |
++ |
5358 |
++ /* This will never requeue, but it may ask us to free the message. */ |
5359 |
++ rv = handle_one_recv_msg(intf, msg); |
5360 |
++ if (rv == 0) |
5361 |
++ ipmi_free_smi_msg(msg); |
5362 |
+ } |
5363 |
+ |
5364 |
+ static void cleanup_smi_msgs(struct ipmi_smi *intf) |
5365 |
+diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c |
5366 |
+index 92a37b33494cb..f23c146bb740c 100644 |
5367 |
+--- a/drivers/char/ipmi/kcs_bmc_aspeed.c |
5368 |
++++ b/drivers/char/ipmi/kcs_bmc_aspeed.c |
5369 |
+@@ -404,13 +404,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer) |
5370 |
+ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) |
5371 |
+ { |
5372 |
+ struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); |
5373 |
++ int rc; |
5374 |
++ u8 str; |
5375 |
+ |
5376 |
+ /* We don't have an OBE IRQ, emulate it */ |
5377 |
+ if (mask & KCS_BMC_EVENT_TYPE_OBE) { |
5378 |
+- if (KCS_BMC_EVENT_TYPE_OBE & state) |
5379 |
+- mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); |
5380 |
+- else |
5381 |
++ if (KCS_BMC_EVENT_TYPE_OBE & state) { |
5382 |
++ /* |
5383 |
++ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can |
5384 |
++ * observe such an event before returning to the caller. This is not |
5385 |
++ * incorrect because OBF may have already become clear before enabling the |
5386 |
++ * IRQ if we had one, under which circumstance no event will be propagated |
5387 |
++ * anyway. |
5388 |
++ * |
5389 |
++ * The onus is on the client to perform a race-free check that it hasn't |
5390 |
++ * missed the event. |
5391 |
++ */ |
5392 |
++ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, |
5393 |
++ !(str & KCS_BMC_STR_OBF), 1, 100, false, |
5394 |
++ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); |
5395 |
++ /* Time for the slow path? */ |
5396 |
++ if (rc == -ETIMEDOUT) |
5397 |
++ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); |
5398 |
++ } else { |
5399 |
+ del_timer(&priv->obe.timer); |
5400 |
++ } |
5401 |
+ } |
5402 |
+ |
5403 |
+ if (mask & KCS_BMC_EVENT_TYPE_IBF) { |
5404 |
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c |
5405 |
+index 18606651d1aa4..65f8f179a27f0 100644 |
5406 |
+--- a/drivers/char/tpm/tpm_crb.c |
5407 |
++++ b/drivers/char/tpm/tpm_crb.c |
5408 |
+@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev, |
5409 |
+ iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); |
5410 |
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, |
5411 |
+ TPM2_TIMEOUT_C)) { |
5412 |
+- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); |
5413 |
++ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); |
5414 |
+ return -ETIME; |
5415 |
+ } |
5416 |
+ |
5417 |
+diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c |
5418 |
+index 6e3235565a4d8..d9daaafdd295c 100644 |
5419 |
+--- a/drivers/char/tpm/tpm_ftpm_tee.c |
5420 |
++++ b/drivers/char/tpm/tpm_ftpm_tee.c |
5421 |
+@@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void) |
5422 |
+ if (rc) |
5423 |
+ return rc; |
5424 |
+ |
5425 |
+- return driver_register(&ftpm_tee_driver.driver); |
5426 |
++ rc = driver_register(&ftpm_tee_driver.driver); |
5427 |
++ if (rc) { |
5428 |
++ platform_driver_unregister(&ftpm_tee_plat_driver); |
5429 |
++ return rc; |
5430 |
++ } |
5431 |
++ |
5432 |
++ return 0; |
5433 |
+ } |
5434 |
+ |
5435 |
+ static void __exit ftpm_mod_exit(void) |
5436 |
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c |
5437 |
+index 021355a247081..52903146fdbaf 100644 |
5438 |
+--- a/drivers/clk/imx/clk-imx8mn.c |
5439 |
++++ b/drivers/clk/imx/clk-imx8mn.c |
5440 |
+@@ -30,7 +30,7 @@ static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ |
5441 |
+ static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; |
5442 |
+ static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", }; |
5443 |
+ static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; |
5444 |
+-static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; |
5445 |
++static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", }; |
5446 |
+ static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; |
5447 |
+ static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; |
5448 |
+ |
5449 |
+@@ -40,7 +40,7 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl |
5450 |
+ |
5451 |
+ static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; |
5452 |
+ |
5453 |
+-static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out", |
5454 |
++static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out", |
5455 |
+ "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; |
5456 |
+ |
5457 |
+ static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", |
5458 |
+@@ -108,27 +108,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out |
5459 |
+ "sys_pll3_out", "clk_ext4", }; |
5460 |
+ |
5461 |
+ static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5462 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5463 |
+- "clk_ext3", "clk_ext4", }; |
5464 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5465 |
++ "clk_ext2", "clk_ext3", }; |
5466 |
+ |
5467 |
+ static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5468 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5469 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5470 |
+ "clk_ext3", "clk_ext4", }; |
5471 |
+ |
5472 |
+ static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5473 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5474 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5475 |
+ "clk_ext2", "clk_ext3", }; |
5476 |
+ |
5477 |
+ static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5478 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5479 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5480 |
+ "clk_ext3", "clk_ext4", }; |
5481 |
+ |
5482 |
+ static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5483 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5484 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5485 |
+ "clk_ext3", "clk_ext4", }; |
5486 |
+ |
5487 |
+ static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", |
5488 |
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", |
5489 |
++ "video_pll1_out", "sys_pll1_133m", "dummy", |
5490 |
+ "clk_ext2", "clk_ext3", }; |
5491 |
+ |
5492 |
+ static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", |
5493 |
+@@ -140,8 +140,8 @@ static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m" |
5494 |
+ "clk_ext4", "video_pll1_out", }; |
5495 |
+ |
5496 |
+ static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", |
5497 |
+- "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out", |
5498 |
+- "audio_pll2_out", }; |
5499 |
++ "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out", |
5500 |
++ "video_pll_out", "audio_pll2_out", }; |
5501 |
+ |
5502 |
+ static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", |
5503 |
+ "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", |
5504 |
+@@ -228,10 +228,10 @@ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys |
5505 |
+ "sys_pll1_80m", "video_pll1_out", }; |
5506 |
+ |
5507 |
+ static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", |
5508 |
+- "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", |
5509 |
++ "m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out", |
5510 |
+ "sys_pll1_80m", "sys_pll2_166m", }; |
5511 |
+ |
5512 |
+-static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", |
5513 |
++static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out", |
5514 |
+ "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m", |
5515 |
+ "sys_pll2_500m", "sys_pll1_100m", }; |
5516 |
+ |
5517 |
+@@ -328,7 +328,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) |
5518 |
+ hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5519 |
+ hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5520 |
+ hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5521 |
+- hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5522 |
++ hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5523 |
+ hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5524 |
+ hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); |
5525 |
+ |
5526 |
+@@ -337,7 +337,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) |
5527 |
+ hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll); |
5528 |
+ hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll); |
5529 |
+ hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll); |
5530 |
+- hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll); |
5531 |
++ hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll); |
5532 |
+ hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll); |
5533 |
+ hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000); |
5534 |
+ hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000); |
5535 |
+@@ -349,7 +349,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) |
5536 |
+ hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); |
5537 |
+ hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); |
5538 |
+ hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); |
5539 |
+- hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); |
5540 |
++ hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT); |
5541 |
+ hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); |
5542 |
+ hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); |
5543 |
+ |
5544 |
+@@ -359,7 +359,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) |
5545 |
+ hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); |
5546 |
+ hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); |
5547 |
+ hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11); |
5548 |
+- hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11); |
5549 |
++ hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11); |
5550 |
+ hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11); |
5551 |
+ hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11); |
5552 |
+ |
5553 |
+diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c |
5554 |
+index 90046428693c2..e74fc81a14d00 100644 |
5555 |
+--- a/drivers/clk/qcom/clk-krait.c |
5556 |
++++ b/drivers/clk/qcom/clk-krait.c |
5557 |
+@@ -98,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, |
5558 |
+ |
5559 |
+ if (d->lpl) |
5560 |
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; |
5561 |
++ else |
5562 |
++ mask <<= d->shift; |
5563 |
+ |
5564 |
+ spin_lock_irqsave(&krait_clock_reg_lock, flags); |
5565 |
+ val = krait_get_l2_indirect_reg(d->offset); |
5566 |
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c |
5567 |
+index 9755ef4888c19..a0ba37656b07b 100644 |
5568 |
+--- a/drivers/clk/qcom/gcc-sm8250.c |
5569 |
++++ b/drivers/clk/qcom/gcc-sm8250.c |
5570 |
+@@ -3267,7 +3267,7 @@ static struct gdsc usb30_prim_gdsc = { |
5571 |
+ .pd = { |
5572 |
+ .name = "usb30_prim_gdsc", |
5573 |
+ }, |
5574 |
+- .pwrsts = PWRSTS_OFF_ON, |
5575 |
++ .pwrsts = PWRSTS_RET_ON, |
5576 |
+ }; |
5577 |
+ |
5578 |
+ static struct gdsc usb30_sec_gdsc = { |
5579 |
+@@ -3275,7 +3275,7 @@ static struct gdsc usb30_sec_gdsc = { |
5580 |
+ .pd = { |
5581 |
+ .name = "usb30_sec_gdsc", |
5582 |
+ }, |
5583 |
+- .pwrsts = PWRSTS_OFF_ON, |
5584 |
++ .pwrsts = PWRSTS_RET_ON, |
5585 |
+ }; |
5586 |
+ |
5587 |
+ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { |
5588 |
+diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c |
5589 |
+index ac09b7b840aba..a5731994cbed1 100644 |
5590 |
+--- a/drivers/clk/qcom/lpasscorecc-sc7180.c |
5591 |
++++ b/drivers/clk/qcom/lpasscorecc-sc7180.c |
5592 |
+@@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = { |
5593 |
+ .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs), |
5594 |
+ }; |
5595 |
+ |
5596 |
+-static int lpass_create_pm_clks(struct platform_device *pdev) |
5597 |
++static int lpass_setup_runtime_pm(struct platform_device *pdev) |
5598 |
+ { |
5599 |
+ int ret; |
5600 |
+ |
5601 |
+@@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev) |
5602 |
+ if (ret < 0) |
5603 |
+ dev_err(&pdev->dev, "failed to acquire iface clock\n"); |
5604 |
+ |
5605 |
+- return ret; |
5606 |
++ return pm_runtime_resume_and_get(&pdev->dev); |
5607 |
+ } |
5608 |
+ |
5609 |
+ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) |
5610 |
+@@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) |
5611 |
+ struct regmap *regmap; |
5612 |
+ int ret; |
5613 |
+ |
5614 |
+- ret = lpass_create_pm_clks(pdev); |
5615 |
++ ret = lpass_setup_runtime_pm(pdev); |
5616 |
+ if (ret) |
5617 |
+ return ret; |
5618 |
+ |
5619 |
+@@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) |
5620 |
+ desc = &lpass_audio_hm_sc7180_desc; |
5621 |
+ ret = qcom_cc_probe_by_index(pdev, 1, desc); |
5622 |
+ if (ret) |
5623 |
+- return ret; |
5624 |
++ goto exit; |
5625 |
+ |
5626 |
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc"; |
5627 |
+ regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc); |
5628 |
+- if (IS_ERR(regmap)) |
5629 |
+- return PTR_ERR(regmap); |
5630 |
++ if (IS_ERR(regmap)) { |
5631 |
++ ret = PTR_ERR(regmap); |
5632 |
++ goto exit; |
5633 |
++ } |
5634 |
+ |
5635 |
+ /* |
5636 |
+ * Keep the CLK always-ON |
5637 |
+@@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) |
5638 |
+ ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap); |
5639 |
+ |
5640 |
+ pm_runtime_mark_last_busy(&pdev->dev); |
5641 |
++exit: |
5642 |
+ pm_runtime_put_autosuspend(&pdev->dev); |
5643 |
+ |
5644 |
+ return ret; |
5645 |
+@@ -425,14 +428,19 @@ static int lpass_hm_core_probe(struct platform_device *pdev) |
5646 |
+ const struct qcom_cc_desc *desc; |
5647 |
+ int ret; |
5648 |
+ |
5649 |
+- ret = lpass_create_pm_clks(pdev); |
5650 |
++ ret = lpass_setup_runtime_pm(pdev); |
5651 |
+ if (ret) |
5652 |
+ return ret; |
5653 |
+ |
5654 |
+ lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core"; |
5655 |
+ desc = &lpass_core_hm_sc7180_desc; |
5656 |
+ |
5657 |
+- return qcom_cc_probe_by_index(pdev, 0, desc); |
5658 |
++ ret = qcom_cc_probe_by_index(pdev, 0, desc); |
5659 |
++ |
5660 |
++ pm_runtime_mark_last_busy(&pdev->dev); |
5661 |
++ pm_runtime_put_autosuspend(&pdev->dev); |
5662 |
++ |
5663 |
++ return ret; |
5664 |
+ } |
5665 |
+ |
5666 |
+ static const struct of_device_id lpass_hm_sc7180_match_table[] = { |
5667 |
+diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c |
5668 |
+index abc0891fd96db..3e43ae8480ddf 100644 |
5669 |
+--- a/drivers/clk/renesas/r9a06g032-clocks.c |
5670 |
++++ b/drivers/clk/renesas/r9a06g032-clocks.c |
5671 |
+@@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, |
5672 |
+ int error; |
5673 |
+ int index; |
5674 |
+ |
5675 |
+- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, |
5676 |
++ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++, |
5677 |
+ &clkspec)) { |
5678 |
+ if (clkspec.np != pd->dev.of_node) |
5679 |
+ continue; |
5680 |
+@@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, |
5681 |
+ if (error) |
5682 |
+ return error; |
5683 |
+ } |
5684 |
+- i++; |
5685 |
+ } |
5686 |
+ |
5687 |
+ return 0; |
5688 |
+diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c |
5689 |
+index f7827b3b7fc1c..6e5e502be44a6 100644 |
5690 |
+--- a/drivers/clk/rockchip/clk-pll.c |
5691 |
++++ b/drivers/clk/rockchip/clk-pll.c |
5692 |
+@@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, |
5693 |
+ return mux_clk; |
5694 |
+ |
5695 |
+ err_pll: |
5696 |
++ kfree(pll->rate_table); |
5697 |
+ clk_unregister(mux_clk); |
5698 |
+ mux_clk = pll_clk; |
5699 |
+ err_mux: |
5700 |
+diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c |
5701 |
+index 5873a9354b507..4909e940f0ab6 100644 |
5702 |
+--- a/drivers/clk/samsung/clk-pll.c |
5703 |
++++ b/drivers/clk/samsung/clk-pll.c |
5704 |
+@@ -1385,6 +1385,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, |
5705 |
+ if (ret) { |
5706 |
+ pr_err("%s: failed to register pll clock %s : %d\n", |
5707 |
+ __func__, pll_clk->name, ret); |
5708 |
++ kfree(pll->rate_table); |
5709 |
+ kfree(pll); |
5710 |
+ return; |
5711 |
+ } |
5712 |
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c |
5713 |
+index 1ec9678d8cd32..ee2a2d284113c 100644 |
5714 |
+--- a/drivers/clk/socfpga/clk-gate.c |
5715 |
++++ b/drivers/clk/socfpga/clk-gate.c |
5716 |
+@@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node) |
5717 |
+ return; |
5718 |
+ |
5719 |
+ ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); |
5720 |
+- if (WARN_ON(!ops)) |
5721 |
++ if (WARN_ON(!ops)) { |
5722 |
++ kfree(socfpga_clk); |
5723 |
+ return; |
5724 |
++ } |
5725 |
+ |
5726 |
+ rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); |
5727 |
+ if (rc) |
5728 |
+@@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node) |
5729 |
+ |
5730 |
+ err = clk_hw_register(NULL, hw_clk); |
5731 |
+ if (err) { |
5732 |
++ kfree(ops); |
5733 |
+ kfree(socfpga_clk); |
5734 |
+ return; |
5735 |
+ } |
5736 |
+diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c |
5737 |
+index 164285d6be978..ba18e58f0aae3 100644 |
5738 |
+--- a/drivers/clk/st/clkgen-fsyn.c |
5739 |
++++ b/drivers/clk/st/clkgen-fsyn.c |
5740 |
+@@ -1008,9 +1008,10 @@ static void __init st_of_quadfs_setup(struct device_node *np, |
5741 |
+ |
5742 |
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data, |
5743 |
+ reg, lock); |
5744 |
+- if (IS_ERR(clk)) |
5745 |
++ if (IS_ERR(clk)) { |
5746 |
++ kfree(lock); |
5747 |
+ goto err_exit; |
5748 |
+- else |
5749 |
++ } else |
5750 |
+ pr_debug("%s: parent %s rate %u\n", |
5751 |
+ __clk_get_name(clk), |
5752 |
+ __clk_get_name(clk_get_parent(clk)), |
5753 |
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c |
5754 |
+index dd0956ad969c1..d35548aa026fb 100644 |
5755 |
+--- a/drivers/clocksource/sh_cmt.c |
5756 |
++++ b/drivers/clocksource/sh_cmt.c |
5757 |
+@@ -13,6 +13,7 @@ |
5758 |
+ #include <linux/init.h> |
5759 |
+ #include <linux/interrupt.h> |
5760 |
+ #include <linux/io.h> |
5761 |
++#include <linux/iopoll.h> |
5762 |
+ #include <linux/ioport.h> |
5763 |
+ #include <linux/irq.h> |
5764 |
+ #include <linux/module.h> |
5765 |
+@@ -116,6 +117,7 @@ struct sh_cmt_device { |
5766 |
+ void __iomem *mapbase; |
5767 |
+ struct clk *clk; |
5768 |
+ unsigned long rate; |
5769 |
++ unsigned int reg_delay; |
5770 |
+ |
5771 |
+ raw_spinlock_t lock; /* Protect the shared start/stop register */ |
5772 |
+ |
5773 |
+@@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) |
5774 |
+ |
5775 |
+ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) |
5776 |
+ { |
5777 |
+- if (ch->iostart) |
5778 |
+- ch->cmt->info->write_control(ch->iostart, 0, value); |
5779 |
+- else |
5780 |
+- ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); |
5781 |
++ u32 old_value = sh_cmt_read_cmstr(ch); |
5782 |
++ |
5783 |
++ if (value != old_value) { |
5784 |
++ if (ch->iostart) { |
5785 |
++ ch->cmt->info->write_control(ch->iostart, 0, value); |
5786 |
++ udelay(ch->cmt->reg_delay); |
5787 |
++ } else { |
5788 |
++ ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); |
5789 |
++ udelay(ch->cmt->reg_delay); |
5790 |
++ } |
5791 |
++ } |
5792 |
+ } |
5793 |
+ |
5794 |
+ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) |
5795 |
+@@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) |
5796 |
+ |
5797 |
+ static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) |
5798 |
+ { |
5799 |
+- ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); |
5800 |
++ u32 old_value = sh_cmt_read_cmcsr(ch); |
5801 |
++ |
5802 |
++ if (value != old_value) { |
5803 |
++ ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); |
5804 |
++ udelay(ch->cmt->reg_delay); |
5805 |
++ } |
5806 |
+ } |
5807 |
+ |
5808 |
+ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) |
5809 |
+@@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) |
5810 |
+ return ch->cmt->info->read_count(ch->ioctrl, CMCNT); |
5811 |
+ } |
5812 |
+ |
5813 |
+-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) |
5814 |
++static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) |
5815 |
+ { |
5816 |
++ /* Tests showed that we need to wait 3 clocks here */ |
5817 |
++ unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2); |
5818 |
++ u32 reg; |
5819 |
++ |
5820 |
++ if (ch->cmt->info->model > SH_CMT_16BIT) { |
5821 |
++ int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg, |
5822 |
++ !(reg & SH_CMT32_CMCSR_WRFLG), |
5823 |
++ 1, cmcnt_delay, false, ch); |
5824 |
++ if (ret < 0) |
5825 |
++ return ret; |
5826 |
++ } |
5827 |
++ |
5828 |
+ ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); |
5829 |
++ udelay(cmcnt_delay); |
5830 |
++ return 0; |
5831 |
+ } |
5832 |
+ |
5833 |
+ static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) |
5834 |
+ { |
5835 |
+- ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); |
5836 |
++ u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR); |
5837 |
++ |
5838 |
++ if (value != old_value) { |
5839 |
++ ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); |
5840 |
++ udelay(ch->cmt->reg_delay); |
5841 |
++ } |
5842 |
+ } |
5843 |
+ |
5844 |
+ static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) |
5845 |
+@@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) |
5846 |
+ |
5847 |
+ static int sh_cmt_enable(struct sh_cmt_channel *ch) |
5848 |
+ { |
5849 |
+- int k, ret; |
5850 |
++ int ret; |
5851 |
+ |
5852 |
+ dev_pm_syscore_device(&ch->cmt->pdev->dev, true); |
5853 |
+ |
5854 |
+@@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) |
5855 |
+ } |
5856 |
+ |
5857 |
+ sh_cmt_write_cmcor(ch, 0xffffffff); |
5858 |
+- sh_cmt_write_cmcnt(ch, 0); |
5859 |
+- |
5860 |
+- /* |
5861 |
+- * According to the sh73a0 user's manual, as CMCNT can be operated |
5862 |
+- * only by the RCLK (Pseudo 32 kHz), there's one restriction on |
5863 |
+- * modifying CMCNT register; two RCLK cycles are necessary before |
5864 |
+- * this register is either read or any modification of the value |
5865 |
+- * it holds is reflected in the LSI's actual operation. |
5866 |
+- * |
5867 |
+- * While at it, we're supposed to clear out the CMCNT as of this |
5868 |
+- * moment, so make sure it's processed properly here. This will |
5869 |
+- * take RCLKx2 at maximum. |
5870 |
+- */ |
5871 |
+- for (k = 0; k < 100; k++) { |
5872 |
+- if (!sh_cmt_read_cmcnt(ch)) |
5873 |
+- break; |
5874 |
+- udelay(1); |
5875 |
+- } |
5876 |
++ ret = sh_cmt_write_cmcnt(ch, 0); |
5877 |
+ |
5878 |
+- if (sh_cmt_read_cmcnt(ch)) { |
5879 |
++ if (ret || sh_cmt_read_cmcnt(ch)) { |
5880 |
+ dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", |
5881 |
+ ch->index); |
5882 |
+ ret = -ETIMEDOUT; |
5883 |
+@@ -987,8 +1003,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table); |
5884 |
+ |
5885 |
+ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) |
5886 |
+ { |
5887 |
+- unsigned int mask; |
5888 |
+- unsigned int i; |
5889 |
++ unsigned int mask, i; |
5890 |
++ unsigned long rate; |
5891 |
+ int ret; |
5892 |
+ |
5893 |
+ cmt->pdev = pdev; |
5894 |
+@@ -1024,10 +1040,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) |
5895 |
+ if (ret < 0) |
5896 |
+ goto err_clk_unprepare; |
5897 |
+ |
5898 |
+- if (cmt->info->width == 16) |
5899 |
+- cmt->rate = clk_get_rate(cmt->clk) / 512; |
5900 |
+- else |
5901 |
+- cmt->rate = clk_get_rate(cmt->clk) / 8; |
5902 |
++ rate = clk_get_rate(cmt->clk); |
5903 |
++ if (!rate) { |
5904 |
++ ret = -EINVAL; |
5905 |
++ goto err_clk_disable; |
5906 |
++ } |
5907 |
++ |
5908 |
++ /* We shall wait 2 input clks after register writes */ |
5909 |
++ if (cmt->info->model >= SH_CMT_48BIT) |
5910 |
++ cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate); |
5911 |
++ cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8); |
5912 |
+ |
5913 |
+ /* Map the memory resource(s). */ |
5914 |
+ ret = sh_cmt_map_memory(cmt); |
5915 |
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c |
5916 |
+index 2737407ff0698..632523c1232f6 100644 |
5917 |
+--- a/drivers/clocksource/timer-ti-dm-systimer.c |
5918 |
++++ b/drivers/clocksource/timer-ti-dm-systimer.c |
5919 |
+@@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, |
5920 |
+ return error; |
5921 |
+ |
5922 |
+ r = clk_get_rate(clock); |
5923 |
+- if (!r) |
5924 |
++ if (!r) { |
5925 |
++ clk_disable_unprepare(clock); |
5926 |
+ return -ENODEV; |
5927 |
++ } |
5928 |
+ |
5929 |
+ if (is_ick) |
5930 |
+ t->ick = clock; |
5931 |
+diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c |
5932 |
+index 13656957c45f1..fa7f86cf0ea32 100644 |
5933 |
+--- a/drivers/counter/stm32-lptimer-cnt.c |
5934 |
++++ b/drivers/counter/stm32-lptimer-cnt.c |
5935 |
+@@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, |
5936 |
+ |
5937 |
+ /* ensure CMP & ARR registers are properly written */ |
5938 |
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, |
5939 |
+- (val & STM32_LPTIM_CMPOK_ARROK), |
5940 |
++ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, |
5941 |
+ 100, 1000); |
5942 |
+ if (ret) |
5943 |
+ return ret; |
5944 |
+diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c |
5945 |
+index d0b10baf039ab..151771129c7ba 100644 |
5946 |
+--- a/drivers/cpufreq/amd_freq_sensitivity.c |
5947 |
++++ b/drivers/cpufreq/amd_freq_sensitivity.c |
5948 |
+@@ -124,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void) |
5949 |
+ if (!pcidev) { |
5950 |
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK)) |
5951 |
+ return -ENODEV; |
5952 |
++ } else { |
5953 |
++ pci_dev_put(pcidev); |
5954 |
+ } |
5955 |
+ |
5956 |
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) |
5957 |
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c |
5958 |
+index bb2f59fd0de43..bbcba2c38e853 100644 |
5959 |
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c |
5960 |
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c |
5961 |
+@@ -177,6 +177,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, |
5962 |
+ } |
5963 |
+ } else if (ret != -ENODEV) { |
5964 |
+ dev_err(cpu_dev, "Invalid opp table in device tree\n"); |
5965 |
++ kfree(table); |
5966 |
+ return ret; |
5967 |
+ } else { |
5968 |
+ policy->fast_switch_possible = true; |
5969 |
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c |
5970 |
+index 252f2a9686a62..448bc796b0b40 100644 |
5971 |
+--- a/drivers/cpuidle/dt_idle_states.c |
5972 |
++++ b/drivers/cpuidle/dt_idle_states.c |
5973 |
+@@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, |
5974 |
+ * also be 0 on platforms with missing DT idle states or legacy DT |
5975 |
+ * configuration predating the DT idle states bindings. |
5976 |
+ */ |
5977 |
+- return i; |
5978 |
++ return state_idx - start_idx; |
5979 |
+ } |
5980 |
+ EXPORT_SYMBOL_GPL(dt_init_idle_driver); |
5981 |
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig |
5982 |
+index 51690e73153ad..b46343b5c26b4 100644 |
5983 |
+--- a/drivers/crypto/Kconfig |
5984 |
++++ b/drivers/crypto/Kconfig |
5985 |
+@@ -772,7 +772,12 @@ config CRYPTO_DEV_IMGTEC_HASH |
5986 |
+ config CRYPTO_DEV_ROCKCHIP |
5987 |
+ tristate "Rockchip's Cryptographic Engine driver" |
5988 |
+ depends on OF && ARCH_ROCKCHIP |
5989 |
++ depends on PM |
5990 |
++ select CRYPTO_ECB |
5991 |
++ select CRYPTO_CBC |
5992 |
++ select CRYPTO_DES |
5993 |
+ select CRYPTO_AES |
5994 |
++ select CRYPTO_ENGINE |
5995 |
+ select CRYPTO_LIB_DES |
5996 |
+ select CRYPTO_MD5 |
5997 |
+ select CRYPTO_SHA1 |
5998 |
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c |
5999 |
+index 3c46ad8c3a1c5..005eefecfdf59 100644 |
6000 |
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c |
6001 |
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c |
6002 |
+@@ -105,7 +105,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) |
6003 |
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm); |
6004 |
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; |
6005 |
+ int i = 0; |
6006 |
+- u32 a; |
6007 |
++ dma_addr_t a; |
6008 |
+ int err; |
6009 |
+ |
6010 |
+ rctx->ivlen = ivsize; |
6011 |
+diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c |
6012 |
+index 6e7ae896717cd..937187027ad57 100644 |
6013 |
+--- a/drivers/crypto/amlogic/amlogic-gxl-core.c |
6014 |
++++ b/drivers/crypto/amlogic/amlogic-gxl-core.c |
6015 |
+@@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev) |
6016 |
+ return err; |
6017 |
+ } |
6018 |
+ |
6019 |
+- mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); |
6020 |
+ for (i = 0; i < MAXFLOW; i++) { |
6021 |
+ mc->irqs[i] = platform_get_irq(pdev, i); |
6022 |
+ if (mc->irqs[i] < 0) |
6023 |
+diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h |
6024 |
+index dc0f142324a3c..8c0746a1d6d43 100644 |
6025 |
+--- a/drivers/crypto/amlogic/amlogic-gxl.h |
6026 |
++++ b/drivers/crypto/amlogic/amlogic-gxl.h |
6027 |
+@@ -95,7 +95,7 @@ struct meson_dev { |
6028 |
+ struct device *dev; |
6029 |
+ struct meson_flow *chanlist; |
6030 |
+ atomic_t flow; |
6031 |
+- int *irqs; |
6032 |
++ int irqs[MAXFLOW]; |
6033 |
+ #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG |
6034 |
+ struct dentry *dbgfs_dir; |
6035 |
+ #endif |
6036 |
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c |
6037 |
+index 2e9c0d2143632..199fcec9b8d0b 100644 |
6038 |
+--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c |
6039 |
++++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c |
6040 |
+@@ -191,6 +191,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) |
6041 |
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); |
6042 |
+ if (!ndev->iov.pf2vf_wq) { |
6043 |
+ kfree(ndev->iov.vfdev); |
6044 |
++ ndev->iov.vfdev = NULL; |
6045 |
+ return -ENOMEM; |
6046 |
+ } |
6047 |
+ /* enable pf2vf mailbox interrupts */ |
6048 |
+diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c |
6049 |
+index 7083767602fcf..8f008f024f8f1 100644 |
6050 |
+--- a/drivers/crypto/ccree/cc_debugfs.c |
6051 |
++++ b/drivers/crypto/ccree/cc_debugfs.c |
6052 |
+@@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) |
6053 |
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL); |
6054 |
+ } |
6055 |
+ |
6056 |
+-void __exit cc_debugfs_global_fini(void) |
6057 |
++void cc_debugfs_global_fini(void) |
6058 |
+ { |
6059 |
+ debugfs_remove(cc_debugfs_dir); |
6060 |
+ } |
6061 |
+diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c |
6062 |
+index 790fa9058a36d..41f0a404bdf9e 100644 |
6063 |
+--- a/drivers/crypto/ccree/cc_driver.c |
6064 |
++++ b/drivers/crypto/ccree/cc_driver.c |
6065 |
+@@ -656,9 +656,17 @@ static struct platform_driver ccree_driver = { |
6066 |
+ |
6067 |
+ static int __init ccree_init(void) |
6068 |
+ { |
6069 |
++ int rc; |
6070 |
++ |
6071 |
+ cc_debugfs_global_init(); |
6072 |
+ |
6073 |
+- return platform_driver_register(&ccree_driver); |
6074 |
++ rc = platform_driver_register(&ccree_driver); |
6075 |
++ if (rc) { |
6076 |
++ cc_debugfs_global_fini(); |
6077 |
++ return rc; |
6078 |
++ } |
6079 |
++ |
6080 |
++ return 0; |
6081 |
+ } |
6082 |
+ module_init(ccree_init); |
6083 |
+ |
6084 |
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c |
6085 |
+index 65a641396c07f..edc61e4105f30 100644 |
6086 |
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c |
6087 |
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c |
6088 |
+@@ -1143,18 +1143,12 @@ err_with_qm_init: |
6089 |
+ static void hpre_remove(struct pci_dev *pdev) |
6090 |
+ { |
6091 |
+ struct hisi_qm *qm = pci_get_drvdata(pdev); |
6092 |
+- int ret; |
6093 |
+ |
6094 |
+ hisi_qm_pm_uninit(qm); |
6095 |
+ hisi_qm_wait_task_finish(qm, &hpre_devices); |
6096 |
+ hisi_qm_alg_unregister(qm, &hpre_devices); |
6097 |
+- if (qm->fun_type == QM_HW_PF && qm->vfs_num) { |
6098 |
+- ret = hisi_qm_sriov_disable(pdev, true); |
6099 |
+- if (ret) { |
6100 |
+- pci_err(pdev, "Disable SRIOV fail!\n"); |
6101 |
+- return; |
6102 |
+- } |
6103 |
+- } |
6104 |
++ if (qm->fun_type == QM_HW_PF && qm->vfs_num) |
6105 |
++ hisi_qm_sriov_disable(pdev, true); |
6106 |
+ |
6107 |
+ hpre_debugfs_exit(qm); |
6108 |
+ hisi_qm_stop(qm, QM_NORMAL); |
6109 |
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c |
6110 |
+index b8900a5dbf6e1..fd89918abd191 100644 |
6111 |
+--- a/drivers/crypto/hisilicon/qm.c |
6112 |
++++ b/drivers/crypto/hisilicon/qm.c |
6113 |
+@@ -5727,8 +5727,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) |
6114 |
+ GFP_ATOMIC); |
6115 |
+ dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); |
6116 |
+ if (!qm->qdma.va) { |
6117 |
+- ret = -ENOMEM; |
6118 |
+- goto err_alloc_qdma; |
6119 |
++ ret = -ENOMEM; |
6120 |
++ goto err_destroy_idr; |
6121 |
+ } |
6122 |
+ |
6123 |
+ QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); |
6124 |
+@@ -5744,7 +5744,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) |
6125 |
+ |
6126 |
+ err_alloc_qp_array: |
6127 |
+ dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); |
6128 |
+-err_alloc_qdma: |
6129 |
++err_destroy_idr: |
6130 |
++ idr_destroy(&qm->qp_idr); |
6131 |
+ kfree(qm->factor); |
6132 |
+ |
6133 |
+ return ret; |
6134 |
+diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h |
6135 |
+index 3068093229a50..bbb35de994eb7 100644 |
6136 |
+--- a/drivers/crypto/hisilicon/qm.h |
6137 |
++++ b/drivers/crypto/hisilicon/qm.h |
6138 |
+@@ -318,14 +318,14 @@ struct hisi_qp { |
6139 |
+ static inline int q_num_set(const char *val, const struct kernel_param *kp, |
6140 |
+ unsigned int device) |
6141 |
+ { |
6142 |
+- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, |
6143 |
+- device, NULL); |
6144 |
++ struct pci_dev *pdev; |
6145 |
+ u32 n, q_num; |
6146 |
+ int ret; |
6147 |
+ |
6148 |
+ if (!val) |
6149 |
+ return -EINVAL; |
6150 |
+ |
6151 |
++ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); |
6152 |
+ if (!pdev) { |
6153 |
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); |
6154 |
+ pr_info("No device found currently, suppose queue number is %u\n", |
6155 |
+@@ -335,6 +335,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, |
6156 |
+ q_num = QM_QNUM_V1; |
6157 |
+ else |
6158 |
+ q_num = QM_QNUM_V2; |
6159 |
++ |
6160 |
++ pci_dev_put(pdev); |
6161 |
+ } |
6162 |
+ |
6163 |
+ ret = kstrtou32(val, 10, &n); |
6164 |
+diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c |
6165 |
+index aa4c7b2af3e2e..34b41cbcfa8de 100644 |
6166 |
+--- a/drivers/crypto/img-hash.c |
6167 |
++++ b/drivers/crypto/img-hash.c |
6168 |
+@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) |
6169 |
+ static void img_hash_dma_task(unsigned long d) |
6170 |
+ { |
6171 |
+ struct img_hash_dev *hdev = (struct img_hash_dev *)d; |
6172 |
+- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); |
6173 |
++ struct img_hash_request_ctx *ctx; |
6174 |
+ u8 *addr; |
6175 |
+ size_t nbytes, bleft, wsend, len, tbc; |
6176 |
+ struct scatterlist tsg; |
6177 |
+ |
6178 |
+- if (!hdev->req || !ctx->sg) |
6179 |
++ if (!hdev->req) |
6180 |
++ return; |
6181 |
++ |
6182 |
++ ctx = ahash_request_ctx(hdev->req); |
6183 |
++ if (!ctx->sg) |
6184 |
+ return; |
6185 |
+ |
6186 |
+ addr = sg_virt(ctx->sg); |
6187 |
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c |
6188 |
+index f6bf53c00b614..4ec6949a7ca9e 100644 |
6189 |
+--- a/drivers/crypto/omap-sham.c |
6190 |
++++ b/drivers/crypto/omap-sham.c |
6191 |
+@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev) |
6192 |
+ |
6193 |
+ pm_runtime_enable(dev); |
6194 |
+ |
6195 |
+- err = pm_runtime_get_sync(dev); |
6196 |
++ err = pm_runtime_resume_and_get(dev); |
6197 |
+ if (err < 0) { |
6198 |
+ dev_err(dev, "failed to get sync: %d\n", err); |
6199 |
+ goto err_pm; |
6200 |
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c |
6201 |
+index 35d73061d1569..14a0aef18ab13 100644 |
6202 |
+--- a/drivers/crypto/rockchip/rk3288_crypto.c |
6203 |
++++ b/drivers/crypto/rockchip/rk3288_crypto.c |
6204 |
+@@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) |
6205 |
+ clk_disable_unprepare(dev->sclk); |
6206 |
+ } |
6207 |
+ |
6208 |
+-static int check_alignment(struct scatterlist *sg_src, |
6209 |
+- struct scatterlist *sg_dst, |
6210 |
+- int align_mask) |
6211 |
+-{ |
6212 |
+- int in, out, align; |
6213 |
+- |
6214 |
+- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && |
6215 |
+- IS_ALIGNED((uint32_t)sg_src->length, align_mask); |
6216 |
+- if (!sg_dst) |
6217 |
+- return in; |
6218 |
+- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && |
6219 |
+- IS_ALIGNED((uint32_t)sg_dst->length, align_mask); |
6220 |
+- align = in && out; |
6221 |
+- |
6222 |
+- return (align && (sg_src->length == sg_dst->length)); |
6223 |
+-} |
6224 |
+- |
6225 |
+-static int rk_load_data(struct rk_crypto_info *dev, |
6226 |
+- struct scatterlist *sg_src, |
6227 |
+- struct scatterlist *sg_dst) |
6228 |
+-{ |
6229 |
+- unsigned int count; |
6230 |
+- |
6231 |
+- dev->aligned = dev->aligned ? |
6232 |
+- check_alignment(sg_src, sg_dst, dev->align_size) : |
6233 |
+- dev->aligned; |
6234 |
+- if (dev->aligned) { |
6235 |
+- count = min(dev->left_bytes, sg_src->length); |
6236 |
+- dev->left_bytes -= count; |
6237 |
+- |
6238 |
+- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { |
6239 |
+- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", |
6240 |
+- __func__, __LINE__); |
6241 |
+- return -EINVAL; |
6242 |
+- } |
6243 |
+- dev->addr_in = sg_dma_address(sg_src); |
6244 |
+- |
6245 |
+- if (sg_dst) { |
6246 |
+- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { |
6247 |
+- dev_err(dev->dev, |
6248 |
+- "[%s:%d] dma_map_sg(dst) error\n", |
6249 |
+- __func__, __LINE__); |
6250 |
+- dma_unmap_sg(dev->dev, sg_src, 1, |
6251 |
+- DMA_TO_DEVICE); |
6252 |
+- return -EINVAL; |
6253 |
+- } |
6254 |
+- dev->addr_out = sg_dma_address(sg_dst); |
6255 |
+- } |
6256 |
+- } else { |
6257 |
+- count = (dev->left_bytes > PAGE_SIZE) ? |
6258 |
+- PAGE_SIZE : dev->left_bytes; |
6259 |
+- |
6260 |
+- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, |
6261 |
+- dev->addr_vir, count, |
6262 |
+- dev->total - dev->left_bytes)) { |
6263 |
+- dev_err(dev->dev, "[%s:%d] pcopy err\n", |
6264 |
+- __func__, __LINE__); |
6265 |
+- return -EINVAL; |
6266 |
+- } |
6267 |
+- dev->left_bytes -= count; |
6268 |
+- sg_init_one(&dev->sg_tmp, dev->addr_vir, count); |
6269 |
+- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { |
6270 |
+- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", |
6271 |
+- __func__, __LINE__); |
6272 |
+- return -ENOMEM; |
6273 |
+- } |
6274 |
+- dev->addr_in = sg_dma_address(&dev->sg_tmp); |
6275 |
+- |
6276 |
+- if (sg_dst) { |
6277 |
+- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, |
6278 |
+- DMA_FROM_DEVICE)) { |
6279 |
+- dev_err(dev->dev, |
6280 |
+- "[%s:%d] dma_map_sg(sg_tmp) error\n", |
6281 |
+- __func__, __LINE__); |
6282 |
+- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, |
6283 |
+- DMA_TO_DEVICE); |
6284 |
+- return -ENOMEM; |
6285 |
+- } |
6286 |
+- dev->addr_out = sg_dma_address(&dev->sg_tmp); |
6287 |
+- } |
6288 |
+- } |
6289 |
+- dev->count = count; |
6290 |
+- return 0; |
6291 |
+-} |
6292 |
+- |
6293 |
+-static void rk_unload_data(struct rk_crypto_info *dev) |
6294 |
+-{ |
6295 |
+- struct scatterlist *sg_in, *sg_out; |
6296 |
+- |
6297 |
+- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; |
6298 |
+- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); |
6299 |
+- |
6300 |
+- if (dev->sg_dst) { |
6301 |
+- sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; |
6302 |
+- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); |
6303 |
+- } |
6304 |
+-} |
6305 |
+- |
6306 |
+ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) |
6307 |
+ { |
6308 |
+ struct rk_crypto_info *dev = platform_get_drvdata(dev_id); |
6309 |
+ u32 interrupt_status; |
6310 |
+ |
6311 |
+- spin_lock(&dev->lock); |
6312 |
+ interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); |
6313 |
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); |
6314 |
+ |
6315 |
++ dev->status = 1; |
6316 |
+ if (interrupt_status & 0x0a) { |
6317 |
+ dev_warn(dev->dev, "DMA Error\n"); |
6318 |
+- dev->err = -EFAULT; |
6319 |
++ dev->status = 0; |
6320 |
+ } |
6321 |
+- tasklet_schedule(&dev->done_task); |
6322 |
++ complete(&dev->complete); |
6323 |
+ |
6324 |
+- spin_unlock(&dev->lock); |
6325 |
+ return IRQ_HANDLED; |
6326 |
+ } |
6327 |
+ |
6328 |
+-static int rk_crypto_enqueue(struct rk_crypto_info *dev, |
6329 |
+- struct crypto_async_request *async_req) |
6330 |
+-{ |
6331 |
+- unsigned long flags; |
6332 |
+- int ret; |
6333 |
+- |
6334 |
+- spin_lock_irqsave(&dev->lock, flags); |
6335 |
+- ret = crypto_enqueue_request(&dev->queue, async_req); |
6336 |
+- if (dev->busy) { |
6337 |
+- spin_unlock_irqrestore(&dev->lock, flags); |
6338 |
+- return ret; |
6339 |
+- } |
6340 |
+- dev->busy = true; |
6341 |
+- spin_unlock_irqrestore(&dev->lock, flags); |
6342 |
+- tasklet_schedule(&dev->queue_task); |
6343 |
+- |
6344 |
+- return ret; |
6345 |
+-} |
6346 |
+- |
6347 |
+-static void rk_crypto_queue_task_cb(unsigned long data) |
6348 |
+-{ |
6349 |
+- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; |
6350 |
+- struct crypto_async_request *async_req, *backlog; |
6351 |
+- unsigned long flags; |
6352 |
+- int err = 0; |
6353 |
+- |
6354 |
+- dev->err = 0; |
6355 |
+- spin_lock_irqsave(&dev->lock, flags); |
6356 |
+- backlog = crypto_get_backlog(&dev->queue); |
6357 |
+- async_req = crypto_dequeue_request(&dev->queue); |
6358 |
+- |
6359 |
+- if (!async_req) { |
6360 |
+- dev->busy = false; |
6361 |
+- spin_unlock_irqrestore(&dev->lock, flags); |
6362 |
+- return; |
6363 |
+- } |
6364 |
+- spin_unlock_irqrestore(&dev->lock, flags); |
6365 |
+- |
6366 |
+- if (backlog) { |
6367 |
+- backlog->complete(backlog, -EINPROGRESS); |
6368 |
+- backlog = NULL; |
6369 |
+- } |
6370 |
+- |
6371 |
+- dev->async_req = async_req; |
6372 |
+- err = dev->start(dev); |
6373 |
+- if (err) |
6374 |
+- dev->complete(dev->async_req, err); |
6375 |
+-} |
6376 |
+- |
6377 |
+-static void rk_crypto_done_task_cb(unsigned long data) |
6378 |
+-{ |
6379 |
+- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; |
6380 |
+- |
6381 |
+- if (dev->err) { |
6382 |
+- dev->complete(dev->async_req, dev->err); |
6383 |
+- return; |
6384 |
+- } |
6385 |
+- |
6386 |
+- dev->err = dev->update(dev); |
6387 |
+- if (dev->err) |
6388 |
+- dev->complete(dev->async_req, dev->err); |
6389 |
+-} |
6390 |
+- |
6391 |
+ static struct rk_crypto_tmp *rk_cipher_algs[] = { |
6392 |
+ &rk_ecb_aes_alg, |
6393 |
+ &rk_cbc_aes_alg, |
6394 |
+@@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev) |
6395 |
+ if (err) |
6396 |
+ goto err_crypto; |
6397 |
+ |
6398 |
+- spin_lock_init(&crypto_info->lock); |
6399 |
+- |
6400 |
+ crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); |
6401 |
+ if (IS_ERR(crypto_info->reg)) { |
6402 |
+ err = PTR_ERR(crypto_info->reg); |
6403 |
+@@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev) |
6404 |
+ crypto_info->dev = &pdev->dev; |
6405 |
+ platform_set_drvdata(pdev, crypto_info); |
6406 |
+ |
6407 |
+- tasklet_init(&crypto_info->queue_task, |
6408 |
+- rk_crypto_queue_task_cb, (unsigned long)crypto_info); |
6409 |
+- tasklet_init(&crypto_info->done_task, |
6410 |
+- rk_crypto_done_task_cb, (unsigned long)crypto_info); |
6411 |
+- crypto_init_queue(&crypto_info->queue, 50); |
6412 |
++ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); |
6413 |
++ crypto_engine_start(crypto_info->engine); |
6414 |
++ init_completion(&crypto_info->complete); |
6415 |
+ |
6416 |
+- crypto_info->enable_clk = rk_crypto_enable_clk; |
6417 |
+- crypto_info->disable_clk = rk_crypto_disable_clk; |
6418 |
+- crypto_info->load_data = rk_load_data; |
6419 |
+- crypto_info->unload_data = rk_unload_data; |
6420 |
+- crypto_info->enqueue = rk_crypto_enqueue; |
6421 |
+- crypto_info->busy = false; |
6422 |
++ rk_crypto_enable_clk(crypto_info); |
6423 |
+ |
6424 |
+ err = rk_crypto_register(crypto_info); |
6425 |
+ if (err) { |
6426 |
+@@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev) |
6427 |
+ return 0; |
6428 |
+ |
6429 |
+ err_register_alg: |
6430 |
+- tasklet_kill(&crypto_info->queue_task); |
6431 |
+- tasklet_kill(&crypto_info->done_task); |
6432 |
++ crypto_engine_exit(crypto_info->engine); |
6433 |
+ err_crypto: |
6434 |
++ dev_err(dev, "Crypto Accelerator not successfully registered\n"); |
6435 |
+ return err; |
6436 |
+ } |
6437 |
+ |
6438 |
+@@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev) |
6439 |
+ struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); |
6440 |
+ |
6441 |
+ rk_crypto_unregister(); |
6442 |
+- tasklet_kill(&crypto_tmp->done_task); |
6443 |
+- tasklet_kill(&crypto_tmp->queue_task); |
6444 |
++ rk_crypto_disable_clk(crypto_tmp); |
6445 |
++ crypto_engine_exit(crypto_tmp->engine); |
6446 |
+ return 0; |
6447 |
+ } |
6448 |
+ |
6449 |
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h |
6450 |
+index 97278c2574ff9..045e811b4af84 100644 |
6451 |
+--- a/drivers/crypto/rockchip/rk3288_crypto.h |
6452 |
++++ b/drivers/crypto/rockchip/rk3288_crypto.h |
6453 |
+@@ -5,9 +5,11 @@ |
6454 |
+ #include <crypto/aes.h> |
6455 |
+ #include <crypto/internal/des.h> |
6456 |
+ #include <crypto/algapi.h> |
6457 |
++#include <linux/dma-mapping.h> |
6458 |
+ #include <linux/interrupt.h> |
6459 |
+ #include <linux/delay.h> |
6460 |
+ #include <linux/scatterlist.h> |
6461 |
++#include <crypto/engine.h> |
6462 |
+ #include <crypto/internal/hash.h> |
6463 |
+ #include <crypto/internal/skcipher.h> |
6464 |
+ |
6465 |
+@@ -193,45 +195,15 @@ struct rk_crypto_info { |
6466 |
+ struct reset_control *rst; |
6467 |
+ void __iomem *reg; |
6468 |
+ int irq; |
6469 |
+- struct crypto_queue queue; |
6470 |
+- struct tasklet_struct queue_task; |
6471 |
+- struct tasklet_struct done_task; |
6472 |
+- struct crypto_async_request *async_req; |
6473 |
+- int err; |
6474 |
+- /* device lock */ |
6475 |
+- spinlock_t lock; |
6476 |
+- |
6477 |
+- /* the public variable */ |
6478 |
+- struct scatterlist *sg_src; |
6479 |
+- struct scatterlist *sg_dst; |
6480 |
+- struct scatterlist sg_tmp; |
6481 |
+- struct scatterlist *first; |
6482 |
+- unsigned int left_bytes; |
6483 |
+- void *addr_vir; |
6484 |
+- int aligned; |
6485 |
+- int align_size; |
6486 |
+- size_t src_nents; |
6487 |
+- size_t dst_nents; |
6488 |
+- unsigned int total; |
6489 |
+- unsigned int count; |
6490 |
+- dma_addr_t addr_in; |
6491 |
+- dma_addr_t addr_out; |
6492 |
+- bool busy; |
6493 |
+- int (*start)(struct rk_crypto_info *dev); |
6494 |
+- int (*update)(struct rk_crypto_info *dev); |
6495 |
+- void (*complete)(struct crypto_async_request *base, int err); |
6496 |
+- int (*enable_clk)(struct rk_crypto_info *dev); |
6497 |
+- void (*disable_clk)(struct rk_crypto_info *dev); |
6498 |
+- int (*load_data)(struct rk_crypto_info *dev, |
6499 |
+- struct scatterlist *sg_src, |
6500 |
+- struct scatterlist *sg_dst); |
6501 |
+- void (*unload_data)(struct rk_crypto_info *dev); |
6502 |
+- int (*enqueue)(struct rk_crypto_info *dev, |
6503 |
+- struct crypto_async_request *async_req); |
6504 |
++ |
6505 |
++ struct crypto_engine *engine; |
6506 |
++ struct completion complete; |
6507 |
++ int status; |
6508 |
+ }; |
6509 |
+ |
6510 |
+ /* the private variable of hash */ |
6511 |
+ struct rk_ahash_ctx { |
6512 |
++ struct crypto_engine_ctx enginectx; |
6513 |
+ struct rk_crypto_info *dev; |
6514 |
+ /* for fallback */ |
6515 |
+ struct crypto_ahash *fallback_tfm; |
6516 |
+@@ -241,14 +213,23 @@ struct rk_ahash_ctx { |
6517 |
+ struct rk_ahash_rctx { |
6518 |
+ struct ahash_request fallback_req; |
6519 |
+ u32 mode; |
6520 |
++ int nrsg; |
6521 |
+ }; |
6522 |
+ |
6523 |
+ /* the private variable of cipher */ |
6524 |
+ struct rk_cipher_ctx { |
6525 |
++ struct crypto_engine_ctx enginectx; |
6526 |
+ struct rk_crypto_info *dev; |
6527 |
+ unsigned int keylen; |
6528 |
+- u32 mode; |
6529 |
++ u8 key[AES_MAX_KEY_SIZE]; |
6530 |
+ u8 iv[AES_BLOCK_SIZE]; |
6531 |
++ struct crypto_skcipher *fallback_tfm; |
6532 |
++}; |
6533 |
++ |
6534 |
++struct rk_cipher_rctx { |
6535 |
++ u8 backup_iv[AES_BLOCK_SIZE]; |
6536 |
++ u32 mode; |
6537 |
++ struct skcipher_request fallback_req; // keep at the end |
6538 |
+ }; |
6539 |
+ |
6540 |
+ enum alg_type { |
6541 |
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c |
6542 |
+index ed03058497bc2..edd40e16a3f0a 100644 |
6543 |
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c |
6544 |
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c |
6545 |
+@@ -9,6 +9,7 @@ |
6546 |
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. |
6547 |
+ */ |
6548 |
+ #include <linux/device.h> |
6549 |
++#include <asm/unaligned.h> |
6550 |
+ #include "rk3288_crypto.h" |
6551 |
+ |
6552 |
+ /* |
6553 |
+@@ -16,6 +17,40 @@ |
6554 |
+ * so we put the fixed hash out when met zero message. |
6555 |
+ */ |
6556 |
+ |
6557 |
++static bool rk_ahash_need_fallback(struct ahash_request *req) |
6558 |
++{ |
6559 |
++ struct scatterlist *sg; |
6560 |
++ |
6561 |
++ sg = req->src; |
6562 |
++ while (sg) { |
6563 |
++ if (!IS_ALIGNED(sg->offset, sizeof(u32))) { |
6564 |
++ return true; |
6565 |
++ } |
6566 |
++ if (sg->length % 4) { |
6567 |
++ return true; |
6568 |
++ } |
6569 |
++ sg = sg_next(sg); |
6570 |
++ } |
6571 |
++ return false; |
6572 |
++} |
6573 |
++ |
6574 |
++static int rk_ahash_digest_fb(struct ahash_request *areq) |
6575 |
++{ |
6576 |
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); |
6577 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
6578 |
++ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); |
6579 |
++ |
6580 |
++ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); |
6581 |
++ rctx->fallback_req.base.flags = areq->base.flags & |
6582 |
++ CRYPTO_TFM_REQ_MAY_SLEEP; |
6583 |
++ |
6584 |
++ rctx->fallback_req.nbytes = areq->nbytes; |
6585 |
++ rctx->fallback_req.src = areq->src; |
6586 |
++ rctx->fallback_req.result = areq->result; |
6587 |
++ |
6588 |
++ return crypto_ahash_digest(&rctx->fallback_req); |
6589 |
++} |
6590 |
++ |
6591 |
+ static int zero_message_process(struct ahash_request *req) |
6592 |
+ { |
6593 |
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
6594 |
+@@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req) |
6595 |
+ return 0; |
6596 |
+ } |
6597 |
+ |
6598 |
+-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) |
6599 |
+-{ |
6600 |
+- if (base->complete) |
6601 |
+- base->complete(base, err); |
6602 |
+-} |
6603 |
+- |
6604 |
+-static void rk_ahash_reg_init(struct rk_crypto_info *dev) |
6605 |
++static void rk_ahash_reg_init(struct ahash_request *req) |
6606 |
+ { |
6607 |
+- struct ahash_request *req = ahash_request_cast(dev->async_req); |
6608 |
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req); |
6609 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
6610 |
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); |
6611 |
++ struct rk_crypto_info *dev = tctx->dev; |
6612 |
+ int reg_status; |
6613 |
+ |
6614 |
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | |
6615 |
+@@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) |
6616 |
+ RK_CRYPTO_BYTESWAP_BRFIFO | |
6617 |
+ RK_CRYPTO_BYTESWAP_BTFIFO); |
6618 |
+ |
6619 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); |
6620 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); |
6621 |
+ } |
6622 |
+ |
6623 |
+ static int rk_ahash_init(struct ahash_request *req) |
6624 |
+@@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req) |
6625 |
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
6626 |
+ struct rk_crypto_info *dev = tctx->dev; |
6627 |
+ |
6628 |
++ if (rk_ahash_need_fallback(req)) |
6629 |
++ return rk_ahash_digest_fb(req); |
6630 |
++ |
6631 |
+ if (!req->nbytes) |
6632 |
+ return zero_message_process(req); |
6633 |
+- else |
6634 |
+- return dev->enqueue(dev, &req->base); |
6635 |
++ |
6636 |
++ return crypto_transfer_hash_request_to_engine(dev->engine, req); |
6637 |
+ } |
6638 |
+ |
6639 |
+-static void crypto_ahash_dma_start(struct rk_crypto_info *dev) |
6640 |
++static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) |
6641 |
+ { |
6642 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); |
6643 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); |
6644 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); |
6645 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); |
6646 |
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | |
6647 |
+ (RK_CRYPTO_HASH_START << 16)); |
6648 |
+ } |
6649 |
+ |
6650 |
+-static int rk_ahash_set_data_start(struct rk_crypto_info *dev) |
6651 |
++static int rk_hash_prepare(struct crypto_engine *engine, void *breq) |
6652 |
++{ |
6653 |
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); |
6654 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
6655 |
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); |
6656 |
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); |
6657 |
++ int ret; |
6658 |
++ |
6659 |
++ ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); |
6660 |
++ if (ret <= 0) |
6661 |
++ return -EINVAL; |
6662 |
++ |
6663 |
++ rctx->nrsg = ret; |
6664 |
++ |
6665 |
++ return 0; |
6666 |
++} |
6667 |
++ |
6668 |
++static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) |
6669 |
+ { |
6670 |
+- int err; |
6671 |
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); |
6672 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
6673 |
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); |
6674 |
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); |
6675 |
+ |
6676 |
+- err = dev->load_data(dev, dev->sg_src, NULL); |
6677 |
+- if (!err) |
6678 |
+- crypto_ahash_dma_start(dev); |
6679 |
+- return err; |
6680 |
++ dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); |
6681 |
++ return 0; |
6682 |
+ } |
6683 |
+ |
6684 |
+-static int rk_ahash_start(struct rk_crypto_info *dev) |
6685 |
++static int rk_hash_run(struct crypto_engine *engine, void *breq) |
6686 |
+ { |
6687 |
+- struct ahash_request *req = ahash_request_cast(dev->async_req); |
6688 |
+- struct crypto_ahash *tfm; |
6689 |
+- struct rk_ahash_rctx *rctx; |
6690 |
+- |
6691 |
+- dev->total = req->nbytes; |
6692 |
+- dev->left_bytes = req->nbytes; |
6693 |
+- dev->aligned = 0; |
6694 |
+- dev->align_size = 4; |
6695 |
+- dev->sg_dst = NULL; |
6696 |
+- dev->sg_src = req->src; |
6697 |
+- dev->first = req->src; |
6698 |
+- dev->src_nents = sg_nents(req->src); |
6699 |
+- rctx = ahash_request_ctx(req); |
6700 |
++ struct ahash_request *areq = container_of(breq, struct ahash_request, base); |
6701 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
6702 |
++ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); |
6703 |
++ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); |
6704 |
++ struct scatterlist *sg = areq->src; |
6705 |
++ int err = 0; |
6706 |
++ int i; |
6707 |
++ u32 v; |
6708 |
++ |
6709 |
+ rctx->mode = 0; |
6710 |
+ |
6711 |
+- tfm = crypto_ahash_reqtfm(req); |
6712 |
+ switch (crypto_ahash_digestsize(tfm)) { |
6713 |
+ case SHA1_DIGEST_SIZE: |
6714 |
+ rctx->mode = RK_CRYPTO_HASH_SHA1; |
6715 |
+@@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev) |
6716 |
+ rctx->mode = RK_CRYPTO_HASH_MD5; |
6717 |
+ break; |
6718 |
+ default: |
6719 |
+- return -EINVAL; |
6720 |
++ err = -EINVAL; |
6721 |
++ goto theend; |
6722 |
+ } |
6723 |
+ |
6724 |
+- rk_ahash_reg_init(dev); |
6725 |
+- return rk_ahash_set_data_start(dev); |
6726 |
+-} |
6727 |
+- |
6728 |
+-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) |
6729 |
+-{ |
6730 |
+- int err = 0; |
6731 |
+- struct ahash_request *req = ahash_request_cast(dev->async_req); |
6732 |
+- struct crypto_ahash *tfm; |
6733 |
+- |
6734 |
+- dev->unload_data(dev); |
6735 |
+- if (dev->left_bytes) { |
6736 |
+- if (dev->aligned) { |
6737 |
+- if (sg_is_last(dev->sg_src)) { |
6738 |
+- dev_warn(dev->dev, "[%s:%d], Lack of data\n", |
6739 |
+- __func__, __LINE__); |
6740 |
+- err = -ENOMEM; |
6741 |
+- goto out_rx; |
6742 |
+- } |
6743 |
+- dev->sg_src = sg_next(dev->sg_src); |
6744 |
++ rk_ahash_reg_init(areq); |
6745 |
++ |
6746 |
++ while (sg) { |
6747 |
++ reinit_completion(&tctx->dev->complete); |
6748 |
++ tctx->dev->status = 0; |
6749 |
++ crypto_ahash_dma_start(tctx->dev, sg); |
6750 |
++ wait_for_completion_interruptible_timeout(&tctx->dev->complete, |
6751 |
++ msecs_to_jiffies(2000)); |
6752 |
++ if (!tctx->dev->status) { |
6753 |
++ dev_err(tctx->dev->dev, "DMA timeout\n"); |
6754 |
++ err = -EFAULT; |
6755 |
++ goto theend; |
6756 |
+ } |
6757 |
+- err = rk_ahash_set_data_start(dev); |
6758 |
+- } else { |
6759 |
++ sg = sg_next(sg); |
6760 |
++ } |
6761 |
++ |
6762 |
+ /* |
6763 |
+ * it will take some time to process date after last dma |
6764 |
+ * transmission. |
6765 |
+@@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) |
6766 |
+ * efficiency, and make it response quickly when dma |
6767 |
+ * complete. |
6768 |
+ */ |
6769 |
+- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) |
6770 |
+- udelay(10); |
6771 |
+- |
6772 |
+- tfm = crypto_ahash_reqtfm(req); |
6773 |
+- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, |
6774 |
+- crypto_ahash_digestsize(tfm)); |
6775 |
+- dev->complete(dev->async_req, 0); |
6776 |
+- tasklet_schedule(&dev->queue_task); |
6777 |
++ while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) |
6778 |
++ udelay(10); |
6779 |
++ |
6780 |
++ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { |
6781 |
++ v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); |
6782 |
++ put_unaligned_le32(v, areq->result + i * 4); |
6783 |
+ } |
6784 |
+ |
6785 |
+-out_rx: |
6786 |
+- return err; |
6787 |
++theend: |
6788 |
++ local_bh_disable(); |
6789 |
++ crypto_finalize_hash_request(engine, breq, err); |
6790 |
++ local_bh_enable(); |
6791 |
++ |
6792 |
++ return 0; |
6793 |
+ } |
6794 |
+ |
6795 |
+ static int rk_cra_hash_init(struct crypto_tfm *tfm) |
6796 |
+@@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) |
6797 |
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash); |
6798 |
+ |
6799 |
+ tctx->dev = algt->dev; |
6800 |
+- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); |
6801 |
+- if (!tctx->dev->addr_vir) { |
6802 |
+- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); |
6803 |
+- return -ENOMEM; |
6804 |
+- } |
6805 |
+- tctx->dev->start = rk_ahash_start; |
6806 |
+- tctx->dev->update = rk_ahash_crypto_rx; |
6807 |
+- tctx->dev->complete = rk_ahash_crypto_complete; |
6808 |
+ |
6809 |
+ /* for fallback */ |
6810 |
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, |
6811 |
+@@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) |
6812 |
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); |
6813 |
+ return PTR_ERR(tctx->fallback_tfm); |
6814 |
+ } |
6815 |
++ |
6816 |
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
6817 |
+ sizeof(struct rk_ahash_rctx) + |
6818 |
+ crypto_ahash_reqsize(tctx->fallback_tfm)); |
6819 |
+ |
6820 |
+- return tctx->dev->enable_clk(tctx->dev); |
6821 |
++ tctx->enginectx.op.do_one_request = rk_hash_run; |
6822 |
++ tctx->enginectx.op.prepare_request = rk_hash_prepare; |
6823 |
++ tctx->enginectx.op.unprepare_request = rk_hash_unprepare; |
6824 |
++ |
6825 |
++ return 0; |
6826 |
+ } |
6827 |
+ |
6828 |
+ static void rk_cra_hash_exit(struct crypto_tfm *tfm) |
6829 |
+ { |
6830 |
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); |
6831 |
+ |
6832 |
+- free_page((unsigned long)tctx->dev->addr_vir); |
6833 |
+- return tctx->dev->disable_clk(tctx->dev); |
6834 |
++ crypto_free_ahash(tctx->fallback_tfm); |
6835 |
+ } |
6836 |
+ |
6837 |
+ struct rk_crypto_tmp rk_ahash_sha1 = { |
6838 |
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c |
6839 |
+index 5bbf0d2722e11..67a7e05d5ae31 100644 |
6840 |
+--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c |
6841 |
++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c |
6842 |
+@@ -9,23 +9,77 @@ |
6843 |
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver. |
6844 |
+ */ |
6845 |
+ #include <linux/device.h> |
6846 |
++#include <crypto/scatterwalk.h> |
6847 |
+ #include "rk3288_crypto.h" |
6848 |
+ |
6849 |
+ #define RK_CRYPTO_DEC BIT(0) |
6850 |
+ |
6851 |
+-static void rk_crypto_complete(struct crypto_async_request *base, int err) |
6852 |
++static int rk_cipher_need_fallback(struct skcipher_request *req) |
6853 |
+ { |
6854 |
+- if (base->complete) |
6855 |
+- base->complete(base, err); |
6856 |
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
6857 |
++ unsigned int bs = crypto_skcipher_blocksize(tfm); |
6858 |
++ struct scatterlist *sgs, *sgd; |
6859 |
++ unsigned int stodo, dtodo, len; |
6860 |
++ |
6861 |
++ if (!req->cryptlen) |
6862 |
++ return true; |
6863 |
++ |
6864 |
++ len = req->cryptlen; |
6865 |
++ sgs = req->src; |
6866 |
++ sgd = req->dst; |
6867 |
++ while (sgs && sgd) { |
6868 |
++ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { |
6869 |
++ return true; |
6870 |
++ } |
6871 |
++ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { |
6872 |
++ return true; |
6873 |
++ } |
6874 |
++ stodo = min(len, sgs->length); |
6875 |
++ if (stodo % bs) { |
6876 |
++ return true; |
6877 |
++ } |
6878 |
++ dtodo = min(len, sgd->length); |
6879 |
++ if (dtodo % bs) { |
6880 |
++ return true; |
6881 |
++ } |
6882 |
++ if (stodo != dtodo) { |
6883 |
++ return true; |
6884 |
++ } |
6885 |
++ len -= stodo; |
6886 |
++ sgs = sg_next(sgs); |
6887 |
++ sgd = sg_next(sgd); |
6888 |
++ } |
6889 |
++ return false; |
6890 |
++} |
6891 |
++ |
6892 |
++static int rk_cipher_fallback(struct skcipher_request *areq) |
6893 |
++{ |
6894 |
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
6895 |
++ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); |
6896 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); |
6897 |
++ int err; |
6898 |
++ |
6899 |
++ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); |
6900 |
++ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, |
6901 |
++ areq->base.complete, areq->base.data); |
6902 |
++ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, |
6903 |
++ areq->cryptlen, areq->iv); |
6904 |
++ if (rctx->mode & RK_CRYPTO_DEC) |
6905 |
++ err = crypto_skcipher_decrypt(&rctx->fallback_req); |
6906 |
++ else |
6907 |
++ err = crypto_skcipher_encrypt(&rctx->fallback_req); |
6908 |
++ return err; |
6909 |
+ } |
6910 |
+ |
6911 |
+ static int rk_handle_req(struct rk_crypto_info *dev, |
6912 |
+ struct skcipher_request *req) |
6913 |
+ { |
6914 |
+- if (!IS_ALIGNED(req->cryptlen, dev->align_size)) |
6915 |
+- return -EINVAL; |
6916 |
+- else |
6917 |
+- return dev->enqueue(dev, &req->base); |
6918 |
++ struct crypto_engine *engine = dev->engine; |
6919 |
++ |
6920 |
++ if (rk_cipher_need_fallback(req)) |
6921 |
++ return rk_cipher_fallback(req); |
6922 |
++ |
6923 |
++ return crypto_transfer_skcipher_request_to_engine(engine, req); |
6924 |
+ } |
6925 |
+ |
6926 |
+ static int rk_aes_setkey(struct crypto_skcipher *cipher, |
6927 |
+@@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, |
6928 |
+ keylen != AES_KEYSIZE_256) |
6929 |
+ return -EINVAL; |
6930 |
+ ctx->keylen = keylen; |
6931 |
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); |
6932 |
+- return 0; |
6933 |
++ memcpy(ctx->key, key, keylen); |
6934 |
++ |
6935 |
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); |
6936 |
+ } |
6937 |
+ |
6938 |
+ static int rk_des_setkey(struct crypto_skcipher *cipher, |
6939 |
+@@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, |
6940 |
+ return err; |
6941 |
+ |
6942 |
+ ctx->keylen = keylen; |
6943 |
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); |
6944 |
+- return 0; |
6945 |
++ memcpy(ctx->key, key, keylen); |
6946 |
++ |
6947 |
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); |
6948 |
+ } |
6949 |
+ |
6950 |
+ static int rk_tdes_setkey(struct crypto_skcipher *cipher, |
6951 |
+@@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, |
6952 |
+ return err; |
6953 |
+ |
6954 |
+ ctx->keylen = keylen; |
6955 |
+- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); |
6956 |
+- return 0; |
6957 |
++ memcpy(ctx->key, key, keylen); |
6958 |
++ |
6959 |
++ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); |
6960 |
+ } |
6961 |
+ |
6962 |
+ static int rk_aes_ecb_encrypt(struct skcipher_request *req) |
6963 |
+ { |
6964 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
6965 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
6966 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
6967 |
+ struct rk_crypto_info *dev = ctx->dev; |
6968 |
+ |
6969 |
+- ctx->mode = RK_CRYPTO_AES_ECB_MODE; |
6970 |
++ rctx->mode = RK_CRYPTO_AES_ECB_MODE; |
6971 |
+ return rk_handle_req(dev, req); |
6972 |
+ } |
6973 |
+ |
6974 |
+@@ -86,9 +144,10 @@ static int rk_aes_ecb_decrypt(struct skcipher_request *req) |
6975 |
+ { |
6976 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
6977 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
6978 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
6979 |
+ struct rk_crypto_info *dev = ctx->dev; |
6980 |
+ |
6981 |
+- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; |
6982 |
++ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; |
6983 |
+ return rk_handle_req(dev, req); |
6984 |
+ } |
6985 |
+ |
6986 |
+@@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req) |
6987 |
+ { |
6988 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
6989 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
6990 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
6991 |
+ struct rk_crypto_info *dev = ctx->dev; |
6992 |
+ |
6993 |
+- ctx->mode = RK_CRYPTO_AES_CBC_MODE; |
6994 |
++ rctx->mode = RK_CRYPTO_AES_CBC_MODE; |
6995 |
+ return rk_handle_req(dev, req); |
6996 |
+ } |
6997 |
+ |
6998 |
+@@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req) |
6999 |
+ { |
7000 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7001 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7002 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7003 |
+ struct rk_crypto_info *dev = ctx->dev; |
7004 |
+ |
7005 |
+- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; |
7006 |
++ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; |
7007 |
+ return rk_handle_req(dev, req); |
7008 |
+ } |
7009 |
+ |
7010 |
+@@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req) |
7011 |
+ { |
7012 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7013 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7014 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7015 |
+ struct rk_crypto_info *dev = ctx->dev; |
7016 |
+ |
7017 |
+- ctx->mode = 0; |
7018 |
++ rctx->mode = 0; |
7019 |
+ return rk_handle_req(dev, req); |
7020 |
+ } |
7021 |
+ |
7022 |
+@@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req) |
7023 |
+ { |
7024 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7025 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7026 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7027 |
+ struct rk_crypto_info *dev = ctx->dev; |
7028 |
+ |
7029 |
+- ctx->mode = RK_CRYPTO_DEC; |
7030 |
++ rctx->mode = RK_CRYPTO_DEC; |
7031 |
+ return rk_handle_req(dev, req); |
7032 |
+ } |
7033 |
+ |
7034 |
+@@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req) |
7035 |
+ { |
7036 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7037 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7038 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7039 |
+ struct rk_crypto_info *dev = ctx->dev; |
7040 |
+ |
7041 |
+- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; |
7042 |
++ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; |
7043 |
+ return rk_handle_req(dev, req); |
7044 |
+ } |
7045 |
+ |
7046 |
+@@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req) |
7047 |
+ { |
7048 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7049 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7050 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7051 |
+ struct rk_crypto_info *dev = ctx->dev; |
7052 |
+ |
7053 |
+- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; |
7054 |
++ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; |
7055 |
+ return rk_handle_req(dev, req); |
7056 |
+ } |
7057 |
+ |
7058 |
+@@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) |
7059 |
+ { |
7060 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7061 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7062 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7063 |
+ struct rk_crypto_info *dev = ctx->dev; |
7064 |
+ |
7065 |
+- ctx->mode = RK_CRYPTO_TDES_SELECT; |
7066 |
++ rctx->mode = RK_CRYPTO_TDES_SELECT; |
7067 |
+ return rk_handle_req(dev, req); |
7068 |
+ } |
7069 |
+ |
7070 |
+@@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) |
7071 |
+ { |
7072 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7073 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7074 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7075 |
+ struct rk_crypto_info *dev = ctx->dev; |
7076 |
+ |
7077 |
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; |
7078 |
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; |
7079 |
+ return rk_handle_req(dev, req); |
7080 |
+ } |
7081 |
+ |
7082 |
+@@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) |
7083 |
+ { |
7084 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7085 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7086 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7087 |
+ struct rk_crypto_info *dev = ctx->dev; |
7088 |
+ |
7089 |
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; |
7090 |
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; |
7091 |
+ return rk_handle_req(dev, req); |
7092 |
+ } |
7093 |
+ |
7094 |
+@@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) |
7095 |
+ { |
7096 |
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7097 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7098 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7099 |
+ struct rk_crypto_info *dev = ctx->dev; |
7100 |
+ |
7101 |
+- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | |
7102 |
++ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | |
7103 |
+ RK_CRYPTO_DEC; |
7104 |
+ return rk_handle_req(dev, req); |
7105 |
+ } |
7106 |
+ |
7107 |
+-static void rk_ablk_hw_init(struct rk_crypto_info *dev) |
7108 |
++static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) |
7109 |
+ { |
7110 |
+- struct skcipher_request *req = |
7111 |
+- skcipher_request_cast(dev->async_req); |
7112 |
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); |
7113 |
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); |
7114 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); |
7115 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); |
7116 |
+- u32 ivsize, block, conf_reg = 0; |
7117 |
++ u32 block, conf_reg = 0; |
7118 |
+ |
7119 |
+ block = crypto_tfm_alg_blocksize(tfm); |
7120 |
+- ivsize = crypto_skcipher_ivsize(cipher); |
7121 |
+ |
7122 |
+ if (block == DES_BLOCK_SIZE) { |
7123 |
+- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | |
7124 |
++ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | |
7125 |
+ RK_CRYPTO_TDES_BYTESWAP_KEY | |
7126 |
+ RK_CRYPTO_TDES_BYTESWAP_IV; |
7127 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); |
7128 |
+- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); |
7129 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); |
7130 |
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); |
7131 |
+ conf_reg = RK_CRYPTO_DESSEL; |
7132 |
+ } else { |
7133 |
+- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | |
7134 |
++ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | |
7135 |
+ RK_CRYPTO_AES_KEY_CHANGE | |
7136 |
+ RK_CRYPTO_AES_BYTESWAP_KEY | |
7137 |
+ RK_CRYPTO_AES_BYTESWAP_IV; |
7138 |
+ if (ctx->keylen == AES_KEYSIZE_192) |
7139 |
+- ctx->mode |= RK_CRYPTO_AES_192BIT_key; |
7140 |
++ rctx->mode |= RK_CRYPTO_AES_192BIT_key; |
7141 |
+ else if (ctx->keylen == AES_KEYSIZE_256) |
7142 |
+- ctx->mode |= RK_CRYPTO_AES_256BIT_key; |
7143 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); |
7144 |
+- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); |
7145 |
++ rctx->mode |= RK_CRYPTO_AES_256BIT_key; |
7146 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); |
7147 |
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); |
7148 |
+ } |
7149 |
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | |
7150 |
+ RK_CRYPTO_BYTESWAP_BRFIFO; |
7151 |
+@@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) |
7152 |
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); |
7153 |
+ } |
7154 |
+ |
7155 |
+-static void crypto_dma_start(struct rk_crypto_info *dev) |
7156 |
++static void crypto_dma_start(struct rk_crypto_info *dev, |
7157 |
++ struct scatterlist *sgs, |
7158 |
++ struct scatterlist *sgd, unsigned int todo) |
7159 |
+ { |
7160 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); |
7161 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); |
7162 |
+- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); |
7163 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); |
7164 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); |
7165 |
++ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd)); |
7166 |
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | |
7167 |
+ _SBF(RK_CRYPTO_BLOCK_START, 16)); |
7168 |
+ } |
7169 |
+ |
7170 |
+-static int rk_set_data_start(struct rk_crypto_info *dev) |
7171 |
++static int rk_cipher_run(struct crypto_engine *engine, void *async_req) |
7172 |
+ { |
7173 |
+- int err; |
7174 |
+- struct skcipher_request *req = |
7175 |
+- skcipher_request_cast(dev->async_req); |
7176 |
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7177 |
++ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); |
7178 |
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
7179 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7180 |
+- u32 ivsize = crypto_skcipher_ivsize(tfm); |
7181 |
+- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + |
7182 |
+- dev->sg_src->offset + dev->sg_src->length - ivsize; |
7183 |
+- |
7184 |
+- /* Store the iv that need to be updated in chain mode. |
7185 |
+- * And update the IV buffer to contain the next IV for decryption mode. |
7186 |
+- */ |
7187 |
+- if (ctx->mode & RK_CRYPTO_DEC) { |
7188 |
+- memcpy(ctx->iv, src_last_blk, ivsize); |
7189 |
+- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, |
7190 |
+- ivsize, dev->total - ivsize); |
7191 |
+- } |
7192 |
+- |
7193 |
+- err = dev->load_data(dev, dev->sg_src, dev->sg_dst); |
7194 |
+- if (!err) |
7195 |
+- crypto_dma_start(dev); |
7196 |
+- return err; |
7197 |
+-} |
7198 |
+- |
7199 |
+-static int rk_ablk_start(struct rk_crypto_info *dev) |
7200 |
+-{ |
7201 |
+- struct skcipher_request *req = |
7202 |
+- skcipher_request_cast(dev->async_req); |
7203 |
+- unsigned long flags; |
7204 |
++ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); |
7205 |
++ struct scatterlist *sgs, *sgd; |
7206 |
+ int err = 0; |
7207 |
++ int ivsize = crypto_skcipher_ivsize(tfm); |
7208 |
++ int offset; |
7209 |
++ u8 iv[AES_BLOCK_SIZE]; |
7210 |
++ u8 biv[AES_BLOCK_SIZE]; |
7211 |
++ u8 *ivtouse = areq->iv; |
7212 |
++ unsigned int len = areq->cryptlen; |
7213 |
++ unsigned int todo; |
7214 |
++ |
7215 |
++ ivsize = crypto_skcipher_ivsize(tfm); |
7216 |
++ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { |
7217 |
++ if (rctx->mode & RK_CRYPTO_DEC) { |
7218 |
++ offset = areq->cryptlen - ivsize; |
7219 |
++ scatterwalk_map_and_copy(rctx->backup_iv, areq->src, |
7220 |
++ offset, ivsize, 0); |
7221 |
++ } |
7222 |
++ } |
7223 |
+ |
7224 |
+- dev->left_bytes = req->cryptlen; |
7225 |
+- dev->total = req->cryptlen; |
7226 |
+- dev->sg_src = req->src; |
7227 |
+- dev->first = req->src; |
7228 |
+- dev->src_nents = sg_nents(req->src); |
7229 |
+- dev->sg_dst = req->dst; |
7230 |
+- dev->dst_nents = sg_nents(req->dst); |
7231 |
+- dev->aligned = 1; |
7232 |
+- |
7233 |
+- spin_lock_irqsave(&dev->lock, flags); |
7234 |
+- rk_ablk_hw_init(dev); |
7235 |
+- err = rk_set_data_start(dev); |
7236 |
+- spin_unlock_irqrestore(&dev->lock, flags); |
7237 |
+- return err; |
7238 |
+-} |
7239 |
++ sgs = areq->src; |
7240 |
++ sgd = areq->dst; |
7241 |
+ |
7242 |
+-static void rk_iv_copyback(struct rk_crypto_info *dev) |
7243 |
+-{ |
7244 |
+- struct skcipher_request *req = |
7245 |
+- skcipher_request_cast(dev->async_req); |
7246 |
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7247 |
+- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7248 |
+- u32 ivsize = crypto_skcipher_ivsize(tfm); |
7249 |
+- |
7250 |
+- /* Update the IV buffer to contain the next IV for encryption mode. */ |
7251 |
+- if (!(ctx->mode & RK_CRYPTO_DEC)) { |
7252 |
+- if (dev->aligned) { |
7253 |
+- memcpy(req->iv, sg_virt(dev->sg_dst) + |
7254 |
+- dev->sg_dst->length - ivsize, ivsize); |
7255 |
++ while (sgs && sgd && len) { |
7256 |
++ if (!sgs->length) { |
7257 |
++ sgs = sg_next(sgs); |
7258 |
++ sgd = sg_next(sgd); |
7259 |
++ continue; |
7260 |
++ } |
7261 |
++ if (rctx->mode & RK_CRYPTO_DEC) { |
7262 |
++ /* we backup last block of source to be used as IV at next step */ |
7263 |
++ offset = sgs->length - ivsize; |
7264 |
++ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); |
7265 |
++ } |
7266 |
++ if (sgs == sgd) { |
7267 |
++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); |
7268 |
++ if (err <= 0) { |
7269 |
++ err = -EINVAL; |
7270 |
++ goto theend_iv; |
7271 |
++ } |
7272 |
++ } else { |
7273 |
++ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); |
7274 |
++ if (err <= 0) { |
7275 |
++ err = -EINVAL; |
7276 |
++ goto theend_iv; |
7277 |
++ } |
7278 |
++ err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); |
7279 |
++ if (err <= 0) { |
7280 |
++ err = -EINVAL; |
7281 |
++ goto theend_sgs; |
7282 |
++ } |
7283 |
++ } |
7284 |
++ err = 0; |
7285 |
++ rk_ablk_hw_init(ctx->dev, areq); |
7286 |
++ if (ivsize) { |
7287 |
++ if (ivsize == DES_BLOCK_SIZE) |
7288 |
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); |
7289 |
++ else |
7290 |
++ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); |
7291 |
++ } |
7292 |
++ reinit_completion(&ctx->dev->complete); |
7293 |
++ ctx->dev->status = 0; |
7294 |
++ |
7295 |
++ todo = min(sg_dma_len(sgs), len); |
7296 |
++ len -= todo; |
7297 |
++ crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); |
7298 |
++ wait_for_completion_interruptible_timeout(&ctx->dev->complete, |
7299 |
++ msecs_to_jiffies(2000)); |
7300 |
++ if (!ctx->dev->status) { |
7301 |
++ dev_err(ctx->dev->dev, "DMA timeout\n"); |
7302 |
++ err = -EFAULT; |
7303 |
++ goto theend; |
7304 |
++ } |
7305 |
++ if (sgs == sgd) { |
7306 |
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); |
7307 |
++ } else { |
7308 |
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); |
7309 |
++ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); |
7310 |
++ } |
7311 |
++ if (rctx->mode & RK_CRYPTO_DEC) { |
7312 |
++ memcpy(iv, biv, ivsize); |
7313 |
++ ivtouse = iv; |
7314 |
+ } else { |
7315 |
+- memcpy(req->iv, dev->addr_vir + |
7316 |
+- dev->count - ivsize, ivsize); |
7317 |
++ offset = sgd->length - ivsize; |
7318 |
++ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); |
7319 |
++ ivtouse = iv; |
7320 |
+ } |
7321 |
++ sgs = sg_next(sgs); |
7322 |
++ sgd = sg_next(sgd); |
7323 |
+ } |
7324 |
+-} |
7325 |
+- |
7326 |
+-static void rk_update_iv(struct rk_crypto_info *dev) |
7327 |
+-{ |
7328 |
+- struct skcipher_request *req = |
7329 |
+- skcipher_request_cast(dev->async_req); |
7330 |
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
7331 |
+- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7332 |
+- u32 ivsize = crypto_skcipher_ivsize(tfm); |
7333 |
+- u8 *new_iv = NULL; |
7334 |
+ |
7335 |
+- if (ctx->mode & RK_CRYPTO_DEC) { |
7336 |
+- new_iv = ctx->iv; |
7337 |
+- } else { |
7338 |
+- new_iv = page_address(sg_page(dev->sg_dst)) + |
7339 |
+- dev->sg_dst->offset + dev->sg_dst->length - ivsize; |
7340 |
++ if (areq->iv && ivsize > 0) { |
7341 |
++ offset = areq->cryptlen - ivsize; |
7342 |
++ if (rctx->mode & RK_CRYPTO_DEC) { |
7343 |
++ memcpy(areq->iv, rctx->backup_iv, ivsize); |
7344 |
++ memzero_explicit(rctx->backup_iv, ivsize); |
7345 |
++ } else { |
7346 |
++ scatterwalk_map_and_copy(areq->iv, areq->dst, offset, |
7347 |
++ ivsize, 0); |
7348 |
++ } |
7349 |
+ } |
7350 |
+ |
7351 |
+- if (ivsize == DES_BLOCK_SIZE) |
7352 |
+- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); |
7353 |
+- else if (ivsize == AES_BLOCK_SIZE) |
7354 |
+- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); |
7355 |
+-} |
7356 |
++theend: |
7357 |
++ local_bh_disable(); |
7358 |
++ crypto_finalize_skcipher_request(engine, areq, err); |
7359 |
++ local_bh_enable(); |
7360 |
++ return 0; |
7361 |
+ |
7362 |
+-/* return: |
7363 |
+- * true some err was occurred |
7364 |
+- * fault no err, continue |
7365 |
+- */ |
7366 |
+-static int rk_ablk_rx(struct rk_crypto_info *dev) |
7367 |
+-{ |
7368 |
+- int err = 0; |
7369 |
+- struct skcipher_request *req = |
7370 |
+- skcipher_request_cast(dev->async_req); |
7371 |
+- |
7372 |
+- dev->unload_data(dev); |
7373 |
+- if (!dev->aligned) { |
7374 |
+- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, |
7375 |
+- dev->addr_vir, dev->count, |
7376 |
+- dev->total - dev->left_bytes - |
7377 |
+- dev->count)) { |
7378 |
+- err = -EINVAL; |
7379 |
+- goto out_rx; |
7380 |
+- } |
7381 |
+- } |
7382 |
+- if (dev->left_bytes) { |
7383 |
+- rk_update_iv(dev); |
7384 |
+- if (dev->aligned) { |
7385 |
+- if (sg_is_last(dev->sg_src)) { |
7386 |
+- dev_err(dev->dev, "[%s:%d] Lack of data\n", |
7387 |
+- __func__, __LINE__); |
7388 |
+- err = -ENOMEM; |
7389 |
+- goto out_rx; |
7390 |
+- } |
7391 |
+- dev->sg_src = sg_next(dev->sg_src); |
7392 |
+- dev->sg_dst = sg_next(dev->sg_dst); |
7393 |
+- } |
7394 |
+- err = rk_set_data_start(dev); |
7395 |
++theend_sgs: |
7396 |
++ if (sgs == sgd) { |
7397 |
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); |
7398 |
+ } else { |
7399 |
+- rk_iv_copyback(dev); |
7400 |
+- /* here show the calculation is over without any err */ |
7401 |
+- dev->complete(dev->async_req, 0); |
7402 |
+- tasklet_schedule(&dev->queue_task); |
7403 |
++ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); |
7404 |
++ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); |
7405 |
+ } |
7406 |
+-out_rx: |
7407 |
++theend_iv: |
7408 |
+ return err; |
7409 |
+ } |
7410 |
+ |
7411 |
+@@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) |
7412 |
+ { |
7413 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7414 |
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
7415 |
++ const char *name = crypto_tfm_alg_name(&tfm->base); |
7416 |
+ struct rk_crypto_tmp *algt; |
7417 |
+ |
7418 |
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); |
7419 |
+ |
7420 |
+ ctx->dev = algt->dev; |
7421 |
+- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; |
7422 |
+- ctx->dev->start = rk_ablk_start; |
7423 |
+- ctx->dev->update = rk_ablk_rx; |
7424 |
+- ctx->dev->complete = rk_crypto_complete; |
7425 |
+- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); |
7426 |
+ |
7427 |
+- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; |
7428 |
++ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); |
7429 |
++ if (IS_ERR(ctx->fallback_tfm)) { |
7430 |
++ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", |
7431 |
++ name, PTR_ERR(ctx->fallback_tfm)); |
7432 |
++ return PTR_ERR(ctx->fallback_tfm); |
7433 |
++ } |
7434 |
++ |
7435 |
++ tfm->reqsize = sizeof(struct rk_cipher_rctx) + |
7436 |
++ crypto_skcipher_reqsize(ctx->fallback_tfm); |
7437 |
++ |
7438 |
++ ctx->enginectx.op.do_one_request = rk_cipher_run; |
7439 |
++ |
7440 |
++ return 0; |
7441 |
+ } |
7442 |
+ |
7443 |
+ static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) |
7444 |
+ { |
7445 |
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); |
7446 |
+ |
7447 |
+- free_page((unsigned long)ctx->dev->addr_vir); |
7448 |
+- ctx->dev->disable_clk(ctx->dev); |
7449 |
++ memzero_explicit(ctx->key, ctx->keylen); |
7450 |
++ crypto_free_skcipher(ctx->fallback_tfm); |
7451 |
+ } |
7452 |
+ |
7453 |
+ struct rk_crypto_tmp rk_ecb_aes_alg = { |
7454 |
+@@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { |
7455 |
+ .base.cra_name = "ecb(aes)", |
7456 |
+ .base.cra_driver_name = "ecb-aes-rk", |
7457 |
+ .base.cra_priority = 300, |
7458 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7459 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7460 |
+ .base.cra_blocksize = AES_BLOCK_SIZE, |
7461 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7462 |
+ .base.cra_alignmask = 0x0f, |
7463 |
+@@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { |
7464 |
+ .base.cra_name = "cbc(aes)", |
7465 |
+ .base.cra_driver_name = "cbc-aes-rk", |
7466 |
+ .base.cra_priority = 300, |
7467 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7468 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7469 |
+ .base.cra_blocksize = AES_BLOCK_SIZE, |
7470 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7471 |
+ .base.cra_alignmask = 0x0f, |
7472 |
+@@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { |
7473 |
+ .base.cra_name = "ecb(des)", |
7474 |
+ .base.cra_driver_name = "ecb-des-rk", |
7475 |
+ .base.cra_priority = 300, |
7476 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7477 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7478 |
+ .base.cra_blocksize = DES_BLOCK_SIZE, |
7479 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7480 |
+ .base.cra_alignmask = 0x07, |
7481 |
+@@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { |
7482 |
+ .base.cra_name = "cbc(des)", |
7483 |
+ .base.cra_driver_name = "cbc-des-rk", |
7484 |
+ .base.cra_priority = 300, |
7485 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7486 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7487 |
+ .base.cra_blocksize = DES_BLOCK_SIZE, |
7488 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7489 |
+ .base.cra_alignmask = 0x07, |
7490 |
+@@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { |
7491 |
+ .base.cra_name = "ecb(des3_ede)", |
7492 |
+ .base.cra_driver_name = "ecb-des3-ede-rk", |
7493 |
+ .base.cra_priority = 300, |
7494 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7495 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7496 |
+ .base.cra_blocksize = DES_BLOCK_SIZE, |
7497 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7498 |
+ .base.cra_alignmask = 0x07, |
7499 |
+@@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { |
7500 |
+ .base.cra_name = "cbc(des3_ede)", |
7501 |
+ .base.cra_driver_name = "cbc-des3-ede-rk", |
7502 |
+ .base.cra_priority = 300, |
7503 |
+- .base.cra_flags = CRYPTO_ALG_ASYNC, |
7504 |
++ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, |
7505 |
+ .base.cra_blocksize = DES_BLOCK_SIZE, |
7506 |
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), |
7507 |
+ .base.cra_alignmask = 0x07, |
7508 |
+diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c |
+index 4c06c93c93d32..c7f7134adc21d 100644 |
+--- a/drivers/dio/dio.c |
++++ b/drivers/dio/dio.c |
+@@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 }; |
+ |
+ #endif /* CONFIG_DIO_CONSTANTS */ |
+ |
++static void dio_dev_release(struct device *dev) |
++{ |
++ struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); |
++ kfree(ddev); |
++} |
++ |
+ int __init dio_find(int deviceid) |
+ { |
+ /* Called to find a DIO device before the full bus scan has run. |
+@@ -224,6 +230,7 @@ static int __init dio_init(void) |
+ dev->bus = &dio_bus; |
+ dev->dev.parent = &dio_bus.dev; |
+ dev->dev.bus = &dio_bus_type; |
++ dev->dev.release = dio_dev_release; |
+ dev->scode = scode; |
+ dev->resource.start = pa; |
+ dev->resource.end = pa + DIO_SIZE(scode, va); |
+@@ -251,6 +258,7 @@ static int __init dio_init(void) |
+ if (error) { |
+ pr_err("DIO: Error registering device %s\n", |
+ dev->name); |
++ put_device(&dev->dev); |
+ continue; |
+ } |
+ error = dio_create_sysfs_dev_files(dev); |
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c |
+index 6cf50ee0b77c5..e0af60833d28c 100644 |
+--- a/drivers/edac/i10nm_base.c |
++++ b/drivers/edac/i10nm_base.c |
+@@ -198,11 +198,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, |
+ if (unlikely(pci_enable_device(pdev) < 0)) { |
+ edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", |
+ bus, dev, fun); |
++ pci_dev_put(pdev); |
+ return NULL; |
+ } |
+ |
+- pci_dev_get(pdev); |
+- |
+ return pdev; |
+ } |
+ |
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig |
+index c69d40ae5619a..7684b3afa6304 100644 |
+--- a/drivers/extcon/Kconfig |
++++ b/drivers/extcon/Kconfig |
+@@ -180,7 +180,7 @@ config EXTCON_USBC_CROS_EC |
+ |
+ config EXTCON_USBC_TUSB320 |
+ tristate "TI TUSB320 USB-C extcon support" |
+- depends on I2C |
++ depends on I2C && TYPEC |
+ select REGMAP_I2C |
+ help |
+ Say Y here to enable support for USB Type C cable detection extcon |
+diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c |
7572 |
+index 805af73b41521..7223c4b9dc707 100644 |
7573 |
+--- a/drivers/extcon/extcon-usbc-tusb320.c |
7574 |
++++ b/drivers/extcon/extcon-usbc-tusb320.c |
7575 |
+@@ -6,6 +6,7 @@ |
7576 |
+ * Author: Michael Auchter <michael.auchter@××.com> |
7577 |
+ */ |
7578 |
+ |
7579 |
++#include <linux/bitfield.h> |
7580 |
+ #include <linux/extcon-provider.h> |
7581 |
+ #include <linux/i2c.h> |
7582 |
+ #include <linux/init.h> |
7583 |
+@@ -13,21 +14,70 @@ |
7584 |
+ #include <linux/kernel.h> |
7585 |
+ #include <linux/module.h> |
7586 |
+ #include <linux/regmap.h> |
7587 |
++#include <linux/usb/typec.h> |
7588 |
++ |
7589 |
++#define TUSB320_REG8 0x8 |
7590 |
++#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6) |
7591 |
++#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0 |
7592 |
++#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1 |
7593 |
++#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2 |
7594 |
++#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4) |
7595 |
++#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0 |
7596 |
++#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1 |
7597 |
++#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2 |
7598 |
++#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3 |
7599 |
++#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2) |
7600 |
++#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0 |
7601 |
++#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4 |
7602 |
++#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5 |
7603 |
++#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6 |
7604 |
++#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0) |
7605 |
+ |
7606 |
+ #define TUSB320_REG9 0x9 |
7607 |
+ #define TUSB320_REG9_ATTACHED_STATE_SHIFT 6 |
7608 |
+ #define TUSB320_REG9_ATTACHED_STATE_MASK 0x3 |
7609 |
+ #define TUSB320_REG9_CABLE_DIRECTION BIT(5) |
7610 |
+ #define TUSB320_REG9_INTERRUPT_STATUS BIT(4) |
7611 |
+-#define TUSB320_ATTACHED_STATE_NONE 0x0 |
7612 |
+-#define TUSB320_ATTACHED_STATE_DFP 0x1 |
7613 |
+-#define TUSB320_ATTACHED_STATE_UFP 0x2 |
7614 |
+-#define TUSB320_ATTACHED_STATE_ACC 0x3 |
7615 |
++ |
7616 |
++#define TUSB320_REGA 0xa |
7617 |
++#define TUSB320L_REGA_DISABLE_TERM BIT(0) |
7618 |
++#define TUSB320_REGA_I2C_SOFT_RESET BIT(3) |
7619 |
++#define TUSB320_REGA_MODE_SELECT_SHIFT 4 |
7620 |
++#define TUSB320_REGA_MODE_SELECT_MASK 0x3 |
7621 |
++ |
7622 |
++#define TUSB320L_REGA0_REVISION 0xa0 |
7623 |
++ |
7624 |
++enum tusb320_attached_state { |
7625 |
++ TUSB320_ATTACHED_STATE_NONE, |
7626 |
++ TUSB320_ATTACHED_STATE_DFP, |
7627 |
++ TUSB320_ATTACHED_STATE_UFP, |
7628 |
++ TUSB320_ATTACHED_STATE_ACC, |
7629 |
++}; |
7630 |
++ |
7631 |
++enum tusb320_mode { |
7632 |
++ TUSB320_MODE_PORT, |
7633 |
++ TUSB320_MODE_UFP, |
7634 |
++ TUSB320_MODE_DFP, |
7635 |
++ TUSB320_MODE_DRP, |
7636 |
++}; |
7637 |
++ |
7638 |
++struct tusb320_priv; |
7639 |
++ |
7640 |
++struct tusb320_ops { |
7641 |
++ int (*set_mode)(struct tusb320_priv *priv, enum tusb320_mode mode); |
7642 |
++ int (*get_revision)(struct tusb320_priv *priv, unsigned int *revision); |
7643 |
++}; |
7644 |
+ |
7645 |
+ struct tusb320_priv { |
7646 |
+ struct device *dev; |
7647 |
+ struct regmap *regmap; |
7648 |
+ struct extcon_dev *edev; |
7649 |
++ struct tusb320_ops *ops; |
7650 |
++ enum tusb320_attached_state state; |
7651 |
++ struct typec_port *port; |
7652 |
++ struct typec_capability cap; |
7653 |
++ enum typec_port_type port_type; |
7654 |
++ enum typec_pwr_opmode pwr_opmode; |
7655 |
+ }; |
7656 |
+ |
7657 |
+ static const char * const tusb_attached_states[] = { |
7658 |
+@@ -62,19 +112,142 @@ static int tusb320_check_signature(struct tusb320_priv *priv) |
7659 |
+ return 0; |
7660 |
+ } |
7661 |
+ |
7662 |
+-static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) |
7663 |
++static int tusb320_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) |
7664 |
+ { |
7665 |
+- struct tusb320_priv *priv = dev_id; |
7666 |
+- int state, polarity; |
7667 |
+- unsigned reg; |
7668 |
++ int ret; |
7669 |
+ |
7670 |
+- if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { |
7671 |
+- dev_err(priv->dev, "error during i2c read!\n"); |
7672 |
+- return IRQ_NONE; |
7673 |
++ /* Mode cannot be changed while cable is attached */ |
7674 |
++ if (priv->state != TUSB320_ATTACHED_STATE_NONE) |
7675 |
++ return -EBUSY; |
7676 |
++ |
7677 |
++ /* Write mode */ |
7678 |
++ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, |
7679 |
++ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, |
7680 |
++ mode << TUSB320_REGA_MODE_SELECT_SHIFT); |
7681 |
++ if (ret) { |
7682 |
++ dev_err(priv->dev, "failed to write mode: %d\n", ret); |
7683 |
++ return ret; |
7684 |
+ } |
7685 |
+ |
7686 |
+- if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) |
7687 |
+- return IRQ_NONE; |
7688 |
++ return 0; |
7689 |
++} |
7690 |
++ |
7691 |
++static int tusb320l_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) |
7692 |
++{ |
7693 |
++ int ret; |
7694 |
++ |
7695 |
++ /* Disable CC state machine */ |
7696 |
++ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, |
7697 |
++ TUSB320L_REGA_DISABLE_TERM, 1); |
7698 |
++ if (ret) { |
7699 |
++ dev_err(priv->dev, |
7700 |
++ "failed to disable CC state machine: %d\n", ret); |
7701 |
++ return ret; |
7702 |
++ } |
7703 |
++ |
7704 |
++ /* Write mode */ |
7705 |
++ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, |
7706 |
++ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, |
7707 |
++ mode << TUSB320_REGA_MODE_SELECT_SHIFT); |
7708 |
++ if (ret) { |
7709 |
++ dev_err(priv->dev, "failed to write mode: %d\n", ret); |
7710 |
++ goto err; |
7711 |
++ } |
7712 |
++ |
7713 |
++ msleep(5); |
7714 |
++err: |
7715 |
++ /* Re-enable CC state machine */ |
7716 |
++ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, |
7717 |
++ TUSB320L_REGA_DISABLE_TERM, 0); |
7718 |
++ if (ret) |
7719 |
++ dev_err(priv->dev, |
7720 |
++ "failed to re-enable CC state machine: %d\n", ret); |
7721 |
++ |
7722 |
++ return ret; |
7723 |
++} |
7724 |
++ |
7725 |
++static int tusb320_reset(struct tusb320_priv *priv) |
7726 |
++{ |
7727 |
++ int ret; |
7728 |
++ |
7729 |
++ /* Set mode to default (follow PORT pin) */ |
7730 |
++ ret = priv->ops->set_mode(priv, TUSB320_MODE_PORT); |
7731 |
++ if (ret && ret != -EBUSY) { |
7732 |
++ dev_err(priv->dev, |
7733 |
++ "failed to set mode to PORT: %d\n", ret); |
7734 |
++ return ret; |
7735 |
++ } |
7736 |
++ |
7737 |
++ /* Perform soft reset */ |
7738 |
++ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, |
7739 |
++ TUSB320_REGA_I2C_SOFT_RESET, 1); |
7740 |
++ if (ret) { |
7741 |
++ dev_err(priv->dev, |
7742 |
++ "failed to write soft reset bit: %d\n", ret); |
7743 |
++ return ret; |
7744 |
++ } |
7745 |
++ |
7746 |
++ /* Wait for chip to go through reset */ |
7747 |
++ msleep(95); |
7748 |
++ |
7749 |
++ return 0; |
7750 |
++} |
7751 |
++ |
7752 |
++static int tusb320l_get_revision(struct tusb320_priv *priv, unsigned int *revision) |
7753 |
++{ |
7754 |
++ return regmap_read(priv->regmap, TUSB320L_REGA0_REVISION, revision); |
7755 |
++} |
7756 |
++ |
7757 |
++static struct tusb320_ops tusb320_ops = { |
7758 |
++ .set_mode = tusb320_set_mode, |
7759 |
++}; |
7760 |
++ |
7761 |
++static struct tusb320_ops tusb320l_ops = { |
7762 |
++ .set_mode = tusb320l_set_mode, |
7763 |
++ .get_revision = tusb320l_get_revision, |
7764 |
++}; |
7765 |
++ |
7766 |
++static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv) |
7767 |
++{ |
7768 |
++ u8 mode; |
7769 |
++ |
7770 |
++ if (priv->pwr_opmode == TYPEC_PWR_MODE_USB) |
7771 |
++ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB; |
7772 |
++ else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A) |
7773 |
++ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A; |
7774 |
++ else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A) |
7775 |
++ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A; |
7776 |
++ else /* No other mode is supported. */ |
7777 |
++ return -EINVAL; |
7778 |
++ |
7779 |
++ return regmap_write_bits(priv->regmap, TUSB320_REG8, |
7780 |
++ TUSB320_REG8_CURRENT_MODE_ADVERTISE, |
7781 |
++ FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE, |
7782 |
++ mode)); |
7783 |
++} |
7784 |
++ |
7785 |
++static int tusb320_port_type_set(struct typec_port *port, |
7786 |
++ enum typec_port_type type) |
7787 |
++{ |
7788 |
++ struct tusb320_priv *priv = typec_get_drvdata(port); |
7789 |
++ |
7790 |
++ if (type == TYPEC_PORT_SRC) |
7791 |
++ return priv->ops->set_mode(priv, TUSB320_MODE_DFP); |
7792 |
++ else if (type == TYPEC_PORT_SNK) |
7793 |
++ return priv->ops->set_mode(priv, TUSB320_MODE_UFP); |
7794 |
++ else if (type == TYPEC_PORT_DRP) |
7795 |
++ return priv->ops->set_mode(priv, TUSB320_MODE_DRP); |
7796 |
++ else |
7797 |
++ return priv->ops->set_mode(priv, TUSB320_MODE_PORT); |
7798 |
++} |
7799 |
++ |
7800 |
++static const struct typec_operations tusb320_typec_ops = { |
7801 |
++ .port_type_set = tusb320_port_type_set, |
7802 |
++}; |
7803 |
++ |
7804 |
++static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg) |
7805 |
++{ |
7806 |
++ int state, polarity; |
7807 |
+ |
7808 |
+ state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & |
7809 |
+ TUSB320_REG9_ATTACHED_STATE_MASK; |
7810 |
+@@ -96,20 +269,171 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) |
7811 |
+ extcon_sync(priv->edev, EXTCON_USB); |
7812 |
+ extcon_sync(priv->edev, EXTCON_USB_HOST); |
7813 |
+ |
7814 |
++ priv->state = state; |
7815 |
++} |
7816 |
++ |
7817 |
++static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9) |
7818 |
++{ |
7819 |
++ struct typec_port *port = priv->port; |
7820 |
++ struct device *dev = priv->dev; |
7821 |
++ u8 mode, role, state; |
7822 |
++ int ret, reg8; |
7823 |
++ bool ori; |
7824 |
++ |
7825 |
++ ori = reg9 & TUSB320_REG9_CABLE_DIRECTION; |
7826 |
++ typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE : |
7827 |
++ TYPEC_ORIENTATION_NORMAL); |
7828 |
++ |
7829 |
++ state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & |
7830 |
++ TUSB320_REG9_ATTACHED_STATE_MASK; |
7831 |
++ if (state == TUSB320_ATTACHED_STATE_DFP) |
7832 |
++ role = TYPEC_SOURCE; |
7833 |
++ else |
7834 |
++ role = TYPEC_SINK; |
7835 |
++ |
7836 |
++ typec_set_vconn_role(port, role); |
7837 |
++ typec_set_pwr_role(port, role); |
7838 |
++ typec_set_data_role(port, role == TYPEC_SOURCE ? |
7839 |
++ TYPEC_HOST : TYPEC_DEVICE); |
7840 |
++ |
7841 |
++ ret = regmap_read(priv->regmap, TUSB320_REG8, ®8); |
7842 |
++ if (ret) { |
7843 |
++ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret); |
7844 |
++ return; |
7845 |
++ } |
7846 |
++ |
7847 |
++ mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8); |
7848 |
++ if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF) |
7849 |
++ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); |
7850 |
++ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED) |
7851 |
++ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A); |
7852 |
++ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI) |
7853 |
++ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A); |
7854 |
++ else /* Charge through accessory */ |
7855 |
++ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); |
7856 |
++} |
7857 |
++ |
7858 |
++static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv, |
7859 |
++ bool force_update) |
7860 |
++{ |
7861 |
++ unsigned int reg; |
7862 |
++ |
7863 |
++ if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { |
7864 |
++ dev_err(priv->dev, "error during i2c read!\n"); |
7865 |
++ return IRQ_NONE; |
7866 |
++ } |
7867 |
++ |
7868 |
++ if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS)) |
7869 |
++ return IRQ_NONE; |
7870 |
++ |
7871 |
++ tusb320_extcon_irq_handler(priv, reg); |
7872 |
++ |
7873 |
++ /* |
7874 |
++ * Type-C support is optional. Only call the Type-C handler if a |
7875 |
++ * port had been registered previously. |
7876 |
++ */ |
7877 |
++ if (priv->port) |
7878 |
++ tusb320_typec_irq_handler(priv, reg); |
7879 |
++ |
7880 |
+ regmap_write(priv->regmap, TUSB320_REG9, reg); |
7881 |
+ |
7882 |
+ return IRQ_HANDLED; |
7883 |
+ } |
7884 |
+ |
7885 |
++static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) |
7886 |
++{ |
7887 |
++ struct tusb320_priv *priv = dev_id; |
7888 |
++ |
7889 |
++ return tusb320_state_update_handler(priv, false); |
7890 |
++} |
7891 |
++ |
7892 |
+ static const struct regmap_config tusb320_regmap_config = { |
7893 |
+ .reg_bits = 8, |
7894 |
+ .val_bits = 8, |
7895 |
+ }; |
7896 |
+ |
7897 |
+-static int tusb320_extcon_probe(struct i2c_client *client, |
7898 |
+- const struct i2c_device_id *id) |
7899 |
++static int tusb320_extcon_probe(struct tusb320_priv *priv) |
7900 |
++{ |
7901 |
++ int ret; |
7902 |
++ |
7903 |
++ priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable); |
7904 |
++ if (IS_ERR(priv->edev)) { |
7905 |
++ dev_err(priv->dev, "failed to allocate extcon device\n"); |
7906 |
++ return PTR_ERR(priv->edev); |
7907 |
++ } |
7908 |
++ |
7909 |
++ ret = devm_extcon_dev_register(priv->dev, priv->edev); |
7910 |
++ if (ret < 0) { |
7911 |
++ dev_err(priv->dev, "failed to register extcon device\n"); |
7912 |
++ return ret; |
7913 |
++ } |
7914 |
++ |
7915 |
++ extcon_set_property_capability(priv->edev, EXTCON_USB, |
7916 |
++ EXTCON_PROP_USB_TYPEC_POLARITY); |
7917 |
++ extcon_set_property_capability(priv->edev, EXTCON_USB_HOST, |
7918 |
++ EXTCON_PROP_USB_TYPEC_POLARITY); |
7919 |
++ |
7920 |
++ return 0; |
7921 |
++} |
7922 |
++ |
7923 |
++static int tusb320_typec_probe(struct i2c_client *client, |
7924 |
++ struct tusb320_priv *priv) |
7925 |
++{ |
7926 |
++ struct fwnode_handle *connector; |
7927 |
++ const char *cap_str; |
7928 |
++ int ret; |
7929 |
++ |
7930 |
++ /* The Type-C connector is optional, for backward compatibility. */ |
7931 |
++ connector = device_get_named_child_node(&client->dev, "connector"); |
7932 |
++ if (!connector) |
7933 |
++ return 0; |
7934 |
++ |
7935 |
++ /* Type-C connector found. */ |
7936 |
++ ret = typec_get_fw_cap(&priv->cap, connector); |
7937 |
++ if (ret) |
7938 |
++ return ret; |
7939 |
++ |
7940 |
++ priv->port_type = priv->cap.type; |
7941 |
++ |
7942 |
++ /* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */ |
7943 |
++ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str); |
7944 |
++ if (ret) |
7945 |
++ return ret; |
7946 |
++ |
7947 |
++ ret = typec_find_pwr_opmode(cap_str); |
7948 |
++ if (ret < 0) |
7949 |
++ return ret; |
7950 |
++ if (ret == TYPEC_PWR_MODE_PD) |
7951 |
++ return -EINVAL; |
7952 |
++ |
7953 |
++ priv->pwr_opmode = ret; |
7954 |
++ |
7955 |
++ /* Initialize the hardware with the devicetree settings. */ |
7956 |
++ ret = tusb320_set_adv_pwr_mode(priv); |
7957 |
++ if (ret) |
7958 |
++ return ret; |
7959 |
++ |
7960 |
++ priv->cap.revision = USB_TYPEC_REV_1_1; |
7961 |
++ priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO; |
7962 |
++ priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG; |
7963 |
++ priv->cap.orientation_aware = true; |
7964 |
++ priv->cap.driver_data = priv; |
7965 |
++ priv->cap.ops = &tusb320_typec_ops; |
7966 |
++ priv->cap.fwnode = connector; |
7967 |
++ |
7968 |
++ priv->port = typec_register_port(&client->dev, &priv->cap); |
7969 |
++ if (IS_ERR(priv->port)) |
7970 |
++ return PTR_ERR(priv->port); |
7971 |
++ |
7972 |
++ return 0; |
7973 |
++} |
7974 |
++ |
7975 |
++static int tusb320_probe(struct i2c_client *client, |
7976 |
++ const struct i2c_device_id *id) |
7977 |
+ { |
7978 |
+ struct tusb320_priv *priv; |
7979 |
++ const void *match_data; |
7980 |
++ unsigned int revision; |
7981 |
+ int ret; |
7982 |
+ |
7983 |
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); |
7984 |
+@@ -125,25 +449,42 @@ static int tusb320_extcon_probe(struct i2c_client *client, |
7985 |
+ if (ret) |
7986 |
+ return ret; |
7987 |
+ |
7988 |
+- priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable); |
7989 |
+- if (IS_ERR(priv->edev)) { |
7990 |
+- dev_err(priv->dev, "failed to allocate extcon device\n"); |
7991 |
+- return PTR_ERR(priv->edev); |
7992 |
++ match_data = device_get_match_data(&client->dev); |
7993 |
++ if (!match_data) |
7994 |
++ return -EINVAL; |
7995 |
++ |
7996 |
++ priv->ops = (struct tusb320_ops*)match_data; |
7997 |
++ |
7998 |
++ if (priv->ops->get_revision) { |
7999 |
++ ret = priv->ops->get_revision(priv, &revision); |
8000 |
++ if (ret) |
8001 |
++ dev_warn(priv->dev, |
8002 |
++ "failed to read revision register: %d\n", ret); |
8003 |
++ else |
8004 |
++ dev_info(priv->dev, "chip revision %d\n", revision); |
8005 |
+ } |
8006 |
+ |
8007 |
+- ret = devm_extcon_dev_register(priv->dev, priv->edev); |
8008 |
+- if (ret < 0) { |
8009 |
+- dev_err(priv->dev, "failed to register extcon device\n"); |
8010 |
++ ret = tusb320_extcon_probe(priv); |
8011 |
++ if (ret) |
8012 |
+ return ret; |
8013 |
+- } |
8014 |
+ |
8015 |
+- extcon_set_property_capability(priv->edev, EXTCON_USB, |
8016 |
+- EXTCON_PROP_USB_TYPEC_POLARITY); |
8017 |
+- extcon_set_property_capability(priv->edev, EXTCON_USB_HOST, |
8018 |
+- EXTCON_PROP_USB_TYPEC_POLARITY); |
8019 |
++ ret = tusb320_typec_probe(client, priv); |
8020 |
++ if (ret) |
8021 |
++ return ret; |
8022 |
+ |
8023 |
+ /* update initial state */ |
8024 |
+- tusb320_irq_handler(client->irq, priv); |
8025 |
++ tusb320_state_update_handler(priv, true); |
8026 |
++ |
8027 |
++ /* Reset chip to its default state */ |
8028 |
++ ret = tusb320_reset(priv); |
8029 |
++ if (ret) |
8030 |
++ dev_warn(priv->dev, "failed to reset chip: %d\n", ret); |
8031 |
++ else |
8032 |
++ /* |
8033 |
++ * State and polarity might change after a reset, so update |
8034 |
++ * them again and make sure the interrupt status bit is cleared. |
8035 |
++ */ |
8036 |
++ tusb320_state_update_handler(priv, true); |
8037 |
+ |
8038 |
+ ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, |
8039 |
+ tusb320_irq_handler, |
8040 |
+@@ -154,13 +495,14 @@ static int tusb320_extcon_probe(struct i2c_client *client, |
8041 |
+ } |
8042 |
+ |
8043 |
+ static const struct of_device_id tusb320_extcon_dt_match[] = { |
8044 |
+- { .compatible = "ti,tusb320", }, |
8045 |
++ { .compatible = "ti,tusb320", .data = &tusb320_ops, }, |
8046 |
++ { .compatible = "ti,tusb320l", .data = &tusb320l_ops, }, |
8047 |
+ { } |
8048 |
+ }; |
8049 |
+ MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match); |
8050 |
+ |
8051 |
+ static struct i2c_driver tusb320_extcon_driver = { |
8052 |
+- .probe = tusb320_extcon_probe, |
8053 |
++ .probe = tusb320_probe, |
8054 |
+ .driver = { |
8055 |
+ .name = "extcon-tusb320", |
8056 |
+ .of_match_table = tusb320_extcon_dt_match, |
8057 |
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c |
+index 4b8978b254f9a..dba315f675bc7 100644 |
+--- a/drivers/firmware/raspberrypi.c |
++++ b/drivers/firmware/raspberrypi.c |
+@@ -272,6 +272,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) |
+ int ret = PTR_ERR(fw->chan); |
+ if (ret != -EPROBE_DEFER) |
+ dev_err(dev, "Failed to get mbox channel: %d\n", ret); |
++ kfree(fw); |
+ return ret; |
+ } |
+ |
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c |
8070 |
+index 937e7a8dd8a96..2a2e0691462bf 100644 |
8071 |
+--- a/drivers/gpio/gpiolib-cdev.c |
8072 |
++++ b/drivers/gpio/gpiolib-cdev.c |
8073 |
+@@ -54,6 +54,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); |
8074 |
+ * interface to gpiolib GPIOs via ioctl()s. |
8075 |
+ */ |
8076 |
+ |
8077 |
++typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); |
8078 |
++typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); |
8079 |
++typedef ssize_t (*read_fn)(struct file *, char __user *, |
8080 |
++ size_t count, loff_t *); |
8081 |
++ |
8082 |
++static __poll_t call_poll_locked(struct file *file, |
8083 |
++ struct poll_table_struct *wait, |
8084 |
++ struct gpio_device *gdev, poll_fn func) |
8085 |
++{ |
8086 |
++ __poll_t ret; |
8087 |
++ |
8088 |
++ down_read(&gdev->sem); |
8089 |
++ ret = func(file, wait); |
8090 |
++ up_read(&gdev->sem); |
8091 |
++ |
8092 |
++ return ret; |
8093 |
++} |
8094 |
++ |
8095 |
++static long call_ioctl_locked(struct file *file, unsigned int cmd, |
8096 |
++ unsigned long arg, struct gpio_device *gdev, |
8097 |
++ ioctl_fn func) |
8098 |
++{ |
8099 |
++ long ret; |
8100 |
++ |
8101 |
++ down_read(&gdev->sem); |
8102 |
++ ret = func(file, cmd, arg); |
8103 |
++ up_read(&gdev->sem); |
8104 |
++ |
8105 |
++ return ret; |
8106 |
++} |
8107 |
++ |
8108 |
++static ssize_t call_read_locked(struct file *file, char __user *buf, |
8109 |
++ size_t count, loff_t *f_ps, |
8110 |
++ struct gpio_device *gdev, read_fn func) |
8111 |
++{ |
8112 |
++ ssize_t ret; |
8113 |
++ |
8114 |
++ down_read(&gdev->sem); |
8115 |
++ ret = func(file, buf, count, f_ps); |
8116 |
++ up_read(&gdev->sem); |
8117 |
++ |
8118 |
++ return ret; |
8119 |
++} |
8120 |
++ |
8121 |
+ /* |
8122 |
+ * GPIO line handle management |
8123 |
+ */ |
8124 |
+@@ -190,23 +234,25 @@ static long linehandle_set_config(struct linehandle_state *lh, |
8125 |
+ return 0; |
8126 |
+ } |
8127 |
+ |
8128 |
+-static long linehandle_ioctl(struct file *file, unsigned int cmd, |
8129 |
+- unsigned long arg) |
8130 |
++static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, |
8131 |
++ unsigned long arg) |
8132 |
+ { |
8133 |
+ struct linehandle_state *lh = file->private_data; |
8134 |
+ void __user *ip = (void __user *)arg; |
8135 |
+ struct gpiohandle_data ghd; |
8136 |
+ DECLARE_BITMAP(vals, GPIOHANDLES_MAX); |
8137 |
+- int i; |
8138 |
++ unsigned int i; |
8139 |
++ int ret; |
8140 |
+ |
8141 |
+- if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { |
8142 |
+- /* NOTE: It's ok to read values of output lines. */ |
8143 |
+- int ret = gpiod_get_array_value_complex(false, |
8144 |
+- true, |
8145 |
+- lh->num_descs, |
8146 |
+- lh->descs, |
8147 |
+- NULL, |
8148 |
+- vals); |
8149 |
++ if (!lh->gdev->chip) |
8150 |
++ return -ENODEV; |
8151 |
++ |
8152 |
++ switch (cmd) { |
8153 |
++ case GPIOHANDLE_GET_LINE_VALUES_IOCTL: |
8154 |
++ /* NOTE: It's okay to read values of output lines */ |
8155 |
++ ret = gpiod_get_array_value_complex(false, true, |
8156 |
++ lh->num_descs, lh->descs, |
8157 |
++ NULL, vals); |
8158 |
+ if (ret) |
8159 |
+ return ret; |
8160 |
+ |
8161 |
+@@ -218,7 +264,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, |
8162 |
+ return -EFAULT; |
8163 |
+ |
8164 |
+ return 0; |
8165 |
+- } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) { |
8166 |
++ case GPIOHANDLE_SET_LINE_VALUES_IOCTL: |
8167 |
+ /* |
8168 |
+ * All line descriptors were created at once with the same |
8169 |
+ * flags so just check if the first one is really output. |
8170 |
+@@ -240,10 +286,20 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, |
8171 |
+ lh->descs, |
8172 |
+ NULL, |
8173 |
+ vals); |
8174 |
+- } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) { |
8175 |
++ case GPIOHANDLE_SET_CONFIG_IOCTL: |
8176 |
+ return linehandle_set_config(lh, ip); |
8177 |
++ default: |
8178 |
++ return -EINVAL; |
8179 |
+ } |
8180 |
+- return -EINVAL; |
8181 |
++} |
8182 |
++ |
8183 |
++static long linehandle_ioctl(struct file *file, unsigned int cmd, |
8184 |
++ unsigned long arg) |
8185 |
++{ |
8186 |
++ struct linehandle_state *lh = file->private_data; |
8187 |
++ |
8188 |
++ return call_ioctl_locked(file, cmd, arg, lh->gdev, |
8189 |
++ linehandle_ioctl_unlocked); |
8190 |
+ } |
8191 |
+ |
8192 |
+ #ifdef CONFIG_COMPAT |
8193 |
+@@ -1182,20 +1238,34 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) |
8194 |
+ return ret; |
8195 |
+ } |
8196 |
+ |
8197 |
+-static long linereq_ioctl(struct file *file, unsigned int cmd, |
8198 |
+- unsigned long arg) |
8199 |
++static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, |
8200 |
++ unsigned long arg) |
8201 |
+ { |
8202 |
+ struct linereq *lr = file->private_data; |
8203 |
+ void __user *ip = (void __user *)arg; |
8204 |
+ |
8205 |
+- if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL) |
8206 |
++ if (!lr->gdev->chip) |
8207 |
++ return -ENODEV; |
8208 |
++ |
8209 |
++ switch (cmd) { |
8210 |
++ case GPIO_V2_LINE_GET_VALUES_IOCTL: |
8211 |
+ return linereq_get_values(lr, ip); |
8212 |
+- else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL) |
8213 |
++ case GPIO_V2_LINE_SET_VALUES_IOCTL: |
8214 |
+ return linereq_set_values(lr, ip); |
8215 |
+- else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL) |
8216 |
++ case GPIO_V2_LINE_SET_CONFIG_IOCTL: |
8217 |
+ return linereq_set_config(lr, ip); |
8218 |
++ default: |
8219 |
++ return -EINVAL; |
8220 |
++ } |
8221 |
++} |
8222 |
+ |
8223 |
+- return -EINVAL; |
8224 |
++static long linereq_ioctl(struct file *file, unsigned int cmd, |
8225 |
++ unsigned long arg) |
8226 |
++{ |
8227 |
++ struct linereq *lr = file->private_data; |
8228 |
++ |
8229 |
++ return call_ioctl_locked(file, cmd, arg, lr->gdev, |
8230 |
++ linereq_ioctl_unlocked); |
8231 |
+ } |
8232 |
+ |
8233 |
+ #ifdef CONFIG_COMPAT |
8234 |
+@@ -1206,12 +1276,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd, |
8235 |
+ } |
8236 |
+ #endif |
8237 |
+ |
8238 |
+-static __poll_t linereq_poll(struct file *file, |
8239 |
+- struct poll_table_struct *wait) |
8240 |
++static __poll_t linereq_poll_unlocked(struct file *file, |
8241 |
++ struct poll_table_struct *wait) |
8242 |
+ { |
8243 |
+ struct linereq *lr = file->private_data; |
8244 |
+ __poll_t events = 0; |
8245 |
+ |
8246 |
++ if (!lr->gdev->chip) |
8247 |
++ return EPOLLHUP | EPOLLERR; |
8248 |
++ |
8249 |
+ poll_wait(file, &lr->wait, wait); |
8250 |
+ |
8251 |
+ if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, |
8252 |
+@@ -1221,16 +1294,25 @@ static __poll_t linereq_poll(struct file *file, |
8253 |
+ return events; |
8254 |
+ } |
8255 |
+ |
8256 |
+-static ssize_t linereq_read(struct file *file, |
8257 |
+- char __user *buf, |
8258 |
+- size_t count, |
8259 |
+- loff_t *f_ps) |
8260 |
++static __poll_t linereq_poll(struct file *file, |
8261 |
++ struct poll_table_struct *wait) |
8262 |
++{ |
8263 |
++ struct linereq *lr = file->private_data; |
8264 |
++ |
8265 |
++ return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); |
8266 |
++} |
8267 |
++ |
8268 |
++static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, |
8269 |
++ size_t count, loff_t *f_ps) |
8270 |
+ { |
8271 |
+ struct linereq *lr = file->private_data; |
8272 |
+ struct gpio_v2_line_event le; |
8273 |
+ ssize_t bytes_read = 0; |
8274 |
+ int ret; |
8275 |
+ |
8276 |
++ if (!lr->gdev->chip) |
8277 |
++ return -ENODEV; |
8278 |
++ |
8279 |
+ if (count < sizeof(le)) |
8280 |
+ return -EINVAL; |
8281 |
+ |
8282 |
+@@ -1275,6 +1357,15 @@ static ssize_t linereq_read(struct file *file, |
8283 |
+ return bytes_read; |
8284 |
+ } |
8285 |
+ |
8286 |
++static ssize_t linereq_read(struct file *file, char __user *buf, |
8287 |
++ size_t count, loff_t *f_ps) |
8288 |
++{ |
8289 |
++ struct linereq *lr = file->private_data; |
8290 |
++ |
8291 |
++ return call_read_locked(file, buf, count, f_ps, lr->gdev, |
8292 |
++ linereq_read_unlocked); |
8293 |
++} |
8294 |
++ |
8295 |
+ static void linereq_free(struct linereq *lr) |
8296 |
+ { |
8297 |
+ unsigned int i; |
8298 |
+@@ -1490,12 +1581,15 @@ struct lineevent_state { |
8299 |
+ (GPIOEVENT_REQUEST_RISING_EDGE | \ |
8300 |
+ GPIOEVENT_REQUEST_FALLING_EDGE) |
8301 |
+ |
8302 |
+-static __poll_t lineevent_poll(struct file *file, |
8303 |
+- struct poll_table_struct *wait) |
8304 |
++static __poll_t lineevent_poll_unlocked(struct file *file, |
8305 |
++ struct poll_table_struct *wait) |
8306 |
+ { |
8307 |
+ struct lineevent_state *le = file->private_data; |
8308 |
+ __poll_t events = 0; |
8309 |
+ |
8310 |
++ if (!le->gdev->chip) |
8311 |
++ return EPOLLHUP | EPOLLERR; |
8312 |
++ |
8313 |
+ poll_wait(file, &le->wait, wait); |
8314 |
+ |
8315 |
+ if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) |
8316 |
+@@ -1504,15 +1598,21 @@ static __poll_t lineevent_poll(struct file *file, |
8317 |
+ return events; |
8318 |
+ } |
8319 |
+ |
8320 |
++static __poll_t lineevent_poll(struct file *file, |
8321 |
++ struct poll_table_struct *wait) |
8322 |
++{ |
8323 |
++ struct lineevent_state *le = file->private_data; |
8324 |
++ |
8325 |
++ return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked); |
8326 |
++} |
8327 |
++ |
8328 |
+ struct compat_gpioeevent_data { |
8329 |
+ compat_u64 timestamp; |
8330 |
+ u32 id; |
8331 |
+ }; |
8332 |
+ |
8333 |
+-static ssize_t lineevent_read(struct file *file, |
8334 |
+- char __user *buf, |
8335 |
+- size_t count, |
8336 |
+- loff_t *f_ps) |
8337 |
++static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, |
8338 |
++ size_t count, loff_t *f_ps) |
8339 |
+ { |
8340 |
+ struct lineevent_state *le = file->private_data; |
8341 |
+ struct gpioevent_data ge; |
8342 |
+@@ -1520,6 +1620,9 @@ static ssize_t lineevent_read(struct file *file, |
8343 |
+ ssize_t ge_size; |
8344 |
+ int ret; |
8345 |
+ |
8346 |
++ if (!le->gdev->chip) |
8347 |
++ return -ENODEV; |
8348 |
++ |
8349 |
+ /* |
8350 |
+ * When compatible system call is being used the struct gpioevent_data, |
8351 |
+ * in case of at least ia32, has different size due to the alignment |
8352 |
+@@ -1577,6 +1680,15 @@ static ssize_t lineevent_read(struct file *file, |
8353 |
+ return bytes_read; |
8354 |
+ } |
8355 |
+ |
8356 |
++static ssize_t lineevent_read(struct file *file, char __user *buf, |
8357 |
++ size_t count, loff_t *f_ps) |
8358 |
++{ |
8359 |
++ struct lineevent_state *le = file->private_data; |
8360 |
++ |
8361 |
++ return call_read_locked(file, buf, count, f_ps, le->gdev, |
8362 |
++ lineevent_read_unlocked); |
8363 |
++} |
8364 |
++ |
8365 |
+ static void lineevent_free(struct lineevent_state *le) |
8366 |
+ { |
8367 |
+ if (le->irq) |
8368 |
+@@ -1594,13 +1706,16 @@ static int lineevent_release(struct inode *inode, struct file *file) |
8369 |
+ return 0; |
8370 |
+ } |
8371 |
+ |
8372 |
+-static long lineevent_ioctl(struct file *file, unsigned int cmd, |
8373 |
+- unsigned long arg) |
8374 |
++static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, |
8375 |
++ unsigned long arg) |
8376 |
+ { |
8377 |
+ struct lineevent_state *le = file->private_data; |
8378 |
+ void __user *ip = (void __user *)arg; |
8379 |
+ struct gpiohandle_data ghd; |
8380 |
+ |
8381 |
++ if (!le->gdev->chip) |
8382 |
++ return -ENODEV; |
8383 |
++ |
8384 |
+ /* |
8385 |
+ * We can get the value for an event line but not set it, |
8386 |
+ * because it is input by definition. |
8387 |
+@@ -1623,6 +1738,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, |
8388 |
+ return -EINVAL; |
8389 |
+ } |
8390 |
+ |
8391 |
++static long lineevent_ioctl(struct file *file, unsigned int cmd, |
8392 |
++ unsigned long arg) |
8393 |
++{ |
8394 |
++ struct lineevent_state *le = file->private_data; |
8395 |
++ |
8396 |
++ return call_ioctl_locked(file, cmd, arg, le->gdev, |
8397 |
++ lineevent_ioctl_unlocked); |
8398 |
++} |
8399 |
++ |
8400 |
+ #ifdef CONFIG_COMPAT |
8401 |
+ static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, |
8402 |
+ unsigned long arg) |
8403 |
+@@ -2114,28 +2238,30 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
8404 |
+ return -ENODEV; |
8405 |
+ |
8406 |
+ /* Fill in the struct and pass to userspace */ |
8407 |
+- if (cmd == GPIO_GET_CHIPINFO_IOCTL) { |
8408 |
++ switch (cmd) { |
8409 |
++ case GPIO_GET_CHIPINFO_IOCTL: |
8410 |
+ return chipinfo_get(cdev, ip); |
8411 |
+ #ifdef CONFIG_GPIO_CDEV_V1 |
8412 |
+- } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { |
8413 |
++ case GPIO_GET_LINEHANDLE_IOCTL: |
8414 |
+ return linehandle_create(gdev, ip); |
8415 |
+- } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { |
8416 |
++ case GPIO_GET_LINEEVENT_IOCTL: |
8417 |
+ return lineevent_create(gdev, ip); |
8418 |
+- } else if (cmd == GPIO_GET_LINEINFO_IOCTL || |
8419 |
+- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { |
8420 |
+- return lineinfo_get_v1(cdev, ip, |
8421 |
+- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL); |
8422 |
++ case GPIO_GET_LINEINFO_IOCTL: |
8423 |
++ return lineinfo_get_v1(cdev, ip, false); |
8424 |
++ case GPIO_GET_LINEINFO_WATCH_IOCTL: |
8425 |
++ return lineinfo_get_v1(cdev, ip, true); |
8426 |
+ #endif /* CONFIG_GPIO_CDEV_V1 */ |
8427 |
+- } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL || |
8428 |
+- cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) { |
8429 |
+- return lineinfo_get(cdev, ip, |
8430 |
+- cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL); |
8431 |
+- } else if (cmd == GPIO_V2_GET_LINE_IOCTL) { |
8432 |
++ case GPIO_V2_GET_LINEINFO_IOCTL: |
8433 |
++ return lineinfo_get(cdev, ip, false); |
8434 |
++ case GPIO_V2_GET_LINEINFO_WATCH_IOCTL: |
8435 |
++ return lineinfo_get(cdev, ip, true); |
8436 |
++ case GPIO_V2_GET_LINE_IOCTL: |
8437 |
+ return linereq_create(gdev, ip); |
8438 |
+- } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { |
8439 |
++ case GPIO_GET_LINEINFO_UNWATCH_IOCTL: |
8440 |
+ return lineinfo_unwatch(cdev, ip); |
8441 |
++ default: |
8442 |
++ return -EINVAL; |
8443 |
+ } |
8444 |
+- return -EINVAL; |
8445 |
+ } |
8446 |
+ |
8447 |
+ #ifdef CONFIG_COMPAT |
8448 |
+@@ -2177,12 +2303,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb, |
8449 |
+ return NOTIFY_OK; |
8450 |
+ } |
8451 |
+ |
8452 |
+-static __poll_t lineinfo_watch_poll(struct file *file, |
8453 |
+- struct poll_table_struct *pollt) |
8454 |
++static __poll_t lineinfo_watch_poll_unlocked(struct file *file, |
8455 |
++ struct poll_table_struct *pollt) |
8456 |
+ { |
8457 |
+ struct gpio_chardev_data *cdev = file->private_data; |
8458 |
+ __poll_t events = 0; |
8459 |
+ |
8460 |
++ if (!cdev->gdev->chip) |
8461 |
++ return EPOLLHUP | EPOLLERR; |
8462 |
++ |
8463 |
+ poll_wait(file, &cdev->wait, pollt); |
8464 |
+ |
8465 |
+ if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, |
8466 |
+@@ -2192,8 +2321,17 @@ static __poll_t lineinfo_watch_poll(struct file *file, |
8467 |
+ return events; |
8468 |
+ } |
8469 |
+ |
8470 |
+-static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, |
8471 |
+- size_t count, loff_t *off) |
8472 |
++static __poll_t lineinfo_watch_poll(struct file *file, |
8473 |
++ struct poll_table_struct *pollt) |
8474 |
++{ |
8475 |
++ struct gpio_chardev_data *cdev = file->private_data; |
8476 |
++ |
8477 |
++ return call_poll_locked(file, pollt, cdev->gdev, |
8478 |
++ lineinfo_watch_poll_unlocked); |
8479 |
++} |
8480 |
++ |
8481 |
++static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, |
8482 |
++ size_t count, loff_t *off) |
8483 |
+ { |
8484 |
+ struct gpio_chardev_data *cdev = file->private_data; |
8485 |
+ struct gpio_v2_line_info_changed event; |
8486 |
+@@ -2201,6 +2339,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, |
8487 |
+ int ret; |
8488 |
+ size_t event_size; |
8489 |
+ |
8490 |
++ if (!cdev->gdev->chip) |
8491 |
++ return -ENODEV; |
8492 |
++ |
8493 |
+ #ifndef CONFIG_GPIO_CDEV_V1 |
8494 |
+ event_size = sizeof(struct gpio_v2_line_info_changed); |
8495 |
+ if (count < event_size) |
8496 |
+@@ -2268,6 +2409,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, |
8497 |
+ return bytes_read; |
8498 |
+ } |
8499 |
+ |
8500 |
++static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, |
8501 |
++ size_t count, loff_t *off) |
8502 |
++{ |
8503 |
++ struct gpio_chardev_data *cdev = file->private_data; |
8504 |
++ |
8505 |
++ return call_read_locked(file, buf, count, off, cdev->gdev, |
8506 |
++ lineinfo_watch_read_unlocked); |
8507 |
++} |
8508 |
++ |
8509 |
+ /** |
8510 |
+ * gpio_chrdev_open() - open the chardev for ioctl operations |
8511 |
+ * @inode: inode for this chardev |
8512 |
+@@ -2281,13 +2431,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) |
8513 |
+ struct gpio_chardev_data *cdev; |
8514 |
+ int ret = -ENOMEM; |
8515 |
+ |
8516 |
++ down_read(&gdev->sem); |
8517 |
++ |
8518 |
+ /* Fail on open if the backing gpiochip is gone */ |
8519 |
+- if (!gdev->chip) |
8520 |
+- return -ENODEV; |
8521 |
++ if (!gdev->chip) { |
8522 |
++ ret = -ENODEV; |
8523 |
++ goto out_unlock; |
8524 |
++ } |
8525 |
+ |
8526 |
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); |
8527 |
+ if (!cdev) |
8528 |
+- return -ENOMEM; |
8529 |
++ goto out_unlock; |
8530 |
+ |
8531 |
+ cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); |
8532 |
+ if (!cdev->watched_lines) |
8533 |
+@@ -2310,6 +2464,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) |
8534 |
+ if (ret) |
8535 |
+ goto out_unregister_notifier; |
8536 |
+ |
8537 |
++ up_read(&gdev->sem); |
8538 |
++ |
8539 |
+ return ret; |
8540 |
+ |
8541 |
+ out_unregister_notifier: |
8542 |
+@@ -2319,6 +2475,8 @@ out_free_bitmap: |
8543 |
+ bitmap_free(cdev->watched_lines); |
8544 |
+ out_free_cdev: |
8545 |
+ kfree(cdev); |
8546 |
++out_unlock: |
8547 |
++ up_read(&gdev->sem); |
8548 |
+ return ret; |
8549 |
+ } |
8550 |
+ |
8551 |
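[Editor's sketch, not part of the patch] The gpio-cdev.c hunks above convert the character-device file operations into thin wrappers that run the real handler under gdev->sem and bail out with -ENODEV (or EPOLLHUP | EPOLLERR for poll) once the backing chip is gone. The call_ioctl_locked()/call_read_locked()/call_poll_locked() helpers themselves are added earlier in this patch and are not visible in this excerpt; the following is only a simplified illustration of the idea, with assumed names and no claim to match the real helpers exactly.

#include <linux/fs.h>
#include <linux/rwsem.h>

/* Illustrative only: serialize user-space entry points against
 * gpiochip_remove(), which takes gdev->sem for writing before tearing
 * the device down. */
static long call_locked_sketch(struct file *file, unsigned int cmd,
                               unsigned long arg, struct gpio_device *gdev,
                               long (*func)(struct file *, unsigned int,
                                            unsigned long))
{
        long ret;

        down_read(&gdev->sem);          /* readers: ioctl/read/poll paths   */
        ret = func(file, cmd, arg);     /* func re-checks gdev->chip itself */
        up_read(&gdev->sem);            /* writer side: gpiochip_remove()   */

        return ret;
}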
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
8552 |
+index 67bc96403a4e6..8c041a8dd9d8f 100644 |
8553 |
+--- a/drivers/gpio/gpiolib.c |
8554 |
++++ b/drivers/gpio/gpiolib.c |
8555 |
+@@ -189,9 +189,8 @@ static int gpiochip_find_base(int ngpio) |
8556 |
+ /* found a free space? */ |
8557 |
+ if (gdev->base + gdev->ngpio <= base) |
8558 |
+ break; |
8559 |
+- else |
8560 |
+- /* nope, check the space right before the chip */ |
8561 |
+- base = gdev->base - ngpio; |
8562 |
++ /* nope, check the space right before the chip */ |
8563 |
++ base = gdev->base - ngpio; |
8564 |
+ } |
8565 |
+ |
8566 |
+ if (gpio_is_valid(base)) { |
8567 |
+@@ -728,6 +727,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, |
8568 |
+ spin_unlock_irqrestore(&gpio_lock, flags); |
8569 |
+ |
8570 |
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier); |
8571 |
++ init_rwsem(&gdev->sem); |
8572 |
+ |
8573 |
+ #ifdef CONFIG_PINCTRL |
8574 |
+ INIT_LIST_HEAD(&gdev->pin_ranges); |
8575 |
+@@ -866,6 +866,8 @@ void gpiochip_remove(struct gpio_chip *gc) |
8576 |
+ unsigned long flags; |
8577 |
+ unsigned int i; |
8578 |
+ |
8579 |
++ down_write(&gdev->sem); |
8580 |
++ |
8581 |
+ /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ |
8582 |
+ gpiochip_sysfs_unregister(gdev); |
8583 |
+ gpiochip_free_hogs(gc); |
8584 |
+@@ -900,6 +902,7 @@ void gpiochip_remove(struct gpio_chip *gc) |
8585 |
+ * gone. |
8586 |
+ */ |
8587 |
+ gcdev_unregister(gdev); |
8588 |
++ up_write(&gdev->sem); |
8589 |
+ put_device(&gdev->dev); |
8590 |
+ } |
8591 |
+ EXPORT_SYMBOL_GPL(gpiochip_remove); |
8592 |
+@@ -2410,8 +2413,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) |
8593 |
+ ret = gpiod_direction_input(desc); |
8594 |
+ goto set_output_flag; |
8595 |
+ } |
8596 |
+- } |
8597 |
+- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { |
8598 |
++ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { |
8599 |
+ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); |
8600 |
+ if (!ret) |
8601 |
+ goto set_output_value; |
8602 |
+@@ -2568,9 +2570,9 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) |
8603 |
+ static int gpio_chip_get_multiple(struct gpio_chip *gc, |
8604 |
+ unsigned long *mask, unsigned long *bits) |
8605 |
+ { |
8606 |
+- if (gc->get_multiple) { |
8607 |
++ if (gc->get_multiple) |
8608 |
+ return gc->get_multiple(gc, mask, bits); |
8609 |
+- } else if (gc->get) { |
8610 |
++ if (gc->get) { |
8611 |
+ int i, value; |
8612 |
+ |
8613 |
+ for_each_set_bit(i, mask, gc->ngpio) { |
8614 |
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h |
8615 |
+index c31f4626915de..73b732a1d9c94 100644 |
8616 |
+--- a/drivers/gpio/gpiolib.h |
8617 |
++++ b/drivers/gpio/gpiolib.h |
8618 |
+@@ -15,6 +15,7 @@ |
8619 |
+ #include <linux/device.h> |
8620 |
+ #include <linux/module.h> |
8621 |
+ #include <linux/cdev.h> |
8622 |
++#include <linux/rwsem.h> |
8623 |
+ |
8624 |
+ #define GPIOCHIP_NAME "gpiochip" |
8625 |
+ |
8626 |
+@@ -37,6 +38,12 @@ |
8627 |
+ * or name of the IP component in a System on Chip. |
8628 |
+ * @data: per-instance data assigned by the driver |
8629 |
+ * @list: links gpio_device:s together for traversal |
8630 |
++ * @notifier: used to notify subscribers about lines being requested, released |
8631 |
++ * or reconfigured |
8632 |
++ * @sem: protects the structure from a NULL-pointer dereference of @chip by |
8633 |
++ * user-space operations when the device gets unregistered during |
8634 |
++ * a hot-unplug event |
8635 |
++ * @pin_ranges: range of pins served by the GPIO driver |
8636 |
+ * |
8637 |
+ * This state container holds most of the runtime variable data |
8638 |
+ * for a GPIO device and can hold references and live on after the |
8639 |
+@@ -57,6 +64,7 @@ struct gpio_device { |
8640 |
+ void *data; |
8641 |
+ struct list_head list; |
8642 |
+ struct blocking_notifier_head notifier; |
8643 |
++ struct rw_semaphore sem; |
8644 |
+ |
8645 |
+ #ifdef CONFIG_PINCTRL |
8646 |
+ /* |
8647 |
+@@ -72,6 +80,20 @@ struct gpio_device { |
8648 |
+ /* gpio suffixes used for ACPI and device tree lookup */ |
8649 |
+ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; |
8650 |
+ |
8651 |
++/** |
8652 |
++ * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes |
8653 |
++ * |
8654 |
++ * @desc: Array of pointers to the GPIO descriptors |
8655 |
++ * @size: Number of elements in desc |
8656 |
++ * @chip: Parent GPIO chip |
8657 |
++ * @get_mask: Get mask used in fastpath |
8658 |
++ * @set_mask: Set mask used in fastpath |
8659 |
++ * @invert_mask: Invert mask used in fastpath |
8660 |
++ * |
8661 |
++ * This structure is attached to struct gpiod_descs obtained from |
8662 |
++ * gpiod_get_array() and can be passed back to get/set array functions in order |
8663 |
++ * to activate fast processing path if applicable. |
8664 |
++ */ |
8665 |
+ struct gpio_array { |
8666 |
+ struct gpio_desc **desc; |
8667 |
+ unsigned int size; |
8668 |
+@@ -96,6 +118,23 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, |
8669 |
+ extern spinlock_t gpio_lock; |
8670 |
+ extern struct list_head gpio_devices; |
8671 |
+ |
8672 |
++ |
8673 |
++/** |
8674 |
++ * struct gpio_desc - Opaque descriptor for a GPIO |
8675 |
++ * |
8676 |
++ * @gdev: Pointer to the parent GPIO device |
8677 |
++ * @flags: Binary descriptor flags |
8678 |
++ * @label: Name of the consumer |
8679 |
++ * @name: Line name |
8680 |
++ * @hog: Pointer to the device node that hogs this line (if any) |
8681 |
++ * @debounce_period_us: Debounce period in microseconds |
8682 |
++ * |
8683 |
++ * These are obtained using gpiod_get() and are preferable to the old |
8684 |
++ * integer-based handles. |
8685 |
++ * |
8686 |
++ * Contrary to integers, a pointer to a &struct gpio_desc is guaranteed to be |
8687 |
++ * valid until the GPIO is released. |
8688 |
++ */ |
8689 |
+ struct gpio_desc { |
8690 |
+ struct gpio_device *gdev; |
8691 |
+ unsigned long flags; |
8692 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
8693 |
+index 477ab35511770..34303dd3ada96 100644 |
8694 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
8695 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
8696 |
+@@ -1910,7 +1910,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, |
8697 |
+ |
8698 |
+ ret = drm_vma_node_allow(&obj->vma_node, drm_priv); |
8699 |
+ if (ret) { |
8700 |
+- kfree(mem); |
8701 |
++ kfree(*mem); |
8702 |
+ return ret; |
8703 |
+ } |
8704 |
+ |
8705 |
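[Editor's sketch, not part of the patch] The one-line amdgpu_amdkfd_gpuvm.c change above frees the object that was allocated through the mem out-parameter rather than the out-parameter itself. A standalone userspace illustration of the same pattern, with hypothetical names:

#include <stdlib.h>

struct kgd_mem_example { int placeholder; };

/* The caller passes the address of its pointer; after the allocation has
 * succeeded, an error path must free *mem (the allocation), not mem (the
 * address of the caller's pointer variable). */
static int import_example(struct kgd_mem_example **mem, int fail_later)
{
        *mem = calloc(1, sizeof(**mem));
        if (!*mem)
                return -1;

        if (fail_later) {
                free(*mem);     /* correct: releases the allocation        */
                *mem = NULL;    /* don't leave the caller a dangling value */
                return -1;
        }
        return 0;
}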
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c |
8706 |
+index 27b19503773b9..71354f505b84b 100644 |
8707 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c |
8708 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c |
8709 |
+@@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) |
8710 |
+ |
8711 |
+ if (!found) |
8712 |
+ return false; |
8713 |
++ pci_dev_put(pdev); |
8714 |
+ |
8715 |
+ adev->bios = kmalloc(size, GFP_KERNEL); |
8716 |
+ if (!adev->bios) { |
8717 |
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
8718 |
+index 36cc89f56cea2..0d998bc830c2c 100644 |
8719 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
8720 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
8721 |
+@@ -4902,6 +4902,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) |
8722 |
+ pm_runtime_enable(&(p->dev)); |
8723 |
+ pm_runtime_resume(&(p->dev)); |
8724 |
+ } |
8725 |
++ |
8726 |
++ pci_dev_put(p); |
8727 |
+ } |
8728 |
+ |
8729 |
+ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) |
8730 |
+@@ -4940,6 +4942,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) |
8731 |
+ |
8732 |
+ if (expires < ktime_get_mono_fast_ns()) { |
8733 |
+ dev_warn(adev->dev, "failed to suspend display audio\n"); |
8734 |
++ pci_dev_put(p); |
8735 |
+ /* TODO: abort the succeeding gpu reset? */ |
8736 |
+ return -ETIMEDOUT; |
8737 |
+ } |
8738 |
+@@ -4947,6 +4950,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) |
8739 |
+ |
8740 |
+ pm_runtime_disable(&(p->dev)); |
8741 |
+ |
8742 |
++ pci_dev_put(p); |
8743 |
+ return 0; |
8744 |
+ } |
8745 |
+ |
8746 |
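[Editor's sketch, not part of the patch] The amdgpu_bios.c and amdgpu_device.c hunks above add pci_dev_put() calls: the pci_get_*() lookup helpers return a reference-counted struct pci_dev, and every exit path, including the error paths, has to drop that reference. A minimal kernel-style sketch of the pattern, simplified and not the driver code:

#include <linux/pci.h>

static int audio_peer_check_sketch(unsigned int bus, unsigned int slot)
{
        struct pci_dev *p;
        int ret = 0;

        /* pci_get_domain_bus_and_slot() takes a reference on success */
        p = pci_get_domain_bus_and_slot(0, bus, PCI_DEVFN(slot, 1));
        if (!p)
                return -ENODEV;

        if (!pci_is_enabled(p))
                ret = -ETIMEDOUT;       /* stand-in for the failure case */

        pci_dev_put(p);                 /* balance the reference on every path */
        return ret;
}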
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |
8747 |
+index ce31d4fdee935..4af3610f4a827 100644 |
8748 |
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |
8749 |
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |
8750 |
+@@ -62,6 +62,8 @@ struct amdgpu_vf_error_buffer { |
8751 |
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; |
8752 |
+ }; |
8753 |
+ |
8754 |
++enum idh_request; |
8755 |
++ |
8756 |
+ /** |
8757 |
+ * struct amdgpu_virt_ops - amdgpu device virt operations |
8758 |
+ */ |
8759 |
+@@ -71,7 +73,8 @@ struct amdgpu_virt_ops { |
8760 |
+ int (*req_init_data)(struct amdgpu_device *adev); |
8761 |
+ int (*reset_gpu)(struct amdgpu_device *adev); |
8762 |
+ int (*wait_reset)(struct amdgpu_device *adev); |
8763 |
+- void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); |
8764 |
++ void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, |
8765 |
++ u32 data1, u32 data2, u32 data3); |
8766 |
+ }; |
8767 |
+ |
8768 |
+ /* |
8769 |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c |
8770 |
+index d793eec69d61e..6fee12c91ef59 100644 |
8771 |
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c |
8772 |
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c |
8773 |
+@@ -40,39 +40,6 @@ |
8774 |
+ |
8775 |
+ #include "dm_helpers.h" |
8776 |
+ |
8777 |
+-struct monitor_patch_info { |
8778 |
+- unsigned int manufacturer_id; |
8779 |
+- unsigned int product_id; |
8780 |
+- void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); |
8781 |
+- unsigned int patch_param; |
8782 |
+-}; |
8783 |
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); |
8784 |
+- |
8785 |
+-static const struct monitor_patch_info monitor_patch_table[] = { |
8786 |
+-{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, |
8787 |
+-{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, |
8788 |
+-}; |
8789 |
+- |
8790 |
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) |
8791 |
+-{ |
8792 |
+- if (edid_caps) |
8793 |
+- edid_caps->panel_patch.max_dsc_target_bpp_limit = param; |
8794 |
+-} |
8795 |
+- |
8796 |
+-static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) |
8797 |
+-{ |
8798 |
+- int i, ret = 0; |
8799 |
+- |
8800 |
+- for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) |
8801 |
+- if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) |
8802 |
+- && (edid_caps->product_id == monitor_patch_table[i].product_id)) { |
8803 |
+- monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); |
8804 |
+- ret++; |
8805 |
+- } |
8806 |
+- |
8807 |
+- return ret; |
8808 |
+-} |
8809 |
+- |
8810 |
+ /* dm_helpers_parse_edid_caps |
8811 |
+ * |
8812 |
+ * Parse edid caps |
8813 |
+@@ -158,8 +125,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( |
8814 |
+ kfree(sads); |
8815 |
+ kfree(sadb); |
8816 |
+ |
8817 |
+- amdgpu_dm_patch_edid_caps(edid_caps); |
8818 |
+- |
8819 |
+ return result; |
8820 |
+ } |
8821 |
+ |
8822 |
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c |
8823 |
+index 6dbde74c1e069..1d86fd5610c03 100644 |
8824 |
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c |
8825 |
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c |
8826 |
+@@ -352,6 +352,7 @@ static enum bp_result get_gpio_i2c_info( |
8827 |
+ uint32_t count = 0; |
8828 |
+ unsigned int table_index = 0; |
8829 |
+ bool find_valid = false; |
8830 |
++ struct atom_gpio_pin_assignment *pin; |
8831 |
+ |
8832 |
+ if (!info) |
8833 |
+ return BP_RESULT_BADINPUT; |
8834 |
+@@ -379,20 +380,17 @@ static enum bp_result get_gpio_i2c_info( |
8835 |
+ - sizeof(struct atom_common_table_header)) |
8836 |
+ / sizeof(struct atom_gpio_pin_assignment); |
8837 |
+ |
8838 |
++ pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; |
8839 |
++ |
8840 |
+ for (table_index = 0; table_index < count; table_index++) { |
8841 |
+- if (((record->i2c_id & I2C_HW_CAP) == ( |
8842 |
+- header->gpio_pin[table_index].gpio_id & |
8843 |
+- I2C_HW_CAP)) && |
8844 |
+- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == |
8845 |
+- (header->gpio_pin[table_index].gpio_id & |
8846 |
+- I2C_HW_ENGINE_ID_MASK)) && |
8847 |
+- ((record->i2c_id & I2C_HW_LANE_MUX) == |
8848 |
+- (header->gpio_pin[table_index].gpio_id & |
8849 |
+- I2C_HW_LANE_MUX))) { |
8850 |
++ if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && |
8851 |
++ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && |
8852 |
++ ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { |
8853 |
+ /* still valid */ |
8854 |
+ find_valid = true; |
8855 |
+ break; |
8856 |
+ } |
8857 |
++ pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); |
8858 |
+ } |
8859 |
+ |
8860 |
+ /* If we don't find the entry that we are looking for then |
8861 |
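[Editor's sketch, not part of the patch] The bios_parser2.c hunk above stops indexing header->gpio_pin[] directly and instead advances a pointer by sizeof(struct atom_gpio_pin_assignment) on each iteration, the usual way to scan a table whose real length is only known from a separate count field rather than from the declared array size. A generic standalone illustration with hypothetical types:

#include <stddef.h>
#include <stdint.h>

struct entry_example { uint32_t id; uint8_t payload[3]; };

/* Walk 'count' packed entries starting at 'first' and return the index of
 * the first entry whose id matches, or -1.  Advancing by sizeof(*e) mirrors
 * how the parser now steps through the ATOM gpio_pin table. */
static int find_entry(const struct entry_example *first, size_t count,
                      uint32_t wanted)
{
        const struct entry_example *e = first;
        size_t i;

        for (i = 0; i < count; i++) {
                if (e->id == wanted)
                        return (int)i;
                e = (const struct entry_example *)((const uint8_t *)e +
                                                   sizeof(*e));
        }
        return -1;
}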
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c |
8862 |
+index dcfa0a3efa00d..bf72d3f60d7f4 100644 |
8863 |
+--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c |
8864 |
++++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c |
8865 |
+@@ -1127,6 +1127,7 @@ struct resource_pool *dce60_create_resource_pool( |
8866 |
+ if (dce60_construct(num_virtual_links, dc, pool)) |
8867 |
+ return &pool->base; |
8868 |
+ |
8869 |
++ kfree(pool); |
8870 |
+ BREAK_TO_DEBUGGER(); |
8871 |
+ return NULL; |
8872 |
+ } |
8873 |
+@@ -1324,6 +1325,7 @@ struct resource_pool *dce61_create_resource_pool( |
8874 |
+ if (dce61_construct(num_virtual_links, dc, pool)) |
8875 |
+ return &pool->base; |
8876 |
+ |
8877 |
++ kfree(pool); |
8878 |
+ BREAK_TO_DEBUGGER(); |
8879 |
+ return NULL; |
8880 |
+ } |
8881 |
+@@ -1517,6 +1519,7 @@ struct resource_pool *dce64_create_resource_pool( |
8882 |
+ if (dce64_construct(num_virtual_links, dc, pool)) |
8883 |
+ return &pool->base; |
8884 |
+ |
8885 |
++ kfree(pool); |
8886 |
+ BREAK_TO_DEBUGGER(); |
8887 |
+ return NULL; |
8888 |
+ } |
8889 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
8890 |
+index 725d92e40cd30..52d1f9746e8cb 100644 |
8891 |
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
8892 |
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c |
8893 |
+@@ -1138,6 +1138,7 @@ struct resource_pool *dce80_create_resource_pool( |
8894 |
+ if (dce80_construct(num_virtual_links, dc, pool)) |
8895 |
+ return &pool->base; |
8896 |
+ |
8897 |
++ kfree(pool); |
8898 |
+ BREAK_TO_DEBUGGER(); |
8899 |
+ return NULL; |
8900 |
+ } |
8901 |
+@@ -1337,6 +1338,7 @@ struct resource_pool *dce81_create_resource_pool( |
8902 |
+ if (dce81_construct(num_virtual_links, dc, pool)) |
8903 |
+ return &pool->base; |
8904 |
+ |
8905 |
++ kfree(pool); |
8906 |
+ BREAK_TO_DEBUGGER(); |
8907 |
+ return NULL; |
8908 |
+ } |
8909 |
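[Editor's sketch, not part of the patch] The dce60/dce61/dce64 and dce80/dce81 hunks above all plug the same leak: the *_create_resource_pool() helpers allocate a pool, and the failure branch previously returned NULL without freeing it. A condensed standalone sketch of the corrected shape, with simplified names:

#include <stdbool.h>
#include <stdlib.h>

struct pool_example { int base; };

static bool construct_example(struct pool_example *pool)
{
        (void)pool;
        return false;   /* pretend construction failed, to exercise the path */
}

static struct pool_example *create_pool_example(void)
{
        struct pool_example *pool = calloc(1, sizeof(*pool));

        if (!pool)
                return NULL;

        if (construct_example(pool))
                return pool;            /* success: caller owns the pool  */

        free(pool);                     /* failure: don't leak the struct */
        return NULL;
}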
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
8910 |
+index 91ab4dbbe1a6d..c655d03ef754d 100644 |
8911 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
8912 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c |
8913 |
+@@ -804,6 +804,32 @@ static void false_optc_underflow_wa( |
8914 |
+ tg->funcs->clear_optc_underflow(tg); |
8915 |
+ } |
8916 |
+ |
8917 |
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) |
8918 |
++{ |
8919 |
++ struct pipe_ctx *other_pipe; |
8920 |
++ int vready_offset = pipe->pipe_dlg_param.vready_offset; |
8921 |
++ |
8922 |
++ /* Always use the largest vready_offset of all connected pipes */ |
8923 |
++ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { |
8924 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8925 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8926 |
++ } |
8927 |
++ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { |
8928 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8929 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8930 |
++ } |
8931 |
++ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { |
8932 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8933 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8934 |
++ } |
8935 |
++ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { |
8936 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8937 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8938 |
++ } |
8939 |
++ |
8940 |
++ return vready_offset; |
8941 |
++} |
8942 |
++ |
8943 |
+ enum dc_status dcn10_enable_stream_timing( |
8944 |
+ struct pipe_ctx *pipe_ctx, |
8945 |
+ struct dc_state *context, |
8946 |
+@@ -838,7 +864,7 @@ enum dc_status dcn10_enable_stream_timing( |
8947 |
+ pipe_ctx->stream_res.tg->funcs->program_timing( |
8948 |
+ pipe_ctx->stream_res.tg, |
8949 |
+ &stream->timing, |
8950 |
+- pipe_ctx->pipe_dlg_param.vready_offset, |
8951 |
++ calculate_vready_offset_for_group(pipe_ctx), |
8952 |
+ pipe_ctx->pipe_dlg_param.vstartup_start, |
8953 |
+ pipe_ctx->pipe_dlg_param.vupdate_offset, |
8954 |
+ pipe_ctx->pipe_dlg_param.vupdate_width, |
8955 |
+@@ -2776,7 +2802,7 @@ void dcn10_program_pipe( |
8956 |
+ |
8957 |
+ pipe_ctx->stream_res.tg->funcs->program_global_sync( |
8958 |
+ pipe_ctx->stream_res.tg, |
8959 |
+- pipe_ctx->pipe_dlg_param.vready_offset, |
8960 |
++ calculate_vready_offset_for_group(pipe_ctx), |
8961 |
+ pipe_ctx->pipe_dlg_param.vstartup_start, |
8962 |
+ pipe_ctx->pipe_dlg_param.vupdate_offset, |
8963 |
+ pipe_ctx->pipe_dlg_param.vupdate_width); |
8964 |
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |
8965 |
+index 58eea3aa3bfc5..bf2a8f53694b4 100644 |
8966 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |
8967 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |
8968 |
+@@ -1564,6 +1564,31 @@ static void dcn20_update_dchubp_dpp( |
8969 |
+ hubp->funcs->set_blank(hubp, false); |
8970 |
+ } |
8971 |
+ |
8972 |
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) |
8973 |
++{ |
8974 |
++ struct pipe_ctx *other_pipe; |
8975 |
++ int vready_offset = pipe->pipe_dlg_param.vready_offset; |
8976 |
++ |
8977 |
++ /* Always use the largest vready_offset of all connected pipes */ |
8978 |
++ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { |
8979 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8980 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8981 |
++ } |
8982 |
++ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { |
8983 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8984 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8985 |
++ } |
8986 |
++ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { |
8987 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8988 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8989 |
++ } |
8990 |
++ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { |
8991 |
++ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) |
8992 |
++ vready_offset = other_pipe->pipe_dlg_param.vready_offset; |
8993 |
++ } |
8994 |
++ |
8995 |
++ return vready_offset; |
8996 |
++} |
8997 |
+ |
8998 |
+ static void dcn20_program_pipe( |
8999 |
+ struct dc *dc, |
9000 |
+@@ -1582,7 +1607,7 @@ static void dcn20_program_pipe( |
9001 |
+ |
9002 |
+ pipe_ctx->stream_res.tg->funcs->program_global_sync( |
9003 |
+ pipe_ctx->stream_res.tg, |
9004 |
+- pipe_ctx->pipe_dlg_param.vready_offset, |
9005 |
++ calculate_vready_offset_for_group(pipe_ctx), |
9006 |
+ pipe_ctx->pipe_dlg_param.vstartup_start, |
9007 |
+ pipe_ctx->pipe_dlg_param.vupdate_offset, |
9008 |
+ pipe_ctx->pipe_dlg_param.vupdate_width); |
9009 |
+@@ -1875,7 +1900,7 @@ bool dcn20_update_bandwidth( |
9010 |
+ |
9011 |
+ pipe_ctx->stream_res.tg->funcs->program_global_sync( |
9012 |
+ pipe_ctx->stream_res.tg, |
9013 |
+- pipe_ctx->pipe_dlg_param.vready_offset, |
9014 |
++ calculate_vready_offset_for_group(pipe_ctx), |
9015 |
+ pipe_ctx->pipe_dlg_param.vstartup_start, |
9016 |
+ pipe_ctx->pipe_dlg_param.vupdate_offset, |
9017 |
+ pipe_ctx->pipe_dlg_param.vupdate_width); |
9018 |
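[Editor's sketch, not part of the patch] Both the dcn10 and dcn20 hunks above add an identical calculate_vready_offset_for_group() that takes the maximum vready_offset across every pipe linked to the current one (bottom/top and both ODM directions) before programming global sync. The four loops differ only in which link they follow; purely as an illustration, and not something the patch itself does, the same maximum search could be expressed once with a link accessor:

/* Hypothetical helper: scan one chain of connected pipes. */
static int max_vready_along(struct pipe_ctx *start,
                            struct pipe_ctx *(*next)(struct pipe_ctx *),
                            int vready_offset)
{
        struct pipe_ctx *p;

        for (p = start; p != NULL; p = next(p))
                if (p->pipe_dlg_param.vready_offset > vready_offset)
                        vready_offset = p->pipe_dlg_param.vready_offset;

        return vready_offset;
}

static struct pipe_ctx *next_bottom(struct pipe_ctx *p)
{
        return p->bottom_pipe;
}

/* usage: vready = max_vready_along(pipe->bottom_pipe, next_bottom, vready); */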
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c |
9019 |
+index 7aad0340f7946..67d83417ec337 100644 |
9020 |
+--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c |
9021 |
++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c |
9022 |
+@@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param |
9023 |
+ dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; |
9024 |
+ dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; |
9025 |
+ } |
9026 |
++ |
9027 |
++ // WA: patch strobe modes to compensate for DCN303 BW issue |
9028 |
++ if (dcn3_03_soc.num_chans <= 4) { |
9029 |
++ for (i = 0; i < dcn3_03_soc.num_states; i++) { |
9030 |
++ if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) |
9031 |
++ break; |
9032 |
++ |
9033 |
++ if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { |
9034 |
++ dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; |
9035 |
++ dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; |
9036 |
++ } |
9037 |
++ } |
9038 |
++ } |
9039 |
++ |
9040 |
+ /* re-init DML with updated bb */ |
9041 |
+ dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); |
9042 |
+ if (dc->current_state) |
9043 |
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h |
9044 |
+index bac15c466733d..6e27c8b16391f 100644 |
9045 |
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h |
9046 |
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h |
9047 |
+@@ -341,7 +341,8 @@ struct amd_pm_funcs { |
9048 |
+ int (*get_power_profile_mode)(void *handle, char *buf); |
9049 |
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); |
9050 |
+ int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size); |
9051 |
+- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); |
9052 |
++ int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, |
9053 |
++ long *input, uint32_t size); |
9054 |
+ int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); |
9055 |
+ int (*smu_i2c_bus_access)(void *handle, bool acquire); |
9056 |
+ int (*gfx_state_change_set)(void *handle, uint32_t state); |
9057 |
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c |
9058 |
+index 321215003643b..0f5930e797bd5 100644 |
9059 |
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c |
9060 |
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c |
9061 |
+@@ -924,7 +924,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u |
9062 |
+ return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size); |
9063 |
+ } |
9064 |
+ |
9065 |
+-static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) |
9066 |
++static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, |
9067 |
++ long *input, uint32_t size) |
9068 |
+ { |
9069 |
+ struct pp_hwmgr *hwmgr = handle; |
9070 |
+ |
9071 |
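[Editor's sketch, not part of the patch] The kgd_pp_interface.h and amd_powerplay.c hunks above change the odn_edit_dpm_table callback and its powerplay implementation in lockstep, so that both use the enum PP_OD_DPM_TABLE_COMMAND parameter instead of a bare uint32_t; keeping the prototype and the implementation identical is what makes the indirect call type-consistent (the assumption here is that this is the motivation, e.g. for control-flow-integrity style checking). A small standalone illustration with hypothetical names:

enum od_cmd_example { OD_SCLK_VDDC_TABLE, OD_MCLK_VDDC_TABLE, OD_COMMIT_TABLE };

struct pm_funcs_example {
        /* function-pointer type and implementation must agree exactly */
        int (*odn_edit)(void *handle, enum od_cmd_example type,
                        long *input, unsigned int size);
};

static int odn_edit_impl(void *handle, enum od_cmd_example type,
                         long *input, unsigned int size)
{
        (void)handle; (void)type; (void)input; (void)size;
        return 0;
}

static const struct pm_funcs_example funcs_example = {
        .odn_edit = odn_edit_impl,      /* same signature on both sides */
};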
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c |
9072 |
+index 67d7da0b6fed5..1d829402cd2e2 100644 |
9073 |
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c |
9074 |
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c |
9075 |
+@@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) |
9076 |
+ for (i = 0; i < table_entries; i++) { |
9077 |
+ result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); |
9078 |
+ if (result) { |
9079 |
++ kfree(hwmgr->current_ps); |
9080 |
+ kfree(hwmgr->request_ps); |
9081 |
+ kfree(hwmgr->ps); |
9082 |
++ hwmgr->current_ps = NULL; |
9083 |
+ hwmgr->request_ps = NULL; |
9084 |
+ hwmgr->ps = NULL; |
9085 |
+ return -EINVAL; |
9086 |
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |
9087 |
+index 85d55ab4e369f..299b5c838bf70 100644 |
9088 |
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |
9089 |
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |
9090 |
+@@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, |
9091 |
+ data->od8_settings.od8_settings_array; |
9092 |
+ OverDriveTable_t *od_table = |
9093 |
+ &(data->smc_state_table.overdrive_table); |
9094 |
+- int32_t input_index, input_clk, input_vol, i; |
9095 |
++ int32_t input_clk, input_vol, i; |
9096 |
++ uint32_t input_index; |
9097 |
+ int od8_id; |
9098 |
+ int ret; |
9099 |
+ |
9100 |
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c |
9101 |
+index 614c3d0495141..83fa3d20a1d57 100644 |
9102 |
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c |
9103 |
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c |
9104 |
+@@ -1595,6 +1595,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) |
9105 |
+ if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) |
9106 |
+ return false; |
9107 |
+ |
9108 |
++ /* return true if ASIC is in BACO state already */ |
9109 |
++ if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) |
9110 |
++ return true; |
9111 |
++ |
9112 |
+ /* Arcturus does not support this bit mask */ |
9113 |
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && |
9114 |
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) |
9115 |
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h |
9116 |
+index aeeb09a27202e..fdd8e3d3232ec 100644 |
9117 |
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h |
9118 |
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h |
9119 |
+@@ -395,7 +395,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) |
9120 |
+ |
9121 |
+ void adv7533_dsi_power_on(struct adv7511 *adv); |
9122 |
+ void adv7533_dsi_power_off(struct adv7511 *adv); |
9123 |
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); |
9124 |
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, |
9125 |
++ const struct drm_display_mode *mode); |
9126 |
+ int adv7533_patch_registers(struct adv7511 *adv); |
9127 |
+ int adv7533_patch_cec_registers(struct adv7511 *adv); |
9128 |
+ int adv7533_attach_dsi(struct adv7511 *adv); |
9129 |
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c |
9130 |
+index 3dc551d223d66..44762116aac97 100644 |
9131 |
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c |
9132 |
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c |
9133 |
+@@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) |
9134 |
+ } |
9135 |
+ |
9136 |
+ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511, |
9137 |
+- struct drm_display_mode *mode) |
9138 |
++ const struct drm_display_mode *mode) |
9139 |
+ { |
9140 |
+ if (mode->clock > 165000) |
9141 |
+ return MODE_CLOCK_HIGH; |
9142 |
+@@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511, |
9143 |
+ regmap_update_bits(adv7511->regmap, 0x17, |
9144 |
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); |
9145 |
+ |
9146 |
+- if (adv7511->type == ADV7533 || adv7511->type == ADV7535) |
9147 |
+- adv7533_mode_set(adv7511, adj_mode); |
9148 |
+- |
9149 |
+ drm_mode_copy(&adv7511->curr_mode, adj_mode); |
9150 |
+ |
9151 |
+ /* |
9152 |
+@@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge, |
9153 |
+ adv7511_mode_set(adv, mode, adj_mode); |
9154 |
+ } |
9155 |
+ |
9156 |
++static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, |
9157 |
++ const struct drm_display_info *info, |
9158 |
++ const struct drm_display_mode *mode) |
9159 |
++{ |
9160 |
++ struct adv7511 *adv = bridge_to_adv7511(bridge); |
9161 |
++ |
9162 |
++ if (adv->type == ADV7533 || adv->type == ADV7535) |
9163 |
++ return adv7533_mode_valid(adv, mode); |
9164 |
++ else |
9165 |
++ return adv7511_mode_valid(adv, mode); |
9166 |
++} |
9167 |
++ |
9168 |
+ static int adv7511_bridge_attach(struct drm_bridge *bridge, |
9169 |
+ enum drm_bridge_attach_flags flags) |
9170 |
+ { |
9171 |
+@@ -963,6 +972,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { |
9172 |
+ .enable = adv7511_bridge_enable, |
9173 |
+ .disable = adv7511_bridge_disable, |
9174 |
+ .mode_set = adv7511_bridge_mode_set, |
9175 |
++ .mode_valid = adv7511_bridge_mode_valid, |
9176 |
+ .attach = adv7511_bridge_attach, |
9177 |
+ .detect = adv7511_bridge_detect, |
9178 |
+ .get_edid = adv7511_bridge_get_edid, |
9179 |
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c |
9180 |
+index 59d718bde8c41..7eda12f338a1d 100644 |
9181 |
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c |
9182 |
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c |
9183 |
+@@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv) |
9184 |
+ regmap_write(adv->regmap_cec, 0x27, 0x0b); |
9185 |
+ } |
9186 |
+ |
9187 |
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) |
9188 |
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, |
9189 |
++ const struct drm_display_mode *mode) |
9190 |
+ { |
9191 |
++ int lanes; |
9192 |
+ struct mipi_dsi_device *dsi = adv->dsi; |
9193 |
+- int lanes, ret; |
9194 |
+- |
9195 |
+- if (adv->num_dsi_lanes != 4) |
9196 |
+- return; |
9197 |
+ |
9198 |
+ if (mode->clock > 80000) |
9199 |
+ lanes = 4; |
9200 |
+ else |
9201 |
+ lanes = 3; |
9202 |
+ |
9203 |
+- if (lanes != dsi->lanes) { |
9204 |
+- mipi_dsi_detach(dsi); |
9205 |
+- dsi->lanes = lanes; |
9206 |
+- ret = mipi_dsi_attach(dsi); |
9207 |
+- if (ret) |
9208 |
+- dev_err(&dsi->dev, "failed to change host lanes\n"); |
9209 |
+- } |
9210 |
++ /* |
9211 |
++ * TODO: add support for dynamic switching of lanes |
9212 |
++ * by using the bridge pre_enable() op. Until then, filter |
9213 |
++ * out the modes that would need a different number of lanes |
9214 |
++ * than what was configured in the device tree. |
9215 |
++ */ |
9216 |
++ if (lanes != dsi->lanes) |
9217 |
++ return MODE_BAD; |
9218 |
++ |
9219 |
++ return MODE_OK; |
9220 |
+ } |
9221 |
+ |
9222 |
+ int adv7533_patch_registers(struct adv7511 *adv) |
9223 |
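[Editor's sketch, not part of the patch] The adv7511/adv7533 hunks above stop re-attaching the DSI device from mode_set() and instead add a bridge .mode_valid hook that rejects modes whose required lane count differs from the lane count fixed at probe time. The decision itself reduces to a tiny pure function; a standalone restatement, not the driver code:

#include <stdbool.h>

/* adv7533 rule as implemented above: pixel clocks above 80 MHz need 4 DSI
 * lanes, slower modes need 3. */
static int adv7533_lanes_needed(int clock_khz)
{
        return clock_khz > 80000 ? 4 : 3;
}

/* A mode is acceptable only if it matches the configured lane count;
 * switching lanes on the fly is left as the TODO in the patch notes. */
static bool adv7533_mode_acceptable(int clock_khz, int configured_lanes)
{
        return adv7533_lanes_needed(clock_khz) == configured_lanes;
}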
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c |
9224 |
+index eda832f9200db..32ee023aed266 100644 |
9225 |
+--- a/drivers/gpu/drm/drm_fourcc.c |
9226 |
++++ b/drivers/gpu/drm/drm_fourcc.c |
9227 |
+@@ -260,12 +260,15 @@ const struct drm_format_info *__drm_format_info(u32 format) |
9228 |
+ .vsub = 2, .is_yuv = true }, |
9229 |
+ { .format = DRM_FORMAT_Q410, .depth = 0, |
9230 |
+ .num_planes = 3, .char_per_block = { 2, 2, 2 }, |
9231 |
+- .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, |
9232 |
+- .vsub = 0, .is_yuv = true }, |
9233 |
++ .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, |
9234 |
++ .vsub = 1, .is_yuv = true }, |
9235 |
+ { .format = DRM_FORMAT_Q401, .depth = 0, |
9236 |
+ .num_planes = 3, .char_per_block = { 2, 2, 2 }, |
9237 |
+- .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, |
9238 |
+- .vsub = 0, .is_yuv = true }, |
9239 |
++ .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, |
9240 |
++ .vsub = 1, .is_yuv = true }, |
9241 |
++ { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, |
9242 |
++ .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, |
9243 |
++ .hsub = 2, .vsub = 2, .is_yuv = true}, |
9244 |
+ }; |
9245 |
+ |
9246 |
+ unsigned int i; |
9247 |
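[Editor's sketch, not part of the patch] The drm_fourcc.c hunk above changes the Q410/Q401 descriptions from hsub = 0 / vsub = 0 to 1 and adds the P030 entry. Subsampling factors are used as divisors when deriving chroma-plane dimensions, so a value of 0 would divide by zero; non-subsampled YUV formats are expected to use 1. A standalone arithmetic illustration, not the DRM helpers themselves:

#include <stdio.h>

/* Chroma plane width is the frame width divided by the horizontal
 * subsampling factor, so hsub must never be 0. */
static unsigned int chroma_width(unsigned int fb_width, unsigned int hsub)
{
        return fb_width / hsub;
}

int main(void)
{
        printf("Q410 after the fix (hsub=1): %u\n", chroma_width(1920, 1));
        printf("4:2:0-style format (hsub=2): %u\n", chroma_width(1920, 2));
        return 0;
}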
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
9248 |
+index cc5b07f863463..e8ff70be449ac 100644 |
9249 |
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
9250 |
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c |
9251 |
+@@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) |
9252 |
+ if (gpu->identity.model == chipModel_GC700) |
9253 |
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR; |
9254 |
+ |
9255 |
++ /* These models/revisions don't have the 2D pipe bit */ |
9256 |
++ if ((gpu->identity.model == chipModel_GC500 && |
9257 |
++ gpu->identity.revision <= 2) || |
9258 |
++ gpu->identity.model == chipModel_GC300) |
9259 |
++ gpu->identity.features |= chipFeatures_PIPE_2D; |
9260 |
++ |
9261 |
+ if ((gpu->identity.model == chipModel_GC500 && |
9262 |
+ gpu->identity.revision < 2) || |
9263 |
+ (gpu->identity.model == chipModel_GC300 && |
9264 |
+@@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) |
9265 |
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); |
9266 |
+ } |
9267 |
+ |
9268 |
+- /* GC600 idle register reports zero bits where modules aren't present */ |
9269 |
+- if (gpu->identity.model == chipModel_GC600) |
9270 |
++ /* GC600/300 idle register reports zero bits where modules aren't present */ |
9271 |
++ if (gpu->identity.model == chipModel_GC600 || |
9272 |
++ gpu->identity.model == chipModel_GC300) |
9273 |
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | |
9274 |
+ VIVS_HI_IDLE_STATE_RA | |
9275 |
+ VIVS_HI_IDLE_STATE_SE | |
9276 |
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c |
9277 |
+index 4d4a715b429d1..2c2b92324a2e9 100644 |
9278 |
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c |
9279 |
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c |
9280 |
+@@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) |
9281 |
+ return drm_panel_get_modes(fsl_connector->panel, connector); |
9282 |
+ } |
9283 |
+ |
9284 |
+-static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, |
9285 |
+- struct drm_display_mode *mode) |
9286 |
++static enum drm_mode_status |
9287 |
++fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, |
9288 |
++ struct drm_display_mode *mode) |
9289 |
+ { |
9290 |
+ if (mode->hdisplay & 0xf) |
9291 |
+ return MODE_ERROR; |
9292 |
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c |
9293 |
+index 1ccdf2da042bc..64a15b636e8d4 100644 |
9294 |
+--- a/drivers/gpu/drm/i915/display/intel_dp.c |
9295 |
++++ b/drivers/gpu/drm/i915/display/intel_dp.c |
9296 |
+@@ -3245,61 +3245,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, |
9297 |
+ } |
9298 |
+ } |
9299 |
+ |
9300 |
+-static void |
9301 |
+-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, |
9302 |
+- const struct intel_crtc_state *crtc_state) |
9303 |
+-{ |
9304 |
+- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
9305 |
+- struct drm_device *dev = dig_port->base.base.dev; |
9306 |
+- struct drm_i915_private *dev_priv = to_i915(dev); |
9307 |
+- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); |
9308 |
+- enum pipe pipe = crtc->pipe; |
9309 |
+- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; |
9310 |
+- |
9311 |
+- trans_ddi_func_ctl_value = intel_de_read(dev_priv, |
9312 |
+- TRANS_DDI_FUNC_CTL(pipe)); |
9313 |
+- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); |
9314 |
+- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); |
9315 |
+- |
9316 |
+- trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | |
9317 |
+- TGL_TRANS_DDI_PORT_MASK); |
9318 |
+- trans_conf_value &= ~PIPECONF_ENABLE; |
9319 |
+- dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; |
9320 |
+- |
9321 |
+- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); |
9322 |
+- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), |
9323 |
+- trans_ddi_func_ctl_value); |
9324 |
+- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); |
9325 |
+-} |
9326 |
+- |
9327 |
+-static void |
9328 |
+-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, |
9329 |
+- const struct intel_crtc_state *crtc_state) |
9330 |
+-{ |
9331 |
+- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
9332 |
+- struct drm_device *dev = dig_port->base.base.dev; |
9333 |
+- struct drm_i915_private *dev_priv = to_i915(dev); |
9334 |
+- enum port port = dig_port->base.port; |
9335 |
+- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); |
9336 |
+- enum pipe pipe = crtc->pipe; |
9337 |
+- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; |
9338 |
+- |
9339 |
+- trans_ddi_func_ctl_value = intel_de_read(dev_priv, |
9340 |
+- TRANS_DDI_FUNC_CTL(pipe)); |
9341 |
+- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); |
9342 |
+- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); |
9343 |
+- |
9344 |
+- trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | |
9345 |
+- TGL_TRANS_DDI_SELECT_PORT(port); |
9346 |
+- trans_conf_value |= PIPECONF_ENABLE; |
9347 |
+- dp_tp_ctl_value |= DP_TP_CTL_ENABLE; |
9348 |
+- |
9349 |
+- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); |
9350 |
+- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); |
9351 |
+- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), |
9352 |
+- trans_ddi_func_ctl_value); |
9353 |
+-} |
9354 |
+- |
9355 |
+ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, |
9356 |
+ const struct intel_crtc_state *crtc_state) |
9357 |
+ { |
9358 |
+@@ -3317,14 +3262,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, |
9359 |
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, |
9360 |
+ link_status); |
9361 |
+ |
9362 |
+- intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); |
9363 |
+- |
9364 |
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); |
9365 |
+ |
9366 |
+ intel_dp_phy_pattern_update(intel_dp, crtc_state); |
9367 |
+ |
9368 |
+- intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); |
9369 |
+- |
9370 |
+ drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, |
9371 |
+ intel_dp->train_set, crtc_state->lane_count); |
9372 |
+ |
9373 |
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c |
9374 |
+index 41c783349321e..94c6bd3b00823 100644 |
9375 |
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c |
9376 |
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c |
9377 |
+@@ -387,9 +387,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) |
9378 |
+ if (--dpi->refcount != 0) |
9379 |
+ return; |
9380 |
+ |
9381 |
+- if (dpi->pinctrl && dpi->pins_gpio) |
9382 |
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); |
9383 |
+- |
9384 |
+ mtk_dpi_disable(dpi); |
9385 |
+ clk_disable_unprepare(dpi->pixel_clk); |
9386 |
+ clk_disable_unprepare(dpi->engine_clk); |
9387 |
+@@ -414,9 +411,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) |
9388 |
+ goto err_pixel; |
9389 |
+ } |
9390 |
+ |
9391 |
+- if (dpi->pinctrl && dpi->pins_dpi) |
9392 |
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); |
9393 |
+- |
9394 |
+ return 0; |
9395 |
+ |
9396 |
+ err_pixel: |
9397 |
+@@ -630,12 +624,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) |
9398 |
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge); |
9399 |
+ |
9400 |
+ mtk_dpi_power_off(dpi); |
9401 |
++ |
9402 |
++ if (dpi->pinctrl && dpi->pins_gpio) |
9403 |
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); |
9404 |
+ } |
9405 |
+ |
9406 |
+ static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) |
9407 |
+ { |
9408 |
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge); |
9409 |
+ |
9410 |
++ if (dpi->pinctrl && dpi->pins_dpi) |
9411 |
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); |
9412 |
++ |
9413 |
+ mtk_dpi_power_on(dpi); |
9414 |
+ mtk_dpi_set_display_mode(dpi, &dpi->mode); |
9415 |
+ mtk_dpi_enable(dpi); |
9416 |
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c |
9417 |
+index 3196189429bcf..7613b0fa2be6e 100644 |
9418 |
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c |
9419 |
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c |
9420 |
+@@ -1203,9 +1203,10 @@ static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi) |
9421 |
+ return mtk_hdmi_update_plugged_status(hdmi); |
9422 |
+ } |
9423 |
+ |
9424 |
+-static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, |
9425 |
+- const struct drm_display_info *info, |
9426 |
+- const struct drm_display_mode *mode) |
9427 |
++static enum drm_mode_status |
9428 |
++mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, |
9429 |
++ const struct drm_display_info *info, |
9430 |
++ const struct drm_display_mode *mode) |
9431 |
+ { |
9432 |
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); |
9433 |
+ struct drm_bridge *next_bridge; |
9434 |
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c |
9435 |
+index c0dec5b919d43..2d07c02c59f14 100644 |
9436 |
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c |
9437 |
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c |
9438 |
+@@ -1746,7 +1746,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) |
9439 |
+ |
9440 |
+ if (val == UINT_MAX) { |
9441 |
+ DRM_DEV_ERROR(dev, |
9442 |
+- "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", |
9443 |
++ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", |
9444 |
+ fuse); |
9445 |
+ return UINT_MAX; |
9446 |
+ } |
9447 |
+@@ -1756,7 +1756,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) |
9448 |
+ |
9449 |
+ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) |
9450 |
+ { |
9451 |
+- u32 supp_hw = UINT_MAX; |
9452 |
++ u32 supp_hw; |
9453 |
+ u32 speedbin; |
9454 |
+ int ret; |
9455 |
+ |
9456 |
+@@ -1768,15 +1768,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) |
9457 |
+ if (ret == -ENOENT) { |
9458 |
+ return 0; |
9459 |
+ } else if (ret) { |
9460 |
+- DRM_DEV_ERROR(dev, |
9461 |
+- "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", |
9462 |
+- ret); |
9463 |
+- goto done; |
9464 |
++ dev_err_probe(dev, ret, |
9465 |
++ "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); |
9466 |
++ return ret; |
9467 |
+ } |
9468 |
+ |
9469 |
+ supp_hw = fuse_to_supp_hw(dev, rev, speedbin); |
9470 |
+ |
9471 |
+-done: |
9472 |
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); |
9473 |
+ if (ret) |
9474 |
+ return ret; |
9475 |
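[Editor's sketch, not part of the patch] The a6xx hunk above replaces the DRM_DEV_ERROR-plus-goto arrangement with dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER) and hands back the error code, and it now propagates the error instead of continuing with supp_hw = UINT_MAX. A reduced kernel-style sketch of the resulting control flow, simplified and not the driver code:

#include <linux/device.h>
#include <linux/errno.h>

static int set_supported_hw_sketch(struct device *dev, int lookup_ret,
                                   u32 *speedbin)
{
        if (lookup_ret == -ENOENT)      /* value not provided: not an error */
                return 0;
        if (lookup_ret)
                return dev_err_probe(dev, lookup_ret,
                                     "failed to read speed-bin\n");

        /* ... map *speedbin to the supported-hw mask and register it ... */
        return 0;
}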
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c |
9476 |
+index d13fd39f05ded..15e38ad7aefb4 100644 |
9477 |
+--- a/drivers/gpu/drm/msm/dp/dp_display.c |
9478 |
++++ b/drivers/gpu/drm/msm/dp/dp_display.c |
9479 |
+@@ -840,7 +840,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, |
9480 |
+ |
9481 |
+ dp = container_of(dp_display, struct dp_display_private, dp_display); |
9482 |
+ |
9483 |
+- dp->panel->dp_mode.drm_mode = mode->drm_mode; |
9484 |
++ drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); |
9485 |
+ dp->panel->dp_mode.bpp = mode->bpp; |
9486 |
+ dp->panel->dp_mode.capabilities = mode->capabilities; |
9487 |
+ dp_panel_init_panel_info(dp->panel); |
9488 |
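[Editor's sketch, not part of the patch] The dp_display.c hunk above replaces a whole-struct assignment with drm_mode_copy(). struct drm_display_mode embeds a list head, and drm_mode_copy() copies the timing fields while leaving the destination's list linkage alone, which is why it is preferred over plain assignment for modes that may sit on a list. Minimal usage sketch, not the driver code:

#include <drm/drm_modes.h>

static void dp_set_mode_sketch(struct drm_display_mode *dst,
                               const struct drm_display_mode *src)
{
        drm_mode_copy(dst, src);        /* keeps dst->head untouched */
}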
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c |
9489 |
+index f6b09e8eca672..e1a9b52d0a292 100644 |
9490 |
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c |
9491 |
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c |
9492 |
+@@ -247,7 +247,21 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) |
9493 |
+ hdmi->pwr_clks[i] = clk; |
9494 |
+ } |
9495 |
+ |
9496 |
+- pm_runtime_enable(&pdev->dev); |
9497 |
++ hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); |
9498 |
++ /* This will catch e.g. -EPROBE_DEFER */ |
9499 |
++ if (IS_ERR(hdmi->hpd_gpiod)) { |
9500 |
++ ret = PTR_ERR(hdmi->hpd_gpiod); |
9501 |
++ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd gpio: (%d)\n", ret); |
9502 |
++ goto fail; |
9503 |
++ } |
9504 |
++ |
9505 |
++ if (!hdmi->hpd_gpiod) |
9506 |
++ DBG("failed to get HPD gpio"); |
9507 |
++ |
9508 |
++ if (hdmi->hpd_gpiod) |
9509 |
++ gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD"); |
9510 |
++ |
9511 |
++ devm_pm_runtime_enable(&pdev->dev); |
9512 |
+ |
9513 |
+ hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); |
9514 |
+ |
9515 |
+@@ -429,20 +443,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = { |
9516 |
+ .hpd_freq = hpd_clk_freq_8x74, |
9517 |
+ }; |
9518 |
+ |
9519 |
+-static const struct { |
9520 |
+- const char *name; |
9521 |
+- const bool output; |
9522 |
+- const int value; |
9523 |
+- const char *label; |
9524 |
+-} msm_hdmi_gpio_pdata[] = { |
9525 |
+- { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" }, |
9526 |
+- { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" }, |
9527 |
+- { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" }, |
9528 |
+- { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" }, |
9529 |
+- { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" }, |
9530 |
+- { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" }, |
9531 |
+-}; |
9532 |
+- |
9533 |
+ /* |
9534 |
+ * HDMI audio codec callbacks |
9535 |
+ */ |
9536 |
+@@ -555,7 +555,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) |
9537 |
+ struct hdmi_platform_config *hdmi_cfg; |
9538 |
+ struct hdmi *hdmi; |
9539 |
+ struct device_node *of_node = dev->of_node; |
9540 |
+- int i, err; |
9541 |
++ int err; |
9542 |
+ |
9543 |
+ hdmi_cfg = (struct hdmi_platform_config *) |
9544 |
+ of_device_get_match_data(dev); |
9545 |
+@@ -567,42 +567,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) |
9546 |
+ hdmi_cfg->mmio_name = "core_physical"; |
9547 |
+ hdmi_cfg->qfprom_mmio_name = "qfprom_physical"; |
9548 |
+ |
9549 |
+- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { |
9550 |
+- const char *name = msm_hdmi_gpio_pdata[i].name; |
9551 |
+- struct gpio_desc *gpiod; |
9552 |
+- |
9553 |
+- /* |
9554 |
+- * We are fetching the GPIO lines "as is" since the connector |
9555 |
+- * code is enabling and disabling the lines. Until that point |
9556 |
+- * the power-on default value will be kept. |
9557 |
+- */ |
9558 |
+- gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS); |
9559 |
+- /* This will catch e.g. -PROBE_DEFER */ |
9560 |
+- if (IS_ERR(gpiod)) |
9561 |
+- return PTR_ERR(gpiod); |
9562 |
+- if (!gpiod) { |
9563 |
+- /* Try a second time, stripping down the name */ |
9564 |
+- char name3[32]; |
9565 |
+- |
9566 |
+- /* |
9567 |
+- * Try again after stripping out the "qcom,hdmi-tx" |
9568 |
+- * prefix. This is mainly to match "hpd-gpios" used |
9569 |
+- * in the upstream bindings. |
9570 |
+- */ |
9571 |
+- if (sscanf(name, "qcom,hdmi-tx-%s", name3)) |
9572 |
+- gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS); |
9573 |
+- if (IS_ERR(gpiod)) |
9574 |
+- return PTR_ERR(gpiod); |
9575 |
+- if (!gpiod) |
9576 |
+- DBG("failed to get gpio: %s", name); |
9577 |
+- } |
9578 |
+- hdmi_cfg->gpios[i].gpiod = gpiod; |
9579 |
+- if (gpiod) |
9580 |
+- gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label); |
9581 |
+- hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output; |
9582 |
+- hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value; |
9583 |
+- } |
9584 |
+- |
9585 |
+ dev->platform_data = hdmi_cfg; |
9586 |
+ |
9587 |
+ hdmi = msm_hdmi_init(to_platform_device(dev)); |
9588 |
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h |
9589 |
+index 8d2706bec3b99..20f554312b17c 100644 |
9590 |
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.h |
9591 |
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.h |
9592 |
+@@ -19,17 +19,9 @@ |
9593 |
+ #include "msm_drv.h" |
9594 |
+ #include "hdmi.xml.h" |
9595 |
+ |
9596 |
+-#define HDMI_MAX_NUM_GPIO 6 |
9597 |
+- |
9598 |
+ struct hdmi_phy; |
9599 |
+ struct hdmi_platform_config; |
9600 |
+ |
9601 |
+-struct hdmi_gpio_data { |
9602 |
+- struct gpio_desc *gpiod; |
9603 |
+- bool output; |
9604 |
+- int value; |
9605 |
+-}; |
9606 |
+- |
9607 |
+ struct hdmi_audio { |
9608 |
+ bool enabled; |
9609 |
+ struct hdmi_audio_infoframe infoframe; |
9610 |
+@@ -61,6 +53,8 @@ struct hdmi { |
9611 |
+ struct clk **hpd_clks; |
9612 |
+ struct clk **pwr_clks; |
9613 |
+ |
9614 |
++ struct gpio_desc *hpd_gpiod; |
9615 |
++ |
9616 |
+ struct hdmi_phy *phy; |
9617 |
+ struct device *phy_dev; |
9618 |
+ |
9619 |
+@@ -109,9 +103,6 @@ struct hdmi_platform_config { |
9620 |
+ /* clks that need to be on for screen pwr (ie pixel clk): */ |
9621 |
+ const char **pwr_clk_names; |
9622 |
+ int pwr_clk_cnt; |
9623 |
+- |
9624 |
+- /* gpio's: */ |
9625 |
+- struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO]; |
9626 |
+ }; |
9627 |
+ |
9628 |
+ struct hdmi_bridge { |
9629 |
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c |
9630 |
+index c3a236bb952ca..52ebe562ca9be 100644 |
9631 |
+--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c |
9632 |
++++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c |
9633 |
+@@ -60,48 +60,6 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi) |
9634 |
+ } |
9635 |
+ } |
9636 |
+ |
9637 |
+-static int gpio_config(struct hdmi *hdmi, bool on) |
9638 |
+-{ |
9639 |
+- const struct hdmi_platform_config *config = hdmi->config; |
9640 |
+- int i; |
9641 |
+- |
9642 |
+- if (on) { |
9643 |
+- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { |
9644 |
+- struct hdmi_gpio_data gpio = config->gpios[i]; |
9645 |
+- |
9646 |
+- if (gpio.gpiod) { |
9647 |
+- if (gpio.output) { |
9648 |
+- gpiod_direction_output(gpio.gpiod, |
9649 |
+- gpio.value); |
9650 |
+- } else { |
9651 |
+- gpiod_direction_input(gpio.gpiod); |
9652 |
+- gpiod_set_value_cansleep(gpio.gpiod, |
9653 |
+- gpio.value); |
9654 |
+- } |
9655 |
+- } |
9656 |
+- } |
9657 |
+- |
9658 |
+- DBG("gpio on"); |
9659 |
+- } else { |
9660 |
+- for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { |
9661 |
+- struct hdmi_gpio_data gpio = config->gpios[i]; |
9662 |
+- |
9663 |
+- if (!gpio.gpiod) |
9664 |
+- continue; |
9665 |
+- |
9666 |
+- if (gpio.output) { |
9667 |
+- int value = gpio.value ? 0 : 1; |
9668 |
+- |
9669 |
+- gpiod_set_value_cansleep(gpio.gpiod, value); |
9670 |
+- } |
9671 |
+- } |
9672 |
+- |
9673 |
+- DBG("gpio off"); |
9674 |
+- } |
9675 |
+- |
9676 |
+- return 0; |
9677 |
+-} |
9678 |
+- |
9679 |
+ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) |
9680 |
+ { |
9681 |
+ const struct hdmi_platform_config *config = hdmi->config; |
9682 |
+@@ -157,11 +115,8 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge) |
9683 |
+ goto fail; |
9684 |
+ } |
9685 |
+ |
9686 |
+- ret = gpio_config(hdmi, true); |
9687 |
+- if (ret) { |
9688 |
+- DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret); |
9689 |
+- goto fail; |
9690 |
+- } |
9691 |
++ if (hdmi->hpd_gpiod) |
9692 |
++ gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1); |
9693 |
+ |
9694 |
+ pm_runtime_get_sync(dev); |
9695 |
+ enable_hpd_clocks(hdmi, true); |
9696 |
+@@ -210,10 +165,6 @@ void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge) |
9697 |
+ enable_hpd_clocks(hdmi, false); |
9698 |
+ pm_runtime_put_autosuspend(dev); |
9699 |
+ |
9700 |
+- ret = gpio_config(hdmi, false); |
9701 |
+- if (ret) |
9702 |
+- dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret); |
9703 |
+- |
9704 |
+ ret = pinctrl_pm_select_sleep_state(dev); |
9705 |
+ if (ret) |
9706 |
+ dev_warn(dev, "pinctrl state chg failed: %d\n", ret); |
9707 |
+@@ -275,10 +226,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) |
9708 |
+ #define HPD_GPIO_INDEX 2 |
9709 |
+ static enum drm_connector_status detect_gpio(struct hdmi *hdmi) |
9710 |
+ { |
9711 |
+- const struct hdmi_platform_config *config = hdmi->config; |
9712 |
+- struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; |
9713 |
+- |
9714 |
+- return gpiod_get_value(hpd_gpio.gpiod) ? |
9715 |
++ return gpiod_get_value(hdmi->hpd_gpiod) ? |
9716 |
+ connector_status_connected : |
9717 |
+ connector_status_disconnected; |
9718 |
+ } |
9719 |
+@@ -288,8 +236,6 @@ enum drm_connector_status msm_hdmi_bridge_detect( |
9720 |
+ { |
9721 |
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); |
9722 |
+ struct hdmi *hdmi = hdmi_bridge->hdmi; |
9723 |
+- const struct hdmi_platform_config *config = hdmi->config; |
9724 |
+- struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; |
9725 |
+ enum drm_connector_status stat_gpio, stat_reg; |
9726 |
+ int retry = 20; |
9727 |
+ |
9728 |
+@@ -297,7 +243,7 @@ enum drm_connector_status msm_hdmi_bridge_detect( |
9729 |
+ * some platforms may not have hpd gpio. Rely only on the status |
9730 |
+ * provided by REG_HDMI_HPD_INT_STATUS in this case. |
9731 |
+ */ |
9732 |
+- if (!hpd_gpio.gpiod) |
9733 |
++ if (!hdmi->hpd_gpiod) |
9734 |
+ return detect_reg(hdmi); |
9735 |
+ |
9736 |
+ do { |
9737 |
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c |
9738 |
+index 320a2a8fd4592..098955526b687 100644 |
9739 |
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c |
9740 |
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c |
9741 |
+@@ -384,7 +384,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) |
9742 |
+ st7701->dsi = dsi; |
9743 |
+ st7701->desc = desc; |
9744 |
+ |
9745 |
+- return mipi_dsi_attach(dsi); |
9746 |
++ ret = mipi_dsi_attach(dsi); |
9747 |
++ if (ret) |
9748 |
++ goto err_attach; |
9749 |
++ |
9750 |
++ return 0; |
9751 |
++ |
9752 |
++err_attach: |
9753 |
++ drm_panel_remove(&st7701->panel); |
9754 |
++ return ret; |
9755 |
+ } |
9756 |
+ |
9757 |
+ static int st7701_dsi_remove(struct mipi_dsi_device *dsi) |
9758 |
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c |
9759 |
+index 33121655d50bb..63bdc9f6fc243 100644 |
9760 |
+--- a/drivers/gpu/drm/radeon/radeon_bios.c |
9761 |
++++ b/drivers/gpu/drm/radeon/radeon_bios.c |
9762 |
+@@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) |
9763 |
+ |
9764 |
+ if (!found) |
9765 |
+ return false; |
9766 |
++ pci_dev_put(pdev); |
9767 |
+ |
9768 |
+ rdev->bios = kmalloc(size, GFP_KERNEL); |
9769 |
+ if (!rdev->bios) { |
9770 |
+@@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
9771 |
+ acpi_size tbl_size; |
9772 |
+ UEFI_ACPI_VFCT *vfct; |
9773 |
+ unsigned offset; |
9774 |
++ bool r = false; |
9775 |
+ |
9776 |
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) |
9777 |
+ return false; |
9778 |
+ tbl_size = hdr->length; |
9779 |
+ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { |
9780 |
+ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); |
9781 |
+- return false; |
9782 |
++ goto out; |
9783 |
+ } |
9784 |
+ |
9785 |
+ vfct = (UEFI_ACPI_VFCT *)hdr; |
9786 |
+@@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
9787 |
+ offset += sizeof(VFCT_IMAGE_HEADER); |
9788 |
+ if (offset > tbl_size) { |
9789 |
+ DRM_ERROR("ACPI VFCT image header truncated\n"); |
9790 |
+- return false; |
9791 |
++ goto out; |
9792 |
+ } |
9793 |
+ |
9794 |
+ offset += vhdr->ImageLength; |
9795 |
+ if (offset > tbl_size) { |
9796 |
+ DRM_ERROR("ACPI VFCT image truncated\n"); |
9797 |
+- return false; |
9798 |
++ goto out; |
9799 |
+ } |
9800 |
+ |
9801 |
+ if (vhdr->ImageLength && |
9802 |
+@@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
9803 |
+ rdev->bios = kmemdup(&vbios->VbiosContent, |
9804 |
+ vhdr->ImageLength, |
9805 |
+ GFP_KERNEL); |
9806 |
++ if (rdev->bios) |
9807 |
++ r = true; |
9808 |
+ |
9809 |
+- if (!rdev->bios) |
9810 |
+- return false; |
9811 |
+- return true; |
9812 |
++ goto out; |
9813 |
+ } |
9814 |
+ } |
9815 |
+ |
9816 |
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); |
9817 |
+- return false; |
9818 |
++ |
9819 |
++out: |
9820 |
++ acpi_put_table(hdr); |
9821 |
++ return r; |
9822 |
+ } |
9823 |
+ #else |
9824 |
+ static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) |
9825 |
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c |
9826 |
+index 6b5d0722afa6c..20e63cadec8c7 100644 |
9827 |
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c |
9828 |
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c |
9829 |
+@@ -565,7 +565,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, |
9830 |
+ video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); |
9831 |
+ video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); |
9832 |
+ |
9833 |
+- memcpy(&dp->mode, adjusted, sizeof(*mode)); |
9834 |
++ drm_mode_copy(&dp->mode, adjusted); |
9835 |
+ } |
9836 |
+ |
9837 |
+ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) |
9838 |
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c |
9839 |
+index 7afdc54eb3ec1..78120da5e63aa 100644 |
9840 |
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c |
9841 |
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c |
9842 |
+@@ -488,7 +488,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder, |
9843 |
+ inno_hdmi_setup(hdmi, adj_mode); |
9844 |
+ |
9845 |
+ /* Store the display mode for plugin/DPMS poweron events */ |
9846 |
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); |
9847 |
++ drm_mode_copy(&hdmi->previous_mode, adj_mode); |
9848 |
+ } |
9849 |
+ |
9850 |
+ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder) |
9851 |
+diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c |
9852 |
+index 1c546c3a89984..17e7c40a9e7b9 100644 |
9853 |
+--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c |
9854 |
++++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c |
9855 |
+@@ -383,7 +383,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder, |
9856 |
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder); |
9857 |
+ |
9858 |
+ /* Store the display mode for plugin/DPMS poweron events. */ |
9859 |
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); |
9860 |
++ drm_mode_copy(&hdmi->previous_mode, adj_mode); |
9861 |
+ } |
9862 |
+ |
9863 |
+ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder) |
9864 |
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c |
9865 |
+index 551653940e396..2550429df49fe 100644 |
9866 |
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c |
9867 |
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c |
9868 |
+@@ -145,7 +145,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds) |
9869 |
+ DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret); |
9870 |
+ return ret; |
9871 |
+ } |
9872 |
+- ret = pm_runtime_get_sync(lvds->dev); |
9873 |
++ ret = pm_runtime_resume_and_get(lvds->dev); |
9874 |
+ if (ret < 0) { |
9875 |
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); |
9876 |
+ clk_disable(lvds->pclk); |
9877 |
+@@ -329,16 +329,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds) |
9878 |
+ { |
9879 |
+ int ret; |
9880 |
+ |
9881 |
+- ret = pm_runtime_get_sync(lvds->dev); |
9882 |
++ ret = pm_runtime_resume_and_get(lvds->dev); |
9883 |
+ if (ret < 0) { |
9884 |
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret); |
9885 |
+ return ret; |
9886 |
+ } |
9887 |
+ |
9888 |
+ /* Enable LVDS mode */ |
9889 |
+- return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, |
9890 |
++ ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1, |
9891 |
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1), |
9892 |
+ PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1)); |
9893 |
++ if (ret) |
9894 |
++ pm_runtime_put(lvds->dev); |
9895 |
++ |
9896 |
++ return ret; |
9897 |
+ } |
9898 |
+ |
9899 |
+ static void px30_lvds_poweroff(struct rockchip_lvds *lvds) |
9900 |
+diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c |
9901 |
+index b6ee8a82e656c..577c477b5f467 100644 |
9902 |
+--- a/drivers/gpu/drm/sti/sti_dvo.c |
9903 |
++++ b/drivers/gpu/drm/sti/sti_dvo.c |
9904 |
+@@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge, |
9905 |
+ |
9906 |
+ DRM_DEBUG_DRIVER("\n"); |
9907 |
+ |
9908 |
+- memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode)); |
9909 |
++ drm_mode_copy(&dvo->mode, mode); |
9910 |
+ |
9911 |
+ /* According to the path used (main or aux), the dvo clocks should |
9912 |
+ * have a different parent clock. */ |
9913 |
+@@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector) |
9914 |
+ |
9915 |
+ #define CLK_TOLERANCE_HZ 50 |
9916 |
+ |
9917 |
+-static int sti_dvo_connector_mode_valid(struct drm_connector *connector, |
9918 |
+- struct drm_display_mode *mode) |
9919 |
++static enum drm_mode_status |
9920 |
++sti_dvo_connector_mode_valid(struct drm_connector *connector, |
9921 |
++ struct drm_display_mode *mode) |
9922 |
+ { |
9923 |
+ int target = mode->clock * 1000; |
9924 |
+ int target_min = target - CLK_TOLERANCE_HZ; |
9925 |
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c |
9926 |
+index 03f3377f918c0..aa54a6400ab84 100644 |
9927 |
+--- a/drivers/gpu/drm/sti/sti_hda.c |
9928 |
++++ b/drivers/gpu/drm/sti/sti_hda.c |
9929 |
+@@ -523,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge, |
9930 |
+ |
9931 |
+ DRM_DEBUG_DRIVER("\n"); |
9932 |
+ |
9933 |
+- memcpy(&hda->mode, mode, sizeof(struct drm_display_mode)); |
9934 |
++ drm_mode_copy(&hda->mode, mode); |
9935 |
+ |
9936 |
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) { |
9937 |
+ DRM_ERROR("Undefined mode\n"); |
9938 |
+@@ -600,8 +600,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector) |
9939 |
+ |
9940 |
+ #define CLK_TOLERANCE_HZ 50 |
9941 |
+ |
9942 |
+-static int sti_hda_connector_mode_valid(struct drm_connector *connector, |
9943 |
+- struct drm_display_mode *mode) |
9944 |
++static enum drm_mode_status |
9945 |
++sti_hda_connector_mode_valid(struct drm_connector *connector, |
9946 |
++ struct drm_display_mode *mode) |
9947 |
+ { |
9948 |
+ int target = mode->clock * 1000; |
9949 |
+ int target_min = target - CLK_TOLERANCE_HZ; |
9950 |
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c |
9951 |
+index f3ace11209dd7..36bea1551ef84 100644 |
9952 |
+--- a/drivers/gpu/drm/sti/sti_hdmi.c |
9953 |
++++ b/drivers/gpu/drm/sti/sti_hdmi.c |
9954 |
+@@ -940,7 +940,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge, |
9955 |
+ DRM_DEBUG_DRIVER("\n"); |
9956 |
+ |
9957 |
+ /* Copy the drm display mode in the connector local structure */ |
9958 |
+- memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode)); |
9959 |
++ drm_mode_copy(&hdmi->mode, mode); |
9960 |
+ |
9961 |
+ /* Update clock framerate according to the selected mode */ |
9962 |
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000); |
9963 |
+@@ -1003,8 +1003,9 @@ fail: |
9964 |
+ |
9965 |
+ #define CLK_TOLERANCE_HZ 50 |
9966 |
+ |
9967 |
+-static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, |
9968 |
+- struct drm_display_mode *mode) |
9969 |
++static enum drm_mode_status |
9970 |
++sti_hdmi_connector_mode_valid(struct drm_connector *connector, |
9971 |
++ struct drm_display_mode *mode) |
9972 |
+ { |
9973 |
+ int target = mode->clock * 1000; |
9974 |
+ int target_min = target - CLK_TOLERANCE_HZ; |
9975 |
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c |
9976 |
+index a29d64f875635..abb409b08bc64 100644 |
9977 |
+--- a/drivers/gpu/drm/tegra/dc.c |
9978 |
++++ b/drivers/gpu/drm/tegra/dc.c |
9979 |
+@@ -3022,8 +3022,10 @@ static int tegra_dc_probe(struct platform_device *pdev) |
9980 |
+ usleep_range(2000, 4000); |
9981 |
+ |
9982 |
+ err = reset_control_assert(dc->rst); |
9983 |
+- if (err < 0) |
9984 |
++ if (err < 0) { |
9985 |
++ clk_disable_unprepare(dc->clk); |
9986 |
+ return err; |
9987 |
++ } |
9988 |
+ |
9989 |
+ usleep_range(2000, 4000); |
9990 |
+ |
9991 |
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
9992 |
+index ab149b80f86c1..911a23a9bcd1b 100644 |
9993 |
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
9994 |
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c |
9995 |
+@@ -166,6 +166,10 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) |
9996 |
+ in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8, |
9997 |
+ &cl_data->sensor_dma_addr[i], |
9998 |
+ GFP_KERNEL); |
9999 |
++ if (!in_data->sensor_virt_addr[i]) { |
10000 |
++ rc = -ENOMEM; |
10001 |
++ goto cleanup; |
10002 |
++ } |
10003 |
+ cl_data->sensor_sts[i] = SENSOR_DISABLED; |
10004 |
+ cl_data->sensor_requested_cnt[i] = 0; |
10005 |
+ cl_data->cur_hid_dev = i; |
10006 |
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c |
10007 |
+index de52e9f7bb8cb..560eeec4035aa 100644 |
10008 |
+--- a/drivers/hid/hid-mcp2221.c |
10009 |
++++ b/drivers/hid/hid-mcp2221.c |
10010 |
+@@ -840,12 +840,19 @@ static int mcp2221_probe(struct hid_device *hdev, |
10011 |
+ return ret; |
10012 |
+ } |
10013 |
+ |
10014 |
+- ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); |
10015 |
++ /* |
10016 |
++ * This driver uses the .raw_event callback and therefore does not need any |
10017 |
++ * HID_CONNECT_xxx flags. |
10018 |
++ */ |
10019 |
++ ret = hid_hw_start(hdev, 0); |
10020 |
+ if (ret) { |
10021 |
+ hid_err(hdev, "can't start hardware\n"); |
10022 |
+ return ret; |
10023 |
+ } |
10024 |
+ |
10025 |
++ hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8, |
10026 |
++ hdev->version & 0xff, hdev->name, hdev->phys); |
10027 |
++ |
10028 |
+ ret = hid_hw_open(hdev); |
10029 |
+ if (ret) { |
10030 |
+ hid_err(hdev, "can't open device\n"); |
10031 |
+@@ -870,8 +877,7 @@ static int mcp2221_probe(struct hid_device *hdev, |
10032 |
+ mcp->adapter.retries = 1; |
10033 |
+ mcp->adapter.dev.parent = &hdev->dev; |
10034 |
+ snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), |
10035 |
+- "MCP2221 usb-i2c bridge on hidraw%d", |
10036 |
+- ((struct hidraw *)hdev->hidraw)->minor); |
10037 |
++ "MCP2221 usb-i2c bridge"); |
10038 |
+ |
10039 |
+ ret = i2c_add_adapter(&mcp->adapter); |
10040 |
+ if (ret) { |
10041 |
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c |
10042 |
+index 32c2306e240d6..602465ad27458 100644 |
10043 |
+--- a/drivers/hid/hid-sensor-custom.c |
10044 |
++++ b/drivers/hid/hid-sensor-custom.c |
10045 |
+@@ -62,7 +62,7 @@ struct hid_sensor_sample { |
10046 |
+ u32 raw_len; |
10047 |
+ } __packed; |
10048 |
+ |
10049 |
+-static struct attribute hid_custom_attrs[] = { |
10050 |
++static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = { |
10051 |
+ {.name = "name", .mode = S_IRUGO}, |
10052 |
+ {.name = "units", .mode = S_IRUGO}, |
10053 |
+ {.name = "unit-expo", .mode = S_IRUGO}, |
10054 |
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
10055 |
+index 7e67c41d97a44..2b6388da545e9 100644 |
10056 |
+--- a/drivers/hid/wacom_sys.c |
10057 |
++++ b/drivers/hid/wacom_sys.c |
10058 |
+@@ -160,6 +160,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, |
10059 |
+ { |
10060 |
+ struct wacom *wacom = hid_get_drvdata(hdev); |
10061 |
+ |
10062 |
++ if (wacom->wacom_wac.features.type == BOOTLOADER) |
10063 |
++ return 0; |
10064 |
++ |
10065 |
+ if (size > WACOM_PKGLEN_MAX) |
10066 |
+ return 1; |
10067 |
+ |
10068 |
+@@ -2792,6 +2795,11 @@ static int wacom_probe(struct hid_device *hdev, |
10069 |
+ return error; |
10070 |
+ } |
10071 |
+ |
10072 |
++ if (features->type == BOOTLOADER) { |
10073 |
++ hid_warn(hdev, "Using device in hidraw-only mode"); |
10074 |
++ return hid_hw_start(hdev, HID_CONNECT_HIDRAW); |
10075 |
++ } |
10076 |
++ |
10077 |
+ error = wacom_parse_and_register(wacom, false); |
10078 |
+ if (error) |
10079 |
+ return error; |
10080 |
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
10081 |
+index c454231afec89..546aaaaec016e 100644 |
10082 |
+--- a/drivers/hid/wacom_wac.c |
10083 |
++++ b/drivers/hid/wacom_wac.c |
10084 |
+@@ -4813,6 +4813,9 @@ static const struct wacom_features wacom_features_0x3c8 = |
10085 |
+ static const struct wacom_features wacom_features_HID_ANY_ID = |
10086 |
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; |
10087 |
+ |
10088 |
++static const struct wacom_features wacom_features_0x94 = |
10089 |
++ { "Wacom Bootloader", .type = BOOTLOADER }; |
10090 |
++ |
10091 |
+ #define USB_DEVICE_WACOM(prod) \ |
10092 |
+ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ |
10093 |
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod |
10094 |
+@@ -4886,6 +4889,7 @@ const struct hid_device_id wacom_ids[] = { |
10095 |
+ { USB_DEVICE_WACOM(0x84) }, |
10096 |
+ { USB_DEVICE_WACOM(0x90) }, |
10097 |
+ { USB_DEVICE_WACOM(0x93) }, |
10098 |
++ { USB_DEVICE_WACOM(0x94) }, |
10099 |
+ { USB_DEVICE_WACOM(0x97) }, |
10100 |
+ { USB_DEVICE_WACOM(0x9A) }, |
10101 |
+ { USB_DEVICE_WACOM(0x9F) }, |
10102 |
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h |
10103 |
+index 466b62cc16dc1..44e0763a0ede8 100644 |
10104 |
+--- a/drivers/hid/wacom_wac.h |
10105 |
++++ b/drivers/hid/wacom_wac.h |
10106 |
+@@ -242,6 +242,7 @@ enum { |
10107 |
+ MTTPC, |
10108 |
+ MTTPC_B, |
10109 |
+ HID_GENERIC, |
10110 |
++ BOOTLOADER, |
10111 |
+ MAX_TYPE |
10112 |
+ }; |
10113 |
+ |
10114 |
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c |
10115 |
+index eb98201583185..26f2c3c012978 100644 |
10116 |
+--- a/drivers/hsi/controllers/omap_ssi_core.c |
10117 |
++++ b/drivers/hsi/controllers/omap_ssi_core.c |
10118 |
+@@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd) |
10119 |
+ platform_set_drvdata(pd, ssi); |
10120 |
+ |
10121 |
+ err = ssi_add_controller(ssi, pd); |
10122 |
+- if (err < 0) |
10123 |
++ if (err < 0) { |
10124 |
++ hsi_put_controller(ssi); |
10125 |
+ goto out1; |
10126 |
++ } |
10127 |
+ |
10128 |
+ pm_runtime_enable(&pd->dev); |
10129 |
+ |
10130 |
+@@ -536,9 +538,9 @@ out3: |
10131 |
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports); |
10132 |
+ out2: |
10133 |
+ ssi_remove_controller(ssi); |
10134 |
++ pm_runtime_disable(&pd->dev); |
10135 |
+ out1: |
10136 |
+ platform_set_drvdata(pd, NULL); |
10137 |
+- pm_runtime_disable(&pd->dev); |
10138 |
+ |
10139 |
+ return err; |
10140 |
+ } |
10141 |
+@@ -629,7 +631,13 @@ static int __init ssi_init(void) { |
10142 |
+ if (ret) |
10143 |
+ return ret; |
10144 |
+ |
10145 |
+- return platform_driver_register(&ssi_port_pdriver); |
10146 |
++ ret = platform_driver_register(&ssi_port_pdriver); |
10147 |
++ if (ret) { |
10148 |
++ platform_driver_unregister(&ssi_pdriver); |
10149 |
++ return ret; |
10150 |
++ } |
10151 |
++ |
10152 |
++ return 0; |
10153 |
+ } |
10154 |
+ module_init(ssi_init); |
10155 |
+ |
10156 |
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c |
10157 |
+index f4091143213b0..1475ea77351ef 100644 |
10158 |
+--- a/drivers/hv/ring_buffer.c |
10159 |
++++ b/drivers/hv/ring_buffer.c |
10160 |
+@@ -249,6 +249,19 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) |
10161 |
+ ring_info->pkt_buffer_size = 0; |
10162 |
+ } |
10163 |
+ |
10164 |
++/* |
10165 |
++ * Check if the ring buffer spinlock is available to take or not; used on |
10166 |
++ * atomic contexts, like panic path (see the Hyper-V framebuffer driver). |
10167 |
++ */ |
10168 |
++ |
10169 |
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel) |
10170 |
++{ |
10171 |
++ struct hv_ring_buffer_info *rinfo = &channel->outbound; |
10172 |
++ |
10173 |
++ return spin_is_locked(&rinfo->ring_lock); |
10174 |
++} |
10175 |
++EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy); |
10176 |
++ |
10177 |
+ /* Write to the ring buffer. */ |
10178 |
+ int hv_ringbuffer_write(struct vmbus_channel *channel, |
10179 |
+ const struct kvec *kv_list, u32 kv_count, |
10180 |
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig |
10181 |
+index 51f1caa10d113..17ba1d9ff0751 100644 |
10182 |
+--- a/drivers/hwmon/Kconfig |
10183 |
++++ b/drivers/hwmon/Kconfig |
10184 |
+@@ -776,6 +776,7 @@ config SENSORS_IT87 |
10185 |
+ config SENSORS_JC42 |
10186 |
+ tristate "JEDEC JC42.4 compliant memory module temperature sensors" |
10187 |
+ depends on I2C |
10188 |
++ select REGMAP_I2C |
10189 |
+ help |
10190 |
+ If you say yes here, you get support for JEDEC JC42.4 compliant |
10191 |
+ temperature sensors, which are used on many DDR3 memory modules for |
10192 |
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c |
10193 |
+index 4a03d010ec5a8..52f341d46029b 100644 |
10194 |
+--- a/drivers/hwmon/jc42.c |
10195 |
++++ b/drivers/hwmon/jc42.c |
10196 |
+@@ -19,6 +19,7 @@ |
10197 |
+ #include <linux/err.h> |
10198 |
+ #include <linux/mutex.h> |
10199 |
+ #include <linux/of.h> |
10200 |
++#include <linux/regmap.h> |
10201 |
+ |
10202 |
+ /* Addresses to scan */ |
10203 |
+ static const unsigned short normal_i2c[] = { |
10204 |
+@@ -189,31 +190,14 @@ static struct jc42_chips jc42_chips[] = { |
10205 |
+ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK }, |
10206 |
+ }; |
10207 |
+ |
10208 |
+-enum temp_index { |
10209 |
+- t_input = 0, |
10210 |
+- t_crit, |
10211 |
+- t_min, |
10212 |
+- t_max, |
10213 |
+- t_num_temp |
10214 |
+-}; |
10215 |
+- |
10216 |
+-static const u8 temp_regs[t_num_temp] = { |
10217 |
+- [t_input] = JC42_REG_TEMP, |
10218 |
+- [t_crit] = JC42_REG_TEMP_CRITICAL, |
10219 |
+- [t_min] = JC42_REG_TEMP_LOWER, |
10220 |
+- [t_max] = JC42_REG_TEMP_UPPER, |
10221 |
+-}; |
10222 |
+- |
10223 |
+ /* Each client has this additional data */ |
10224 |
+ struct jc42_data { |
10225 |
+- struct i2c_client *client; |
10226 |
+ struct mutex update_lock; /* protect register access */ |
10227 |
++ struct regmap *regmap; |
10228 |
+ bool extended; /* true if extended range supported */ |
10229 |
+ bool valid; |
10230 |
+- unsigned long last_updated; /* In jiffies */ |
10231 |
+ u16 orig_config; /* original configuration */ |
10232 |
+ u16 config; /* current configuration */ |
10233 |
+- u16 temp[t_num_temp];/* Temperatures */ |
10234 |
+ }; |
10235 |
+ |
10236 |
+ #define JC42_TEMP_MIN_EXTENDED (-40000) |
10237 |
+@@ -238,85 +222,102 @@ static int jc42_temp_from_reg(s16 reg) |
10238 |
+ return reg * 125 / 2; |
10239 |
+ } |
10240 |
+ |
10241 |
+-static struct jc42_data *jc42_update_device(struct device *dev) |
10242 |
+-{ |
10243 |
+- struct jc42_data *data = dev_get_drvdata(dev); |
10244 |
+- struct i2c_client *client = data->client; |
10245 |
+- struct jc42_data *ret = data; |
10246 |
+- int i, val; |
10247 |
+- |
10248 |
+- mutex_lock(&data->update_lock); |
10249 |
+- |
10250 |
+- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { |
10251 |
+- for (i = 0; i < t_num_temp; i++) { |
10252 |
+- val = i2c_smbus_read_word_swapped(client, temp_regs[i]); |
10253 |
+- if (val < 0) { |
10254 |
+- ret = ERR_PTR(val); |
10255 |
+- goto abort; |
10256 |
+- } |
10257 |
+- data->temp[i] = val; |
10258 |
+- } |
10259 |
+- data->last_updated = jiffies; |
10260 |
+- data->valid = true; |
10261 |
+- } |
10262 |
+-abort: |
10263 |
+- mutex_unlock(&data->update_lock); |
10264 |
+- return ret; |
10265 |
+-} |
10266 |
+- |
10267 |
+ static int jc42_read(struct device *dev, enum hwmon_sensor_types type, |
10268 |
+ u32 attr, int channel, long *val) |
10269 |
+ { |
10270 |
+- struct jc42_data *data = jc42_update_device(dev); |
10271 |
+- int temp, hyst; |
10272 |
++ struct jc42_data *data = dev_get_drvdata(dev); |
10273 |
++ unsigned int regval; |
10274 |
++ int ret, temp, hyst; |
10275 |
+ |
10276 |
+- if (IS_ERR(data)) |
10277 |
+- return PTR_ERR(data); |
10278 |
++ mutex_lock(&data->update_lock); |
10279 |
+ |
10280 |
+ switch (attr) { |
10281 |
+ case hwmon_temp_input: |
10282 |
+- *val = jc42_temp_from_reg(data->temp[t_input]); |
10283 |
+- return 0; |
10284 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); |
10285 |
++ if (ret) |
10286 |
++ break; |
10287 |
++ |
10288 |
++ *val = jc42_temp_from_reg(regval); |
10289 |
++ break; |
10290 |
+ case hwmon_temp_min: |
10291 |
+- *val = jc42_temp_from_reg(data->temp[t_min]); |
10292 |
+- return 0; |
10293 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_LOWER, ®val); |
10294 |
++ if (ret) |
10295 |
++ break; |
10296 |
++ |
10297 |
++ *val = jc42_temp_from_reg(regval); |
10298 |
++ break; |
10299 |
+ case hwmon_temp_max: |
10300 |
+- *val = jc42_temp_from_reg(data->temp[t_max]); |
10301 |
+- return 0; |
10302 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, ®val); |
10303 |
++ if (ret) |
10304 |
++ break; |
10305 |
++ |
10306 |
++ *val = jc42_temp_from_reg(regval); |
10307 |
++ break; |
10308 |
+ case hwmon_temp_crit: |
10309 |
+- *val = jc42_temp_from_reg(data->temp[t_crit]); |
10310 |
+- return 0; |
10311 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, |
10312 |
++ ®val); |
10313 |
++ if (ret) |
10314 |
++ break; |
10315 |
++ |
10316 |
++ *val = jc42_temp_from_reg(regval); |
10317 |
++ break; |
10318 |
+ case hwmon_temp_max_hyst: |
10319 |
+- temp = jc42_temp_from_reg(data->temp[t_max]); |
10320 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, ®val); |
10321 |
++ if (ret) |
10322 |
++ break; |
10323 |
++ |
10324 |
++ temp = jc42_temp_from_reg(regval); |
10325 |
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) |
10326 |
+ >> JC42_CFG_HYST_SHIFT]; |
10327 |
+ *val = temp - hyst; |
10328 |
+- return 0; |
10329 |
++ break; |
10330 |
+ case hwmon_temp_crit_hyst: |
10331 |
+- temp = jc42_temp_from_reg(data->temp[t_crit]); |
10332 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, |
10333 |
++ ®val); |
10334 |
++ if (ret) |
10335 |
++ break; |
10336 |
++ |
10337 |
++ temp = jc42_temp_from_reg(regval); |
10338 |
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) |
10339 |
+ >> JC42_CFG_HYST_SHIFT]; |
10340 |
+ *val = temp - hyst; |
10341 |
+- return 0; |
10342 |
++ break; |
10343 |
+ case hwmon_temp_min_alarm: |
10344 |
+- *val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1; |
10345 |
+- return 0; |
10346 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); |
10347 |
++ if (ret) |
10348 |
++ break; |
10349 |
++ |
10350 |
++ *val = (regval >> JC42_ALARM_MIN_BIT) & 1; |
10351 |
++ break; |
10352 |
+ case hwmon_temp_max_alarm: |
10353 |
+- *val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1; |
10354 |
+- return 0; |
10355 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); |
10356 |
++ if (ret) |
10357 |
++ break; |
10358 |
++ |
10359 |
++ *val = (regval >> JC42_ALARM_MAX_BIT) & 1; |
10360 |
++ break; |
10361 |
+ case hwmon_temp_crit_alarm: |
10362 |
+- *val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1; |
10363 |
+- return 0; |
10364 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP, ®val); |
10365 |
++ if (ret) |
10366 |
++ break; |
10367 |
++ |
10368 |
++ *val = (regval >> JC42_ALARM_CRIT_BIT) & 1; |
10369 |
++ break; |
10370 |
+ default: |
10371 |
+- return -EOPNOTSUPP; |
10372 |
++ ret = -EOPNOTSUPP; |
10373 |
++ break; |
10374 |
+ } |
10375 |
++ |
10376 |
++ mutex_unlock(&data->update_lock); |
10377 |
++ |
10378 |
++ return ret; |
10379 |
+ } |
10380 |
+ |
10381 |
+ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, |
10382 |
+ u32 attr, int channel, long val) |
10383 |
+ { |
10384 |
+ struct jc42_data *data = dev_get_drvdata(dev); |
10385 |
+- struct i2c_client *client = data->client; |
10386 |
++ unsigned int regval; |
10387 |
+ int diff, hyst; |
10388 |
+ int ret; |
10389 |
+ |
10390 |
+@@ -324,21 +325,23 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, |
10391 |
+ |
10392 |
+ switch (attr) { |
10393 |
+ case hwmon_temp_min: |
10394 |
+- data->temp[t_min] = jc42_temp_to_reg(val, data->extended); |
10395 |
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min], |
10396 |
+- data->temp[t_min]); |
10397 |
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER, |
10398 |
++ jc42_temp_to_reg(val, data->extended)); |
10399 |
+ break; |
10400 |
+ case hwmon_temp_max: |
10401 |
+- data->temp[t_max] = jc42_temp_to_reg(val, data->extended); |
10402 |
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max], |
10403 |
+- data->temp[t_max]); |
10404 |
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_UPPER, |
10405 |
++ jc42_temp_to_reg(val, data->extended)); |
10406 |
+ break; |
10407 |
+ case hwmon_temp_crit: |
10408 |
+- data->temp[t_crit] = jc42_temp_to_reg(val, data->extended); |
10409 |
+- ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit], |
10410 |
+- data->temp[t_crit]); |
10411 |
++ ret = regmap_write(data->regmap, JC42_REG_TEMP_CRITICAL, |
10412 |
++ jc42_temp_to_reg(val, data->extended)); |
10413 |
+ break; |
10414 |
+ case hwmon_temp_crit_hyst: |
10415 |
++ ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL, |
10416 |
++ ®val); |
10417 |
++ if (ret) |
10418 |
++ break; |
10419 |
++ |
10420 |
+ /* |
10421 |
+ * JC42.4 compliant chips only support four hysteresis values. |
10422 |
+ * Pick best choice and go from there. |
10423 |
+@@ -346,7 +349,7 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, |
10424 |
+ val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED |
10425 |
+ : JC42_TEMP_MIN) - 6000, |
10426 |
+ JC42_TEMP_MAX); |
10427 |
+- diff = jc42_temp_from_reg(data->temp[t_crit]) - val; |
10428 |
++ diff = jc42_temp_from_reg(regval) - val; |
10429 |
+ hyst = 0; |
10430 |
+ if (diff > 0) { |
10431 |
+ if (diff < 2250) |
10432 |
+@@ -358,9 +361,8 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type, |
10433 |
+ } |
10434 |
+ data->config = (data->config & ~JC42_CFG_HYST_MASK) | |
10435 |
+ (hyst << JC42_CFG_HYST_SHIFT); |
10436 |
+- ret = i2c_smbus_write_word_swapped(data->client, |
10437 |
+- JC42_REG_CONFIG, |
10438 |
+- data->config); |
10439 |
++ ret = regmap_write(data->regmap, JC42_REG_CONFIG, |
10440 |
++ data->config); |
10441 |
+ break; |
10442 |
+ default: |
10443 |
+ ret = -EOPNOTSUPP; |
10444 |
+@@ -458,51 +460,80 @@ static const struct hwmon_chip_info jc42_chip_info = { |
10445 |
+ .info = jc42_info, |
10446 |
+ }; |
10447 |
+ |
10448 |
++static bool jc42_readable_reg(struct device *dev, unsigned int reg) |
10449 |
++{ |
10450 |
++ return (reg >= JC42_REG_CAP && reg <= JC42_REG_DEVICEID) || |
10451 |
++ reg == JC42_REG_SMBUS; |
10452 |
++} |
10453 |
++ |
10454 |
++static bool jc42_writable_reg(struct device *dev, unsigned int reg) |
10455 |
++{ |
10456 |
++ return (reg >= JC42_REG_CONFIG && reg <= JC42_REG_TEMP_CRITICAL) || |
10457 |
++ reg == JC42_REG_SMBUS; |
10458 |
++} |
10459 |
++ |
10460 |
++static bool jc42_volatile_reg(struct device *dev, unsigned int reg) |
10461 |
++{ |
10462 |
++ return reg == JC42_REG_CONFIG || reg == JC42_REG_TEMP; |
10463 |
++} |
10464 |
++ |
10465 |
++static const struct regmap_config jc42_regmap_config = { |
10466 |
++ .reg_bits = 8, |
10467 |
++ .val_bits = 16, |
10468 |
++ .val_format_endian = REGMAP_ENDIAN_BIG, |
10469 |
++ .max_register = JC42_REG_SMBUS, |
10470 |
++ .writeable_reg = jc42_writable_reg, |
10471 |
++ .readable_reg = jc42_readable_reg, |
10472 |
++ .volatile_reg = jc42_volatile_reg, |
10473 |
++ .cache_type = REGCACHE_RBTREE, |
10474 |
++}; |
10475 |
++ |
10476 |
+ static int jc42_probe(struct i2c_client *client) |
10477 |
+ { |
10478 |
+ struct device *dev = &client->dev; |
10479 |
+ struct device *hwmon_dev; |
10480 |
++ unsigned int config, cap; |
10481 |
+ struct jc42_data *data; |
10482 |
+- int config, cap; |
10483 |
++ int ret; |
10484 |
+ |
10485 |
+ data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL); |
10486 |
+ if (!data) |
10487 |
+ return -ENOMEM; |
10488 |
+ |
10489 |
+- data->client = client; |
10490 |
++ data->regmap = devm_regmap_init_i2c(client, &jc42_regmap_config); |
10491 |
++ if (IS_ERR(data->regmap)) |
10492 |
++ return PTR_ERR(data->regmap); |
10493 |
++ |
10494 |
+ i2c_set_clientdata(client, data); |
10495 |
+ mutex_init(&data->update_lock); |
10496 |
+ |
10497 |
+- cap = i2c_smbus_read_word_swapped(client, JC42_REG_CAP); |
10498 |
+- if (cap < 0) |
10499 |
+- return cap; |
10500 |
++ ret = regmap_read(data->regmap, JC42_REG_CAP, &cap); |
10501 |
++ if (ret) |
10502 |
++ return ret; |
10503 |
+ |
10504 |
+ data->extended = !!(cap & JC42_CAP_RANGE); |
10505 |
+ |
10506 |
+ if (device_property_read_bool(dev, "smbus-timeout-disable")) { |
10507 |
+- int smbus; |
10508 |
+- |
10509 |
+ /* |
10510 |
+ * Not all chips support this register, but from a |
10511 |
+ * quick read of various datasheets no chip appears |
10512 |
+ * incompatible with the below attempt to disable |
10513 |
+ * the timeout. And the whole thing is opt-in... |
10514 |
+ */ |
10515 |
+- smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); |
10516 |
+- if (smbus < 0) |
10517 |
+- return smbus; |
10518 |
+- i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, |
10519 |
+- smbus | SMBUS_STMOUT); |
10520 |
++ ret = regmap_set_bits(data->regmap, JC42_REG_SMBUS, |
10521 |
++ SMBUS_STMOUT); |
10522 |
++ if (ret) |
10523 |
++ return ret; |
10524 |
+ } |
10525 |
+ |
10526 |
+- config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); |
10527 |
+- if (config < 0) |
10528 |
+- return config; |
10529 |
++ ret = regmap_read(data->regmap, JC42_REG_CONFIG, &config); |
10530 |
++ if (ret) |
10531 |
++ return ret; |
10532 |
+ |
10533 |
+ data->orig_config = config; |
10534 |
+ if (config & JC42_CFG_SHUTDOWN) { |
10535 |
+ config &= ~JC42_CFG_SHUTDOWN; |
10536 |
+- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); |
10537 |
++ regmap_write(data->regmap, JC42_REG_CONFIG, config); |
10538 |
+ } |
10539 |
+ data->config = config; |
10540 |
+ |
10541 |
+@@ -523,7 +554,7 @@ static int jc42_remove(struct i2c_client *client) |
10542 |
+ |
10543 |
+ config = (data->orig_config & ~JC42_CFG_HYST_MASK) |
10544 |
+ | (data->config & JC42_CFG_HYST_MASK); |
10545 |
+- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); |
10546 |
++ regmap_write(data->regmap, JC42_REG_CONFIG, config); |
10547 |
+ } |
10548 |
+ return 0; |
10549 |
+ } |
10550 |
+@@ -535,8 +566,11 @@ static int jc42_suspend(struct device *dev) |
10551 |
+ struct jc42_data *data = dev_get_drvdata(dev); |
10552 |
+ |
10553 |
+ data->config |= JC42_CFG_SHUTDOWN; |
10554 |
+- i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, |
10555 |
+- data->config); |
10556 |
++ regmap_write(data->regmap, JC42_REG_CONFIG, data->config); |
10557 |
++ |
10558 |
++ regcache_cache_only(data->regmap, true); |
10559 |
++ regcache_mark_dirty(data->regmap); |
10560 |
++ |
10561 |
+ return 0; |
10562 |
+ } |
10563 |
+ |
10564 |
+@@ -544,10 +578,13 @@ static int jc42_resume(struct device *dev) |
10565 |
+ { |
10566 |
+ struct jc42_data *data = dev_get_drvdata(dev); |
10567 |
+ |
10568 |
++ regcache_cache_only(data->regmap, false); |
10569 |
++ |
10570 |
+ data->config &= ~JC42_CFG_SHUTDOWN; |
10571 |
+- i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG, |
10572 |
+- data->config); |
10573 |
+- return 0; |
10574 |
++ regmap_write(data->regmap, JC42_REG_CONFIG, data->config); |
10575 |
++ |
10576 |
++ /* Restore cached register values to hardware */ |
10577 |
++ return regcache_sync(data->regmap); |
10578 |
+ } |
10579 |
+ |
10580 |
+ static const struct dev_pm_ops jc42_dev_pm_ops = { |
10581 |
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c |
10582 |
+index 7dddb85b90591..fac63d092c7be 100644 |
10583 |
+--- a/drivers/hwtracing/coresight/coresight-trbe.c |
10584 |
++++ b/drivers/hwtracing/coresight/coresight-trbe.c |
10585 |
+@@ -1030,6 +1030,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata) |
10586 |
+ |
10587 |
+ static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata) |
10588 |
+ { |
10589 |
++ cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node); |
10590 |
+ cpuhp_remove_multi_state(drvdata->trbe_online); |
10591 |
+ } |
10592 |
+ |
10593 |
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c |
10594 |
+index 483428c5e30b9..10cdd501d4c52 100644 |
10595 |
+--- a/drivers/i2c/busses/i2c-ismt.c |
10596 |
++++ b/drivers/i2c/busses/i2c-ismt.c |
10597 |
+@@ -509,6 +509,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, |
10598 |
+ if (read_write == I2C_SMBUS_WRITE) { |
10599 |
+ /* Block Write */ |
10600 |
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); |
10601 |
++ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) |
10602 |
++ return -EINVAL; |
10603 |
++ |
10604 |
+ dma_size = data->block[0] + 1; |
10605 |
+ dma_direction = DMA_TO_DEVICE; |
10606 |
+ desc->wr_len_cmd = dma_size; |
10607 |
+diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c |
10608 |
+index f614cade432bb..30e38bc8b6db8 100644 |
10609 |
+--- a/drivers/i2c/busses/i2c-pxa-pci.c |
10610 |
++++ b/drivers/i2c/busses/i2c-pxa-pci.c |
10611 |
+@@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev, |
10612 |
+ int i; |
10613 |
+ struct ce4100_devices *sds; |
10614 |
+ |
10615 |
+- ret = pci_enable_device_mem(dev); |
10616 |
++ ret = pcim_enable_device(dev); |
10617 |
+ if (ret) |
10618 |
+ return ret; |
10619 |
+ |
10620 |
+@@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev, |
10621 |
+ return -EINVAL; |
10622 |
+ } |
10623 |
+ sds = kzalloc(sizeof(*sds), GFP_KERNEL); |
10624 |
+- if (!sds) { |
10625 |
+- ret = -ENOMEM; |
10626 |
+- goto err_mem; |
10627 |
+- } |
10628 |
++ if (!sds) |
10629 |
++ return -ENOMEM; |
10630 |
+ |
10631 |
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { |
10632 |
+ sds->pdev[i] = add_i2c_device(dev, i); |
10633 |
+@@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev, |
10634 |
+ |
10635 |
+ err_dev_add: |
10636 |
+ kfree(sds); |
10637 |
+-err_mem: |
10638 |
+- pci_disable_device(dev); |
10639 |
+ return ret; |
10640 |
+ } |
10641 |
+ |
10642 |
+diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c |
10643 |
+index 0e0679f65cf77..30a6de1694e07 100644 |
10644 |
+--- a/drivers/i2c/muxes/i2c-mux-reg.c |
10645 |
++++ b/drivers/i2c/muxes/i2c-mux-reg.c |
10646 |
+@@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) |
10647 |
+ if (!mux->data.reg) { |
10648 |
+ dev_info(&pdev->dev, |
10649 |
+ "Register not set, using platform resource\n"); |
10650 |
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
10651 |
+- mux->data.reg_size = resource_size(res); |
10652 |
+- mux->data.reg = devm_ioremap_resource(&pdev->dev, res); |
10653 |
++ mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); |
10654 |
+ if (IS_ERR(mux->data.reg)) { |
10655 |
+ ret = PTR_ERR(mux->data.reg); |
10656 |
+ goto err_put_parent; |
10657 |
+ } |
10658 |
++ mux->data.reg_size = resource_size(res); |
10659 |
+ } |
10660 |
+ |
10661 |
+ if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && |
10662 |
+diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c |
10663 |
+index 7a434e2884d43..dfb8e2e5bdf58 100644 |
10664 |
+--- a/drivers/iio/accel/adis16201.c |
10665 |
++++ b/drivers/iio/accel/adis16201.c |
10666 |
+@@ -300,3 +300,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@×××××.com>"); |
10667 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"); |
10668 |
+ MODULE_LICENSE("GPL v2"); |
10669 |
+ MODULE_ALIAS("spi:adis16201"); |
10670 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
10671 |
+diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c |
10672 |
+index ac08e866d6128..5a9c6e2296f1d 100644 |
10673 |
+--- a/drivers/iio/accel/adis16209.c |
10674 |
++++ b/drivers/iio/accel/adis16209.c |
10675 |
+@@ -310,3 +310,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@×××××.com>"); |
10676 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"); |
10677 |
+ MODULE_LICENSE("GPL v2"); |
10678 |
+ MODULE_ALIAS("spi:adis16209"); |
10679 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
10680 |
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c |
10681 |
+index 1d652d9b2f5cd..bd5c49571d1ab 100644 |
10682 |
+--- a/drivers/iio/adc/ad_sigma_delta.c |
10683 |
++++ b/drivers/iio/adc/ad_sigma_delta.c |
10684 |
+@@ -280,10 +280,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, |
10685 |
+ unsigned int data_reg; |
10686 |
+ int ret = 0; |
10687 |
+ |
10688 |
+- if (iio_buffer_enabled(indio_dev)) |
10689 |
+- return -EBUSY; |
10690 |
++ ret = iio_device_claim_direct_mode(indio_dev); |
10691 |
++ if (ret) |
10692 |
++ return ret; |
10693 |
+ |
10694 |
+- mutex_lock(&indio_dev->mlock); |
10695 |
+ ad_sigma_delta_set_channel(sigma_delta, chan->address); |
10696 |
+ |
10697 |
+ spi_bus_lock(sigma_delta->spi->master); |
10698 |
+@@ -322,7 +322,7 @@ out: |
10699 |
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); |
10700 |
+ sigma_delta->bus_locked = false; |
10701 |
+ spi_bus_unlock(sigma_delta->spi->master); |
10702 |
+- mutex_unlock(&indio_dev->mlock); |
10703 |
++ iio_device_release_direct_mode(indio_dev); |
10704 |
+ |
10705 |
+ if (ret) |
10706 |
+ return ret; |
10707 |
+diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c |
10708 |
+index 83c1ae07b3e9a..8618ae7bc0671 100644 |
10709 |
+--- a/drivers/iio/adc/ti-adc128s052.c |
10710 |
++++ b/drivers/iio/adc/ti-adc128s052.c |
10711 |
+@@ -193,13 +193,13 @@ static int adc128_remove(struct spi_device *spi) |
10712 |
+ } |
10713 |
+ |
10714 |
+ static const struct of_device_id adc128_of_match[] = { |
10715 |
+- { .compatible = "ti,adc128s052", }, |
10716 |
+- { .compatible = "ti,adc122s021", }, |
10717 |
+- { .compatible = "ti,adc122s051", }, |
10718 |
+- { .compatible = "ti,adc122s101", }, |
10719 |
+- { .compatible = "ti,adc124s021", }, |
10720 |
+- { .compatible = "ti,adc124s051", }, |
10721 |
+- { .compatible = "ti,adc124s101", }, |
10722 |
++ { .compatible = "ti,adc128s052", .data = (void*)0L, }, |
10723 |
++ { .compatible = "ti,adc122s021", .data = (void*)1L, }, |
10724 |
++ { .compatible = "ti,adc122s051", .data = (void*)1L, }, |
10725 |
++ { .compatible = "ti,adc122s101", .data = (void*)1L, }, |
10726 |
++ { .compatible = "ti,adc124s021", .data = (void*)2L, }, |
10727 |
++ { .compatible = "ti,adc124s051", .data = (void*)2L, }, |
10728 |
++ { .compatible = "ti,adc124s101", .data = (void*)2L, }, |
10729 |
+ { /* sentinel */ }, |
10730 |
+ }; |
10731 |
+ MODULE_DEVICE_TABLE(of, adc128_of_match); |
10732 |
+diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c |
10733 |
+index 36879f01e28ca..71295709f2b96 100644 |
10734 |
+--- a/drivers/iio/gyro/adis16136.c |
10735 |
++++ b/drivers/iio/gyro/adis16136.c |
10736 |
+@@ -591,3 +591,4 @@ module_spi_driver(adis16136_driver); |
10737 |
+ MODULE_AUTHOR("Lars-Peter Clausen <lars@×××××××.de>"); |
10738 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16133/ADIS16135/ADIS16136 gyroscope driver"); |
10739 |
+ MODULE_LICENSE("GPL v2"); |
10740 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
10741 |
+diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c |
10742 |
+index 66b6b7bd5e1bc..eaf57bd339edd 100644 |
10743 |
+--- a/drivers/iio/gyro/adis16260.c |
10744 |
++++ b/drivers/iio/gyro/adis16260.c |
10745 |
+@@ -433,3 +433,4 @@ module_spi_driver(adis16260_driver); |
10746 |
+ MODULE_AUTHOR("Barry Song <21cnbao@×××××.com>"); |
10747 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor"); |
10748 |
+ MODULE_LICENSE("GPL v2"); |
10749 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
10750 |
+diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c |
10751 |
+index d4e692b187cda..bc40240b29e26 100644 |
10752 |
+--- a/drivers/iio/imu/adis.c |
10753 |
++++ b/drivers/iio/imu/adis.c |
10754 |
+@@ -30,8 +30,8 @@ |
10755 |
+ * @value: The value to write to device (up to 4 bytes) |
10756 |
+ * @size: The size of the @value (in bytes) |
10757 |
+ */ |
10758 |
+-int __adis_write_reg(struct adis *adis, unsigned int reg, |
10759 |
+- unsigned int value, unsigned int size) |
10760 |
++int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, |
10761 |
++ unsigned int size) |
10762 |
+ { |
10763 |
+ unsigned int page = reg / ADIS_PAGE_SIZE; |
10764 |
+ int ret, i; |
10765 |
+@@ -114,14 +114,14 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, |
10766 |
+ ret = spi_sync(adis->spi, &msg); |
10767 |
+ if (ret) { |
10768 |
+ dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n", |
10769 |
+- reg, ret); |
10770 |
++ reg, ret); |
10771 |
+ } else { |
10772 |
+ adis->current_page = page; |
10773 |
+ } |
10774 |
+ |
10775 |
+ return ret; |
10776 |
+ } |
10777 |
+-EXPORT_SYMBOL_GPL(__adis_write_reg); |
10778 |
++EXPORT_SYMBOL_NS_GPL(__adis_write_reg, IIO_ADISLIB); |
10779 |
+ |
10780 |
+ /** |
10781 |
+ * __adis_read_reg() - read N bytes from register (unlocked version) |
10782 |
+@@ -130,8 +130,8 @@ EXPORT_SYMBOL_GPL(__adis_write_reg); |
10783 |
+ * @val: The value read back from the device |
10784 |
+ * @size: The size of the @val buffer |
10785 |
+ */ |
10786 |
+-int __adis_read_reg(struct adis *adis, unsigned int reg, |
10787 |
+- unsigned int *val, unsigned int size) |
10788 |
++int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, |
10789 |
++ unsigned int size) |
10790 |
+ { |
10791 |
+ unsigned int page = reg / ADIS_PAGE_SIZE; |
10792 |
+ struct spi_message msg; |
10793 |
+@@ -201,12 +201,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, |
10794 |
+ ret = spi_sync(adis->spi, &msg); |
10795 |
+ if (ret) { |
10796 |
+ dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n", |
10797 |
+- reg, ret); |
10798 |
++ reg, ret); |
10799 |
+ return ret; |
10800 |
+- } else { |
10801 |
+- adis->current_page = page; |
10802 |
+ } |
10803 |
+ |
10804 |
++ adis->current_page = page; |
10805 |
++ |
10806 |
+ switch (size) { |
10807 |
+ case 4: |
10808 |
+ *val = get_unaligned_be32(adis->rx); |
10809 |
+@@ -218,7 +218,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, |
10810 |
+ |
10811 |
+ return ret; |
10812 |
+ } |
10813 |
+-EXPORT_SYMBOL_GPL(__adis_read_reg); |
10814 |
++EXPORT_SYMBOL_NS_GPL(__adis_read_reg, IIO_ADISLIB); |
10815 |
+ /** |
10816 |
+ * __adis_update_bits_base() - ADIS Update bits function - Unlocked version |
10817 |
+ * @adis: The adis device |
10818 |
+@@ -243,17 +243,17 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, |
10819 |
+ |
10820 |
+ return __adis_write_reg(adis, reg, __val, size); |
10821 |
+ } |
10822 |
+-EXPORT_SYMBOL_GPL(__adis_update_bits_base); |
10823 |
++EXPORT_SYMBOL_NS_GPL(__adis_update_bits_base, IIO_ADISLIB); |
10824 |
+ |
10825 |
+ #ifdef CONFIG_DEBUG_FS |
10826 |
+ |
10827 |
+-int adis_debugfs_reg_access(struct iio_dev *indio_dev, |
10828 |
+- unsigned int reg, unsigned int writeval, unsigned int *readval) |
10829 |
++int adis_debugfs_reg_access(struct iio_dev *indio_dev, unsigned int reg, |
10830 |
++ unsigned int writeval, unsigned int *readval) |
10831 |
+ { |
10832 |
+ struct adis *adis = iio_device_get_drvdata(indio_dev); |
10833 |
+ |
10834 |
+ if (readval) { |
10835 |
+- uint16_t val16; |
10836 |
++ u16 val16; |
10837 |
+ int ret; |
10838 |
+ |
10839 |
+ ret = adis_read_reg_16(adis, reg, &val16); |
10840 |
+@@ -261,36 +261,41 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev, |
10841 |
+ *readval = val16; |
10842 |
+ |
10843 |
+ return ret; |
10844 |
+- } else { |
10845 |
+- return adis_write_reg_16(adis, reg, writeval); |
10846 |
+ } |
10847 |
++ |
10848 |
++ return adis_write_reg_16(adis, reg, writeval); |
10849 |
+ } |
10850 |
+-EXPORT_SYMBOL(adis_debugfs_reg_access); |
10851 |
++EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB); |
10852 |
+ |
10853 |
+ #endif |
10854 |
+ |
10855 |
+ /** |
10856 |
+- * adis_enable_irq() - Enable or disable data ready IRQ |
10857 |
++ * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked) |
10858 |
+ * @adis: The adis device |
10859 |
+ * @enable: Whether to enable the IRQ |
10860 |
+ * |
10861 |
+ * Returns 0 on success, negative error code otherwise |
10862 |
+ */ |
10863 |
+-int adis_enable_irq(struct adis *adis, bool enable) |
10864 |
++int __adis_enable_irq(struct adis *adis, bool enable) |
10865 |
+ { |
10866 |
+- int ret = 0; |
10867 |
+- uint16_t msc; |
10868 |
++ int ret; |
10869 |
++ u16 msc; |
10870 |
+ |
10871 |
+- mutex_lock(&adis->state_lock); |
10872 |
++ if (adis->data->enable_irq) |
10873 |
++ return adis->data->enable_irq(adis, enable); |
10874 |
+ |
10875 |
+- if (adis->data->enable_irq) { |
10876 |
+- ret = adis->data->enable_irq(adis, enable); |
10877 |
+- goto out_unlock; |
10878 |
++ if (adis->data->unmasked_drdy) { |
10879 |
++ if (enable) |
10880 |
++ enable_irq(adis->spi->irq); |
10881 |
++ else |
10882 |
++ disable_irq(adis->spi->irq); |
10883 |
++ |
10884 |
++ return 0; |
10885 |
+ } |
10886 |
+ |
10887 |
+ ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc); |
10888 |
+ if (ret) |
10889 |
+- goto out_unlock; |
10890 |
++ return ret; |
10891 |
+ |
10892 |
+ msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH; |
10893 |
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2; |
10894 |
+@@ -299,13 +304,9 @@ int adis_enable_irq(struct adis *adis, bool enable) |
10895 |
+ else |
10896 |
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN; |
10897 |
+ |
10898 |
+- ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); |
10899 |
+- |
10900 |
+-out_unlock: |
10901 |
+- mutex_unlock(&adis->state_lock); |
10902 |
+- return ret; |
10903 |
++ return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc); |
10904 |
+ } |
10905 |
+-EXPORT_SYMBOL(adis_enable_irq); |
10906 |
++EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB); |
10907 |
+ |
10908 |
+ /** |
10909 |
+ * __adis_check_status() - Check the device for error conditions (unlocked) |
10910 |
+@@ -315,7 +316,7 @@ EXPORT_SYMBOL(adis_enable_irq); |
10911 |
+ */ |
10912 |
+ int __adis_check_status(struct adis *adis) |
10913 |
+ { |
10914 |
+- uint16_t status; |
10915 |
++ u16 status; |
10916 |
+ int ret; |
10917 |
+ int i; |
10918 |
+ |
10919 |
+@@ -337,7 +338,7 @@ int __adis_check_status(struct adis *adis) |
10920 |
+ |
10921 |
+ return -EIO; |
10922 |
+ } |
10923 |
+-EXPORT_SYMBOL_GPL(__adis_check_status); |
10924 |
++EXPORT_SYMBOL_NS_GPL(__adis_check_status, IIO_ADISLIB); |
10925 |
+ |
10926 |
+ /** |
10927 |
+ * __adis_reset() - Reset the device (unlocked version) |
10928 |
+@@ -351,7 +352,7 @@ int __adis_reset(struct adis *adis) |
10929 |
+ const struct adis_timeout *timeouts = adis->data->timeouts; |
10930 |
+ |
10931 |
+ ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg, |
10932 |
+- ADIS_GLOB_CMD_SW_RESET); |
10933 |
++ ADIS_GLOB_CMD_SW_RESET); |
10934 |
+ if (ret) { |
10935 |
+ dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret); |
10936 |
+ return ret; |
10937 |
+@@ -361,7 +362,7 @@ int __adis_reset(struct adis *adis) |
10938 |
+ |
10939 |
+ return 0; |
10940 |
+ } |
10941 |
+-EXPORT_SYMBOL_GPL(__adis_reset); |
10942 |
++EXPORT_SYMBOL_NS_GPL(__adis_reset, IIO_ADIS_LIB); |
10943 |
+ |
10944 |
+ static int adis_self_test(struct adis *adis) |
10945 |
+ { |
10946 |
+@@ -407,7 +408,7 @@ int __adis_initial_startup(struct adis *adis) |
10947 |
+ { |
10948 |
+ const struct adis_timeout *timeouts = adis->data->timeouts; |
10949 |
+ struct gpio_desc *gpio; |
10950 |
+- uint16_t prod_id; |
10951 |
++ u16 prod_id; |
10952 |
+ int ret; |
10953 |
+ |
10954 |
+ /* check if the device has rst pin low */ |
10955 |
+@@ -416,7 +417,7 @@ int __adis_initial_startup(struct adis *adis) |
10956 |
+ return PTR_ERR(gpio); |
10957 |
+ |
10958 |
+ if (gpio) { |
10959 |
+- msleep(10); |
10960 |
++ usleep_range(10, 12); |
10961 |
+ /* bring device out of reset */ |
10962 |
+ gpiod_set_value_cansleep(gpio, 0); |
10963 |
+ msleep(timeouts->reset_ms); |
10964 |
+@@ -430,7 +431,13 @@ int __adis_initial_startup(struct adis *adis) |
10965 |
+ if (ret) |
10966 |
+ return ret; |
10967 |
+ |
10968 |
+- adis_enable_irq(adis, false); |
10969 |
++ /* |
10970 |
++ * don't bother calling this if we can't unmask the IRQ as in this case |
10971 |
++ * the IRQ is most likely not yet requested and we will request it |
10972 |
++ * with 'IRQF_NO_AUTOEN' anyways. |
10973 |
++ */ |
10974 |
++ if (!adis->data->unmasked_drdy) |
10975 |
++ __adis_enable_irq(adis, false); |
10976 |
+ |
10977 |
+ if (!adis->data->prod_id_reg) |
10978 |
+ return 0; |
10979 |
+@@ -446,7 +453,7 @@ int __adis_initial_startup(struct adis *adis) |
10980 |
+ |
10981 |
+ return 0; |
10982 |
+ } |
10983 |
+-EXPORT_SYMBOL_GPL(__adis_initial_startup); |
10984 |
++EXPORT_SYMBOL_NS_GPL(__adis_initial_startup, IIO_ADISLIB); |
10985 |
+ |
10986 |
+ /** |
10987 |
+ * adis_single_conversion() - Performs a single sample conversion |
10988 |
+@@ -464,7 +471,8 @@ EXPORT_SYMBOL_GPL(__adis_initial_startup); |
10989 |
+ * a error bit in the channels raw value set error_mask to 0. |
10990 |
+ */ |
10991 |
+ int adis_single_conversion(struct iio_dev *indio_dev, |
10992 |
+- const struct iio_chan_spec *chan, unsigned int error_mask, int *val) |
10993 |
++ const struct iio_chan_spec *chan, |
10994 |
++ unsigned int error_mask, int *val) |
10995 |
+ { |
10996 |
+ struct adis *adis = iio_device_get_drvdata(indio_dev); |
10997 |
+ unsigned int uval; |
10998 |
+@@ -473,7 +481,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, |
10999 |
+ mutex_lock(&adis->state_lock); |
11000 |
+ |
11001 |
+ ret = __adis_read_reg(adis, chan->address, &uval, |
11002 |
+- chan->scan_type.storagebits / 8); |
11003 |
++ chan->scan_type.storagebits / 8); |
11004 |
+ if (ret) |
11005 |
+ goto err_unlock; |
11006 |
+ |
11007 |
+@@ -493,7 +501,7 @@ err_unlock: |
11008 |
+ mutex_unlock(&adis->state_lock); |
11009 |
+ return ret; |
11010 |
+ } |
11011 |
+-EXPORT_SYMBOL_GPL(adis_single_conversion); |
11012 |
++EXPORT_SYMBOL_NS_GPL(adis_single_conversion, IIO_ADISLIB); |
11013 |
+ |
11014 |
+ /** |
11015 |
+ * adis_init() - Initialize adis device structure |
11016 |
+@@ -508,7 +516,7 @@ EXPORT_SYMBOL_GPL(adis_single_conversion); |
11017 |
+ * called. |
11018 |
+ */ |
11019 |
+ int adis_init(struct adis *adis, struct iio_dev *indio_dev, |
11020 |
+- struct spi_device *spi, const struct adis_data *data) |
11021 |
++ struct spi_device *spi, const struct adis_data *data) |
11022 |
+ { |
11023 |
+ if (!data || !data->timeouts) { |
11024 |
+ dev_err(&spi->dev, "No config data or timeouts not defined!\n"); |
11025 |
+@@ -530,7 +538,7 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev, |
11026 |
+ |
11027 |
+ return 0; |
11028 |
+ } |
11029 |
+-EXPORT_SYMBOL_GPL(adis_init); |
11030 |
++EXPORT_SYMBOL_NS_GPL(adis_init, IIO_ADISLIB); |
11031 |
+ |
11032 |
+ MODULE_LICENSE("GPL"); |
11033 |
+ MODULE_AUTHOR("Lars-Peter Clausen <lars@×××××××.de>"); |
11034 |
+diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c |
11035 |
+index b12917a7cb602..9bcd9a9261b92 100644 |
11036 |
+--- a/drivers/iio/imu/adis16400.c |
11037 |
++++ b/drivers/iio/imu/adis16400.c |
11038 |
+@@ -1230,3 +1230,4 @@ module_spi_driver(adis16400_driver); |
11039 |
+ MODULE_AUTHOR("Manuel Stahl <manuel.stahl@××××××××××××××.de>"); |
11040 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver"); |
11041 |
+ MODULE_LICENSE("GPL v2"); |
11042 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
11043 |
+diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c |
11044 |
+index a6f9fba3e03f4..40fc0e582a9fd 100644 |
11045 |
+--- a/drivers/iio/imu/adis16460.c |
11046 |
++++ b/drivers/iio/imu/adis16460.c |
11047 |
+@@ -444,3 +444,4 @@ module_spi_driver(adis16460_driver); |
11048 |
+ MODULE_AUTHOR("Dragos Bogdan <dragos.bogdan@××××××.com>"); |
11049 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16460 IMU driver"); |
11050 |
+ MODULE_LICENSE("GPL"); |
11051 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
11052 |
+diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c |
11053 |
+index 287fff39a927a..9d28534db3b08 100644 |
11054 |
+--- a/drivers/iio/imu/adis16475.c |
11055 |
++++ b/drivers/iio/imu/adis16475.c |
11056 |
+@@ -1382,3 +1382,4 @@ module_spi_driver(adis16475_driver); |
11057 |
+ MODULE_AUTHOR("Nuno Sa <nuno.sa@××××××.com>"); |
11058 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver"); |
11059 |
+ MODULE_LICENSE("GPL"); |
11060 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
11061 |
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c |
11062 |
+index f9b4540db1f43..44bbe3d199073 100644 |
11063 |
+--- a/drivers/iio/imu/adis16480.c |
11064 |
++++ b/drivers/iio/imu/adis16480.c |
11065 |
+@@ -1538,3 +1538,4 @@ module_spi_driver(adis16480_driver); |
11066 |
+ MODULE_AUTHOR("Lars-Peter Clausen <lars@×××××××.de>"); |
11067 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16480 IMU driver"); |
11068 |
+ MODULE_LICENSE("GPL v2"); |
11069 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
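
The adis.c hunks above move every export of the common ADIS library into the IIO_ADISLIB symbol namespace (EXPORT_SYMBOL_NS_GPL()/EXPORT_SYMBOL_NS()), and each driver that consumes it, adis16400, adis16460, adis16475 and adis16480 above, adds the matching MODULE_IMPORT_NS(IIO_ADISLIB); adis_buffer.c and adis_trigger.c below get the same export treatment. A minimal sketch of how that provider/consumer pairing works in general, with made-up symbol and namespace names rather than the ones in this patch:

/* provider.c: export a symbol into a namespace */
#include <linux/module.h>

int example_lib_op(int x)
{
        return x + 1;
}
/* Only modules that import EXAMPLE_NS may link against this symbol. */
EXPORT_SYMBOL_NS_GPL(example_lib_op, EXAMPLE_NS);

MODULE_LICENSE("GPL");

/* consumer.c: must declare the import, or modpost rejects the module */
#include <linux/module.h>
#include <linux/errno.h>

extern int example_lib_op(int x);

static int __init consumer_init(void)
{
        return example_lib_op(1) == 2 ? 0 : -EINVAL;
}
module_init(consumer_init);

MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(EXAMPLE_NS);

modpost enforces the pairing at build time; a consumer that uses the symbol without the import fails with a message along the lines of "module X uses symbol example_lib_op from namespace EXAMPLE_NS, but does not import it".
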
11070 |
+diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c |
11071 |
+index 351c303c8a8c0..928933027ae34 100644 |
11072 |
+--- a/drivers/iio/imu/adis_buffer.c |
11073 |
++++ b/drivers/iio/imu/adis_buffer.c |
11074 |
+@@ -20,7 +20,7 @@ |
11075 |
+ #include <linux/iio/imu/adis.h> |
11076 |
+ |
11077 |
+ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, |
11078 |
+- const unsigned long *scan_mask) |
11079 |
++ const unsigned long *scan_mask) |
11080 |
+ { |
11081 |
+ struct adis *adis = iio_device_get_drvdata(indio_dev); |
11082 |
+ unsigned int burst_length, burst_max_length; |
11083 |
+@@ -67,7 +67,7 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, |
11084 |
+ } |
11085 |
+ |
11086 |
+ int adis_update_scan_mode(struct iio_dev *indio_dev, |
11087 |
+- const unsigned long *scan_mask) |
11088 |
++ const unsigned long *scan_mask) |
11089 |
+ { |
11090 |
+ struct adis *adis = iio_device_get_drvdata(indio_dev); |
11091 |
+ const struct iio_chan_spec *chan; |
11092 |
+@@ -124,7 +124,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, |
11093 |
+ |
11094 |
+ return 0; |
11095 |
+ } |
11096 |
+-EXPORT_SYMBOL_GPL(adis_update_scan_mode); |
11097 |
++EXPORT_SYMBOL_NS_GPL(adis_update_scan_mode, IIO_ADISLIB); |
11098 |
+ |
11099 |
+ static irqreturn_t adis_trigger_handler(int irq, void *p) |
11100 |
+ { |
11101 |
+@@ -158,7 +158,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) |
11102 |
+ } |
11103 |
+ |
11104 |
+ iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, |
11105 |
+- pf->timestamp); |
11106 |
++ pf->timestamp); |
11107 |
+ |
11108 |
+ irq_done: |
11109 |
+ iio_trigger_notify_done(indio_dev->trig); |
11110 |
+@@ -212,5 +212,5 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, |
11111 |
+ return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup, |
11112 |
+ adis); |
11113 |
+ } |
11114 |
+-EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger); |
11115 |
++EXPORT_SYMBOL_NS_GPL(devm_adis_setup_buffer_and_trigger, IIO_ADISLIB); |
11116 |
+ |
11117 |
+diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c |
11118 |
+index 48eedc29b28a8..f890bf842db86 100644 |
11119 |
+--- a/drivers/iio/imu/adis_trigger.c |
11120 |
++++ b/drivers/iio/imu/adis_trigger.c |
11121 |
+@@ -15,8 +15,7 @@ |
11122 |
+ #include <linux/iio/trigger.h> |
11123 |
+ #include <linux/iio/imu/adis.h> |
11124 |
+ |
11125 |
+-static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, |
11126 |
+- bool state) |
11127 |
++static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) |
11128 |
+ { |
11129 |
+ struct adis *adis = iio_trigger_get_drvdata(trig); |
11130 |
+ |
11131 |
+@@ -30,6 +29,10 @@ static const struct iio_trigger_ops adis_trigger_ops = { |
11132 |
+ static int adis_validate_irq_flag(struct adis *adis) |
11133 |
+ { |
11134 |
+ unsigned long direction = adis->irq_flag & IRQF_TRIGGER_MASK; |
11135 |
++ |
11136 |
++ /* We cannot mask the interrupt so ensure it's not enabled at request */ |
11137 |
++ if (adis->data->unmasked_drdy) |
11138 |
++ adis->irq_flag |= IRQF_NO_AUTOEN; |
11139 |
+ /* |
11140 |
+ * Typically this devices have data ready either on the rising edge or |
11141 |
+ * on the falling edge of the data ready pin. This checks enforces that |
11142 |
+@@ -84,5 +87,5 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) |
11143 |
+ |
11144 |
+ return devm_iio_trigger_register(&adis->spi->dev, adis->trig); |
11145 |
+ } |
11146 |
+-EXPORT_SYMBOL_GPL(devm_adis_probe_trigger); |
11147 |
++EXPORT_SYMBOL_NS_GPL(devm_adis_probe_trigger, IIO_ADISLIB); |
11148 |
+ |
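
The unmasked_drdy handling added above relies on IRQF_NO_AUTOEN: when the data-ready pin cannot be masked in the device itself, adis_validate_irq_flag() tags the request with IRQF_NO_AUTOEN so the line stays disabled after request time, and __adis_enable_irq() then gates it purely with enable_irq()/disable_irq(). A rough sketch of that pattern outside the adis code (the device, handler and names are placeholders):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_drdy_handler(int irq, void *data)
{
        /* read a sample, push it into a buffer, ... */
        return IRQ_HANDLED;
}

static int demo_setup_irq(struct device *dev, int irq, void *priv)
{
        int ret;

        /*
         * IRQF_NO_AUTOEN leaves the line disabled after the request,
         * so nothing can fire before the device is ready for it.
         */
        ret = devm_request_irq(dev, irq, demo_drdy_handler,
                               IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
                               "demo-drdy", priv);
        if (ret)
                return ret;

        /* ... configure the device ... */

        enable_irq(irq);        /* unmask only once everything is set up */
        return 0;
}
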
11149 |
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c |
11150 |
+index d0732eac0f0ac..07bf47a1a3567 100644 |
11151 |
+--- a/drivers/iio/industrialio-event.c |
11152 |
++++ b/drivers/iio/industrialio-event.c |
11153 |
+@@ -549,7 +549,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) |
11154 |
+ |
11155 |
+ ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group); |
11156 |
+ if (ret) |
11157 |
+- goto error_free_setup_event_lines; |
11158 |
++ goto error_free_group_attrs; |
11159 |
+ |
11160 |
+ ev_int->ioctl_handler.ioctl = iio_event_ioctl; |
11161 |
+ iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev, |
11162 |
+@@ -557,6 +557,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) |
11163 |
+ |
11164 |
+ return 0; |
11165 |
+ |
11166 |
++error_free_group_attrs: |
11167 |
++ kfree(ev_int->group.attrs); |
11168 |
+ error_free_setup_event_lines: |
11169 |
+ iio_free_chan_devattr_list(&ev_int->dev_attr_list); |
11170 |
+ kfree(ev_int); |
11171 |
+diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c |
11172 |
+index 8306daa779081..b2ae2d2c7eefc 100644 |
11173 |
+--- a/drivers/iio/temperature/ltc2983.c |
11174 |
++++ b/drivers/iio/temperature/ltc2983.c |
11175 |
+@@ -205,6 +205,7 @@ struct ltc2983_data { |
11176 |
+ * Holds the converted temperature |
11177 |
+ */ |
11178 |
+ __be32 temp ____cacheline_aligned; |
11179 |
++ __be32 chan_val; |
11180 |
+ }; |
11181 |
+ |
11182 |
+ struct ltc2983_sensor { |
11183 |
+@@ -309,19 +310,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st, |
11184 |
+ return 0; |
11185 |
+ } |
11186 |
+ |
11187 |
+-static int __ltc2983_chan_assign_common(const struct ltc2983_data *st, |
11188 |
++static int __ltc2983_chan_assign_common(struct ltc2983_data *st, |
11189 |
+ const struct ltc2983_sensor *sensor, |
11190 |
+ u32 chan_val) |
11191 |
+ { |
11192 |
+ u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan); |
11193 |
+- __be32 __chan_val; |
11194 |
+ |
11195 |
+ chan_val |= LTC2983_CHAN_TYPE(sensor->type); |
11196 |
+ dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg, |
11197 |
+ chan_val); |
11198 |
+- __chan_val = cpu_to_be32(chan_val); |
11199 |
+- return regmap_bulk_write(st->regmap, reg, &__chan_val, |
11200 |
+- sizeof(__chan_val)); |
11201 |
++ st->chan_val = cpu_to_be32(chan_val); |
11202 |
++ return regmap_bulk_write(st->regmap, reg, &st->chan_val, |
11203 |
++ sizeof(st->chan_val)); |
11204 |
+ } |
11205 |
+ |
11206 |
+ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st, |
11207 |
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c |
11208 |
+index 1519379b116e2..ab2106a09f9c6 100644 |
11209 |
+--- a/drivers/infiniband/core/device.c |
11210 |
++++ b/drivers/infiniband/core/device.c |
11211 |
+@@ -2848,8 +2848,8 @@ err: |
11212 |
+ static void __exit ib_core_cleanup(void) |
11213 |
+ { |
11214 |
+ roce_gid_mgmt_cleanup(); |
11215 |
+- nldev_exit(); |
11216 |
+ rdma_nl_unregister(RDMA_NL_LS); |
11217 |
++ nldev_exit(); |
11218 |
+ unregister_pernet_device(&rdma_dev_net_ops); |
11219 |
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb); |
11220 |
+ ib_sa_cleanup(); |
11221 |
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c |
11222 |
+index 1893aa613ad73..674344eb8e2f4 100644 |
11223 |
+--- a/drivers/infiniband/core/mad.c |
11224 |
++++ b/drivers/infiniband/core/mad.c |
11225 |
+@@ -59,9 +59,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, |
11226 |
+ struct ib_mad_qp_info *qp_info, |
11227 |
+ struct trace_event_raw_ib_mad_send_template *entry) |
11228 |
+ { |
11229 |
+- u16 pkey; |
11230 |
+- struct ib_device *dev = qp_info->port_priv->device; |
11231 |
+- u32 pnum = qp_info->port_priv->port_num; |
11232 |
+ struct ib_ud_wr *wr = &mad_send_wr->send_wr; |
11233 |
+ struct rdma_ah_attr attr = {}; |
11234 |
+ |
11235 |
+@@ -69,8 +66,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, |
11236 |
+ |
11237 |
+ /* These are common */ |
11238 |
+ entry->sl = attr.sl; |
11239 |
+- ib_query_pkey(dev, pnum, wr->pkey_index, &pkey); |
11240 |
+- entry->pkey = pkey; |
11241 |
+ entry->rqpn = wr->remote_qpn; |
11242 |
+ entry->rqkey = wr->remote_qkey; |
11243 |
+ entry->dlid = rdma_ah_get_dlid(&attr); |
11244 |
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c |
11245 |
+index cd89e59cbe332..7ad3ba7d5a0a1 100644 |
11246 |
+--- a/drivers/infiniband/core/nldev.c |
11247 |
++++ b/drivers/infiniband/core/nldev.c |
11248 |
+@@ -511,7 +511,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, |
11249 |
+ |
11250 |
+ /* In create_qp() port is not set yet */ |
11251 |
+ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) |
11252 |
+- return -EINVAL; |
11253 |
++ return -EMSGSIZE; |
11254 |
+ |
11255 |
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); |
11256 |
+ if (ret) |
11257 |
+@@ -550,7 +550,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, |
11258 |
+ struct rdma_cm_id *cm_id = &id_priv->id; |
11259 |
+ |
11260 |
+ if (port && port != cm_id->port_num) |
11261 |
+- return 0; |
11262 |
++ return -EAGAIN; |
11263 |
+ |
11264 |
+ if (cm_id->port_num && |
11265 |
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) |
11266 |
+@@ -892,6 +892,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg, |
11267 |
+ int ret = 0; |
11268 |
+ |
11269 |
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); |
11270 |
++ if (!table_attr) |
11271 |
++ return -EMSGSIZE; |
11272 |
+ |
11273 |
+ rt = &counter->device->res[RDMA_RESTRACK_QP]; |
11274 |
+ xa_lock(&rt->xa); |
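
The nldev.c hunk just above adds the NULL check that was missing after nla_nest_start(): nesting fails when the skb has no tailroom left, and the convention in these fill helpers is to report that as -EMSGSIZE rather than dereference a NULL nest. A generic sketch of the nest/cancel pattern (the attribute numbers are invented for illustration):

#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_LIST, DEMO_ATTR_VALUE, __DEMO_ATTR_MAX };

static int demo_fill_list(struct sk_buff *msg, const u32 *vals, int n)
{
        struct nlattr *nest;
        int i;

        nest = nla_nest_start(msg, DEMO_ATTR_LIST);
        if (!nest)
                return -EMSGSIZE;       /* no room left in the message */

        for (i = 0; i < n; i++) {
                if (nla_put_u32(msg, DEMO_ATTR_VALUE, vals[i])) {
                        nla_nest_cancel(msg, nest);     /* drop the partial nest */
                        return -EMSGSIZE;
                }
        }

        nla_nest_end(msg, nest);
        return 0;
}
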
11275 |
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c |
11276 |
+index 1f935d9f61785..01a499a8b88db 100644 |
11277 |
+--- a/drivers/infiniband/core/restrack.c |
11278 |
++++ b/drivers/infiniband/core/restrack.c |
11279 |
+@@ -343,8 +343,6 @@ void rdma_restrack_del(struct rdma_restrack_entry *res) |
11280 |
+ rt = &dev->res[res->type]; |
11281 |
+ |
11282 |
+ old = xa_erase(&rt->xa, res->id); |
11283 |
+- if (res->type == RDMA_RESTRACK_MR) |
11284 |
+- return; |
11285 |
+ WARN_ON(old != res); |
11286 |
+ |
11287 |
+ out: |
11288 |
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c |
11289 |
+index 8d709986b88c7..253ccaf343f69 100644 |
11290 |
+--- a/drivers/infiniband/core/sysfs.c |
11291 |
++++ b/drivers/infiniband/core/sysfs.c |
11292 |
+@@ -1198,6 +1198,9 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, |
11293 |
+ p->port_num = port_num; |
11294 |
+ kobject_init(&p->kobj, &port_type); |
11295 |
+ |
11296 |
++ if (device->port_data && is_full_dev) |
11297 |
++ device->port_data[port_num].sysfs = p; |
11298 |
++ |
11299 |
+ cur_group = p->groups_list; |
11300 |
+ ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list, |
11301 |
+ attr->gid_tbl_len, show_port_gid); |
11302 |
+@@ -1243,9 +1246,6 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, |
11303 |
+ } |
11304 |
+ |
11305 |
+ list_add_tail(&p->kobj.entry, &coredev->port_list); |
11306 |
+- if (device->port_data && is_full_dev) |
11307 |
+- device->port_data[port_num].sysfs = p; |
11308 |
+- |
11309 |
+ return p; |
11310 |
+ |
11311 |
+ err_groups: |
11312 |
+@@ -1253,6 +1253,8 @@ err_groups: |
11313 |
+ err_del: |
11314 |
+ kobject_del(&p->kobj); |
11315 |
+ err_put: |
11316 |
++ if (device->port_data && is_full_dev) |
11317 |
++ device->port_data[port_num].sysfs = NULL; |
11318 |
+ kobject_put(&p->kobj); |
11319 |
+ return ERR_PTR(ret); |
11320 |
+ } |
11321 |
+@@ -1261,14 +1263,17 @@ static void destroy_port(struct ib_core_device *coredev, struct ib_port *port) |
11322 |
+ { |
11323 |
+ bool is_full_dev = &port->ibdev->coredev == coredev; |
11324 |
+ |
11325 |
+- if (port->ibdev->port_data && |
11326 |
+- port->ibdev->port_data[port->port_num].sysfs == port) |
11327 |
+- port->ibdev->port_data[port->port_num].sysfs = NULL; |
11328 |
+ list_del(&port->kobj.entry); |
11329 |
+ if (is_full_dev) |
11330 |
+ sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups); |
11331 |
++ |
11332 |
+ sysfs_remove_groups(&port->kobj, port->groups_list); |
11333 |
+ kobject_del(&port->kobj); |
11334 |
++ |
11335 |
++ if (port->ibdev->port_data && |
11336 |
++ port->ibdev->port_data[port->port_num].sysfs == port) |
11337 |
++ port->ibdev->port_data[port->port_num].sysfs = NULL; |
11338 |
++ |
11339 |
+ kobject_put(&port->kobj); |
11340 |
+ } |
11341 |
+ |
11342 |
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c |
11343 |
+index 98c813ba4304c..4c403d9e90cb3 100644 |
11344 |
+--- a/drivers/infiniband/hw/hfi1/affinity.c |
11345 |
++++ b/drivers/infiniband/hw/hfi1/affinity.c |
11346 |
+@@ -178,6 +178,8 @@ out: |
11347 |
+ for (node = 0; node < node_affinity.num_possible_nodes; node++) |
11348 |
+ hfi1_per_node_cntr[node] = 1; |
11349 |
+ |
11350 |
++ pci_dev_put(dev); |
11351 |
++ |
11352 |
+ return 0; |
11353 |
+ } |
11354 |
+ |
11355 |
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c |
11356 |
+index 31e63e245ea92..ddf3217893f86 100644 |
11357 |
+--- a/drivers/infiniband/hw/hfi1/firmware.c |
11358 |
++++ b/drivers/infiniband/hw/hfi1/firmware.c |
11359 |
+@@ -1744,6 +1744,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11360 |
+ |
11361 |
+ if (!dd->platform_config.data) { |
11362 |
+ dd_dev_err(dd, "%s: Missing config file\n", __func__); |
11363 |
++ ret = -EINVAL; |
11364 |
+ goto bail; |
11365 |
+ } |
11366 |
+ ptr = (u32 *)dd->platform_config.data; |
11367 |
+@@ -1752,6 +1753,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11368 |
+ ptr++; |
11369 |
+ if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { |
11370 |
+ dd_dev_err(dd, "%s: Bad config file\n", __func__); |
11371 |
++ ret = -EINVAL; |
11372 |
+ goto bail; |
11373 |
+ } |
11374 |
+ |
11375 |
+@@ -1775,6 +1777,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11376 |
+ if (file_length > dd->platform_config.size) { |
11377 |
+ dd_dev_info(dd, "%s:File claims to be larger than read size\n", |
11378 |
+ __func__); |
11379 |
++ ret = -EINVAL; |
11380 |
+ goto bail; |
11381 |
+ } else if (file_length < dd->platform_config.size) { |
11382 |
+ dd_dev_info(dd, |
11383 |
+@@ -1795,6 +1798,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11384 |
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n", |
11385 |
+ __func__, (ptr - (u32 *) |
11386 |
+ dd->platform_config.data)); |
11387 |
++ ret = -EINVAL; |
11388 |
+ goto bail; |
11389 |
+ } |
11390 |
+ |
11391 |
+@@ -1838,6 +1842,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11392 |
+ __func__, table_type, |
11393 |
+ (ptr - (u32 *) |
11394 |
+ dd->platform_config.data)); |
11395 |
++ ret = -EINVAL; |
11396 |
+ goto bail; /* We don't trust this file now */ |
11397 |
+ } |
11398 |
+ pcfgcache->config_tables[table_type].table = ptr; |
11399 |
+@@ -1857,6 +1862,7 @@ int parse_platform_config(struct hfi1_devdata *dd) |
11400 |
+ __func__, table_type, |
11401 |
+ (ptr - |
11402 |
+ (u32 *)dd->platform_config.data)); |
11403 |
++ ret = -EINVAL; |
11404 |
+ goto bail; /* We don't trust this file now */ |
11405 |
+ } |
11406 |
+ pcfgcache->config_tables[table_type].table_metadata = |
11407 |
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
11408 |
+index 1421896abaf09..79d92b7899849 100644 |
11409 |
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
11410 |
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
11411 |
+@@ -151,21 +151,29 @@ static void set_atomic_seg(const struct ib_send_wr *wr, |
11412 |
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); |
11413 |
+ } |
11414 |
+ |
11415 |
++static unsigned int get_std_sge_num(struct hns_roce_qp *qp) |
11416 |
++{ |
11417 |
++ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) |
11418 |
++ return 0; |
11419 |
++ |
11420 |
++ return HNS_ROCE_SGE_IN_WQE; |
11421 |
++} |
11422 |
++ |
11423 |
+ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, |
11424 |
+ const struct ib_send_wr *wr, |
11425 |
+ unsigned int *sge_idx, u32 msg_len) |
11426 |
+ { |
11427 |
+ struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; |
11428 |
+- unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg); |
11429 |
+- unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len; |
11430 |
+ unsigned int left_len_in_pg; |
11431 |
+ unsigned int idx = *sge_idx; |
11432 |
++ unsigned int std_sge_num; |
11433 |
+ unsigned int i = 0; |
11434 |
+ unsigned int len; |
11435 |
+ void *addr; |
11436 |
+ void *dseg; |
11437 |
+ |
11438 |
+- if (msg_len > ext_sge_sz) { |
11439 |
++ std_sge_num = get_std_sge_num(qp); |
11440 |
++ if (msg_len > (qp->sq.max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE) { |
11441 |
+ ibdev_err(ibdev, |
11442 |
+ "no enough extended sge space for inline data.\n"); |
11443 |
+ return -EINVAL; |
11444 |
+@@ -185,7 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, |
11445 |
+ if (len <= left_len_in_pg) { |
11446 |
+ memcpy(dseg, addr, len); |
11447 |
+ |
11448 |
+- idx += len / dseg_len; |
11449 |
++ idx += len / HNS_ROCE_SGE_SIZE; |
11450 |
+ |
11451 |
+ i++; |
11452 |
+ if (i >= wr->num_sge) |
11453 |
+@@ -200,7 +208,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, |
11454 |
+ |
11455 |
+ len -= left_len_in_pg; |
11456 |
+ addr += left_len_in_pg; |
11457 |
+- idx += left_len_in_pg / dseg_len; |
11458 |
++ idx += left_len_in_pg / HNS_ROCE_SGE_SIZE; |
11459 |
+ dseg = hns_roce_get_extend_sge(qp, |
11460 |
+ idx & (qp->sge.sge_cnt - 1)); |
11461 |
+ left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT; |
11462 |
+@@ -1249,6 +1257,30 @@ static void update_cmdq_status(struct hns_roce_dev *hr_dev) |
11463 |
+ hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; |
11464 |
+ } |
11465 |
+ |
11466 |
++static int hns_roce_cmd_err_convert_errno(u16 desc_ret) |
11467 |
++{ |
11468 |
++ struct hns_roce_cmd_errcode errcode_table[] = { |
11469 |
++ {CMD_EXEC_SUCCESS, 0}, |
11470 |
++ {CMD_NO_AUTH, -EPERM}, |
11471 |
++ {CMD_NOT_EXIST, -EOPNOTSUPP}, |
11472 |
++ {CMD_CRQ_FULL, -EXFULL}, |
11473 |
++ {CMD_NEXT_ERR, -ENOSR}, |
11474 |
++ {CMD_NOT_EXEC, -ENOTBLK}, |
11475 |
++ {CMD_PARA_ERR, -EINVAL}, |
11476 |
++ {CMD_RESULT_ERR, -ERANGE}, |
11477 |
++ {CMD_TIMEOUT, -ETIME}, |
11478 |
++ {CMD_HILINK_ERR, -ENOLINK}, |
11479 |
++ {CMD_INFO_ILLEGAL, -ENXIO}, |
11480 |
++ {CMD_INVALID, -EBADR}, |
11481 |
++ }; |
11482 |
++ u16 i; |
11483 |
++ |
11484 |
++ for (i = 0; i < ARRAY_SIZE(errcode_table); i++) |
11485 |
++ if (desc_ret == errcode_table[i].return_status) |
11486 |
++ return errcode_table[i].errno; |
11487 |
++ return -EIO; |
11488 |
++} |
11489 |
++ |
11490 |
+ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, |
11491 |
+ struct hns_roce_cmq_desc *desc, int num) |
11492 |
+ { |
11493 |
+@@ -1293,7 +1325,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, |
11494 |
+ dev_err_ratelimited(hr_dev->dev, |
11495 |
+ "Cmdq IO error, opcode = %x, return = %x\n", |
11496 |
+ desc->opcode, desc_ret); |
11497 |
+- ret = -EIO; |
11498 |
++ ret = hns_roce_cmd_err_convert_errno(desc_ret); |
11499 |
+ } |
11500 |
+ } else { |
11501 |
+ /* FW/HW reset or incorrect number of desc */ |
11502 |
+@@ -2363,6 +2395,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) |
11503 |
+ V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, |
11504 |
+ V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); |
11505 |
+ |
11506 |
++ if (!(caps->page_size_cap & PAGE_SIZE)) |
11507 |
++ caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; |
11508 |
++ |
11509 |
+ return 0; |
11510 |
+ } |
11511 |
+ |
11512 |
+@@ -3016,7 +3051,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, |
11513 |
+ int i, count; |
11514 |
+ |
11515 |
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, |
11516 |
+- ARRAY_SIZE(pages), &pbl_ba); |
11517 |
++ min_t(int, ARRAY_SIZE(pages), mr->npages), |
11518 |
++ &pbl_ba); |
11519 |
+ if (count < 1) { |
11520 |
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", |
11521 |
+ count); |
11522 |
+@@ -5121,6 +5157,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
11523 |
+ |
11524 |
+ rdma_ah_set_sl(&qp_attr->ah_attr, |
11525 |
+ hr_reg_read(&context, QPC_SL)); |
11526 |
++ rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1); |
11527 |
++ rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); |
11528 |
+ grh->flow_label = hr_reg_read(&context, QPC_FL); |
11529 |
+ grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); |
11530 |
+ grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); |
11531 |
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
11532 |
+index 2f4a0019a716d..67f5b6fcfa1b1 100644 |
11533 |
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
11534 |
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
11535 |
+@@ -276,6 +276,11 @@ enum hns_roce_cmd_return_status { |
11536 |
+ CMD_OTHER_ERR = 0xff |
11537 |
+ }; |
11538 |
+ |
11539 |
++struct hns_roce_cmd_errcode { |
11540 |
++ enum hns_roce_cmd_return_status return_status; |
11541 |
++ int errno; |
11542 |
++}; |
11543 |
++ |
11544 |
+ enum hns_roce_sgid_type { |
11545 |
+ GID_TYPE_FLAG_ROCE_V1 = 0, |
11546 |
+ GID_TYPE_FLAG_ROCE_V2_IPV4, |
11547 |
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c |
11548 |
+index 20360df25771c..a593c142cd6ba 100644 |
11549 |
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c |
11550 |
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c |
11551 |
+@@ -415,10 +415,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
11552 |
+ |
11553 |
+ return &mr->ibmr; |
11554 |
+ |
11555 |
+-err_key: |
11556 |
+- free_mr_key(hr_dev, mr); |
11557 |
+ err_pbl: |
11558 |
+ free_mr_pbl(hr_dev, mr); |
11559 |
++err_key: |
11560 |
++ free_mr_key(hr_dev, mr); |
11561 |
+ err_free: |
11562 |
+ kfree(mr); |
11563 |
+ return ERR_PTR(ret); |
11564 |
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c |
11565 |
+index 911902d2b93e4..c5971a840b876 100644 |
11566 |
+--- a/drivers/infiniband/hw/irdma/verbs.c |
11567 |
++++ b/drivers/infiniband/hw/irdma/verbs.c |
11568 |
+@@ -60,36 +60,6 @@ static int irdma_query_device(struct ib_device *ibdev, |
11569 |
+ return 0; |
11570 |
+ } |
11571 |
+ |
11572 |
+-/** |
11573 |
+- * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed |
11574 |
+- * @link_speed: netdev phy link speed |
11575 |
+- * @active_speed: IB port speed |
11576 |
+- * @active_width: IB port width |
11577 |
+- */ |
11578 |
+-static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, |
11579 |
+- u8 *active_width) |
11580 |
+-{ |
11581 |
+- if (link_speed <= SPEED_1000) { |
11582 |
+- *active_width = IB_WIDTH_1X; |
11583 |
+- *active_speed = IB_SPEED_SDR; |
11584 |
+- } else if (link_speed <= SPEED_10000) { |
11585 |
+- *active_width = IB_WIDTH_1X; |
11586 |
+- *active_speed = IB_SPEED_FDR10; |
11587 |
+- } else if (link_speed <= SPEED_20000) { |
11588 |
+- *active_width = IB_WIDTH_4X; |
11589 |
+- *active_speed = IB_SPEED_DDR; |
11590 |
+- } else if (link_speed <= SPEED_25000) { |
11591 |
+- *active_width = IB_WIDTH_1X; |
11592 |
+- *active_speed = IB_SPEED_EDR; |
11593 |
+- } else if (link_speed <= SPEED_40000) { |
11594 |
+- *active_width = IB_WIDTH_4X; |
11595 |
+- *active_speed = IB_SPEED_FDR10; |
11596 |
+- } else { |
11597 |
+- *active_width = IB_WIDTH_4X; |
11598 |
+- *active_speed = IB_SPEED_EDR; |
11599 |
+- } |
11600 |
+-} |
11601 |
+- |
11602 |
+ /** |
11603 |
+ * irdma_query_port - get port attributes |
11604 |
+ * @ibdev: device pointer from stack |
11605 |
+@@ -117,8 +87,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port, |
11606 |
+ props->state = IB_PORT_DOWN; |
11607 |
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED; |
11608 |
+ } |
11609 |
+- irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed, |
11610 |
+- &props->active_width); |
11611 |
++ |
11612 |
++ ib_get_eth_speed(ibdev, port, &props->active_speed, |
11613 |
++ &props->active_width); |
11614 |
+ |
11615 |
+ if (rdma_protocol_roce(ibdev, 1)) { |
11616 |
+ props->gid_tbl_len = 32; |
11617 |
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c |
11618 |
+index 57ebf4871608d..d7a968356a9bb 100644 |
11619 |
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c |
11620 |
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c |
11621 |
+@@ -830,12 +830,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work) |
11622 |
+ qp->resp.mr = NULL; |
11623 |
+ } |
11624 |
+ |
11625 |
+- if (qp_type(qp) == IB_QPT_RC) |
11626 |
+- sk_dst_reset(qp->sk->sk); |
11627 |
+- |
11628 |
+ free_rd_atomic_resources(qp); |
11629 |
+ |
11630 |
+ if (qp->sk) { |
11631 |
++ if (qp_type(qp) == IB_QPT_RC) |
11632 |
++ sk_dst_reset(qp->sk->sk); |
11633 |
++ |
11634 |
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR); |
11635 |
+ sock_release(qp->sk); |
11636 |
+ } |
11637 |
+diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c |
11638 |
+index d68e37859e73b..403029de6b92d 100644 |
11639 |
+--- a/drivers/infiniband/sw/siw/siw_cq.c |
11640 |
++++ b/drivers/infiniband/sw/siw/siw_cq.c |
11641 |
+@@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) |
11642 |
+ if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { |
11643 |
+ memset(wc, 0, sizeof(*wc)); |
11644 |
+ wc->wr_id = cqe->id; |
11645 |
+- wc->status = map_cqe_status[cqe->status].ib; |
11646 |
+- wc->opcode = map_wc_opcode[cqe->opcode]; |
11647 |
+ wc->byte_len = cqe->bytes; |
11648 |
+ |
11649 |
+ /* |
11650 |
+@@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) |
11651 |
+ wc->wc_flags = IB_WC_WITH_INVALIDATE; |
11652 |
+ } |
11653 |
+ wc->qp = cqe->base_qp; |
11654 |
++ wc->opcode = map_wc_opcode[cqe->opcode]; |
11655 |
++ wc->status = map_cqe_status[cqe->status].ib; |
11656 |
+ siw_dbg_cq(cq, |
11657 |
+ "idx %u, type %d, flags %2x, id 0x%pK\n", |
11658 |
+ cq->cq_get % cq->num_cqe, cqe->opcode, |
11659 |
+ cqe->flags, (void *)(uintptr_t)cqe->id); |
11660 |
++ } else { |
11661 |
++ /* |
11662 |
++ * A malicious user may set invalid opcode or |
11663 |
++ * status in the user mmapped CQE array. |
11664 |
++ * Sanity check and correct values in that case |
11665 |
++ * to avoid out-of-bounds access to global arrays |
11666 |
++ * for opcode and status mapping. |
11667 |
++ */ |
11668 |
++ u8 opcode = cqe->opcode; |
11669 |
++ u16 status = cqe->status; |
11670 |
++ |
11671 |
++ if (opcode >= SIW_NUM_OPCODES) { |
11672 |
++ opcode = 0; |
11673 |
++ status = SIW_WC_GENERAL_ERR; |
11674 |
++ } else if (status >= SIW_NUM_WC_STATUS) { |
11675 |
++ status = SIW_WC_GENERAL_ERR; |
11676 |
++ } |
11677 |
++ wc->opcode = map_wc_opcode[opcode]; |
11678 |
++ wc->status = map_cqe_status[status].ib; |
11679 |
++ |
11680 |
+ } |
11681 |
+ WRITE_ONCE(cqe->flags, 0); |
11682 |
+ cq->cq_get++; |
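
The siw_reap_cqe() change above is defensive: the CQE ring can be mapped into user space, so cqe->opcode and cqe->status have to be range-checked before they are used as indices into the global map_wc_opcode[] and map_cqe_status[] tables. The same idea in isolation (the table and limits are made up):

#include <linux/compiler.h>     /* READ_ONCE() */
#include <linux/types.h>

#define DEMO_NUM_OPCODES 3

static const char * const demo_opcode_names[DEMO_NUM_OPCODES] = {
        "send", "write", "read",
};

/* idx lives in memory that user space can scribble on at any time. */
static const char *demo_opcode_name(const u8 *shared_idx)
{
        u8 idx = READ_ONCE(*shared_idx);        /* read the untrusted value once */

        if (idx >= DEMO_NUM_OPCODES)            /* clamp before indexing */
                idx = 0;

        return demo_opcode_names[idx];
}
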
11683 |
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c |
11684 |
+index 7d47b521070b1..05052b49107f2 100644 |
11685 |
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c |
11686 |
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c |
11687 |
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) |
11688 |
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); |
11689 |
+ |
11690 |
+ if (paddr) |
11691 |
+- return virt_to_page((void *)paddr); |
11692 |
++ return virt_to_page((void *)(uintptr_t)paddr); |
11693 |
+ |
11694 |
+ return NULL; |
11695 |
+ } |
11696 |
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c |
11697 |
+index ff33659acffa9..9c7fbda9e068a 100644 |
11698 |
+--- a/drivers/infiniband/sw/siw/siw_verbs.c |
11699 |
++++ b/drivers/infiniband/sw/siw/siw_verbs.c |
11700 |
+@@ -674,13 +674,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, |
11701 |
+ static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, |
11702 |
+ const struct ib_send_wr **bad_wr) |
11703 |
+ { |
11704 |
+- struct siw_sqe sqe = {}; |
11705 |
+ int rv = 0; |
11706 |
+ |
11707 |
+ while (wr) { |
11708 |
+- sqe.id = wr->wr_id; |
11709 |
+- sqe.opcode = wr->opcode; |
11710 |
+- rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR); |
11711 |
++ struct siw_sqe sqe = {}; |
11712 |
++ |
11713 |
++ switch (wr->opcode) { |
11714 |
++ case IB_WR_RDMA_WRITE: |
11715 |
++ sqe.opcode = SIW_OP_WRITE; |
11716 |
++ break; |
11717 |
++ case IB_WR_RDMA_READ: |
11718 |
++ sqe.opcode = SIW_OP_READ; |
11719 |
++ break; |
11720 |
++ case IB_WR_RDMA_READ_WITH_INV: |
11721 |
++ sqe.opcode = SIW_OP_READ_LOCAL_INV; |
11722 |
++ break; |
11723 |
++ case IB_WR_SEND: |
11724 |
++ sqe.opcode = SIW_OP_SEND; |
11725 |
++ break; |
11726 |
++ case IB_WR_SEND_WITH_IMM: |
11727 |
++ sqe.opcode = SIW_OP_SEND_WITH_IMM; |
11728 |
++ break; |
11729 |
++ case IB_WR_SEND_WITH_INV: |
11730 |
++ sqe.opcode = SIW_OP_SEND_REMOTE_INV; |
11731 |
++ break; |
11732 |
++ case IB_WR_LOCAL_INV: |
11733 |
++ sqe.opcode = SIW_OP_INVAL_STAG; |
11734 |
++ break; |
11735 |
++ case IB_WR_REG_MR: |
11736 |
++ sqe.opcode = SIW_OP_REG_MR; |
11737 |
++ break; |
11738 |
++ default: |
11739 |
++ rv = -EINVAL; |
11740 |
++ break; |
11741 |
++ } |
11742 |
++ if (!rv) { |
11743 |
++ sqe.id = wr->wr_id; |
11744 |
++ rv = siw_sqe_complete(qp, &sqe, 0, |
11745 |
++ SIW_WC_WR_FLUSH_ERR); |
11746 |
++ } |
11747 |
+ if (rv) { |
11748 |
+ if (bad_wr) |
11749 |
+ *bad_wr = wr; |
11750 |
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c |
11751 |
+index 5b05cf3837da1..28e9b70844e44 100644 |
11752 |
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c |
11753 |
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c |
11754 |
+@@ -42,6 +42,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { |
11755 |
+ [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, |
11756 |
+ }; |
11757 |
+ |
11758 |
++static unsigned int ipoib_get_max_num_queues(void) |
11759 |
++{ |
11760 |
++ return min_t(unsigned int, num_possible_cpus(), 128); |
11761 |
++} |
11762 |
++ |
11763 |
+ static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) |
11764 |
+ { |
11765 |
+ struct ipoib_dev_priv *priv = ipoib_priv(dev); |
11766 |
+@@ -173,6 +178,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = { |
11767 |
+ .changelink = ipoib_changelink, |
11768 |
+ .get_size = ipoib_get_size, |
11769 |
+ .fill_info = ipoib_fill_info, |
11770 |
++ .get_num_rx_queues = ipoib_get_max_num_queues, |
11771 |
++ .get_num_tx_queues = ipoib_get_max_num_queues, |
11772 |
+ }; |
11773 |
+ |
11774 |
+ struct rtnl_link_ops *ipoib_get_link_ops(void) |
11775 |
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c |
11776 |
+index 2f4991cea98c6..a6117a7d0ab17 100644 |
11777 |
+--- a/drivers/infiniband/ulp/srp/ib_srp.c |
11778 |
++++ b/drivers/infiniband/ulp/srp/ib_srp.c |
11779 |
+@@ -3397,7 +3397,8 @@ static int srp_parse_options(struct net *net, const char *buf, |
11780 |
+ break; |
11781 |
+ |
11782 |
+ case SRP_OPT_PKEY: |
11783 |
+- if (match_hex(args, &token)) { |
11784 |
++ ret = match_hex(args, &token); |
11785 |
++ if (ret) { |
11786 |
+ pr_warn("bad P_Key parameter '%s'\n", p); |
11787 |
+ goto out; |
11788 |
+ } |
11789 |
+@@ -3457,7 +3458,8 @@ static int srp_parse_options(struct net *net, const char *buf, |
11790 |
+ break; |
11791 |
+ |
11792 |
+ case SRP_OPT_MAX_SECT: |
11793 |
+- if (match_int(args, &token)) { |
11794 |
++ ret = match_int(args, &token); |
11795 |
++ if (ret) { |
11796 |
+ pr_warn("bad max sect parameter '%s'\n", p); |
11797 |
+ goto out; |
11798 |
+ } |
11799 |
+@@ -3465,8 +3467,15 @@ static int srp_parse_options(struct net *net, const char *buf, |
11800 |
+ break; |
11801 |
+ |
11802 |
+ case SRP_OPT_QUEUE_SIZE: |
11803 |
+- if (match_int(args, &token) || token < 1) { |
11804 |
++ ret = match_int(args, &token); |
11805 |
++ if (ret) { |
11806 |
++ pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", |
11807 |
++ p, ret); |
11808 |
++ goto out; |
11809 |
++ } |
11810 |
++ if (token < 1) { |
11811 |
+ pr_warn("bad queue_size parameter '%s'\n", p); |
11812 |
++ ret = -EINVAL; |
11813 |
+ goto out; |
11814 |
+ } |
11815 |
+ target->scsi_host->can_queue = token; |
11816 |
+@@ -3477,25 +3486,40 @@ static int srp_parse_options(struct net *net, const char *buf, |
11817 |
+ break; |
11818 |
+ |
11819 |
+ case SRP_OPT_MAX_CMD_PER_LUN: |
11820 |
+- if (match_int(args, &token) || token < 1) { |
11821 |
++ ret = match_int(args, &token); |
11822 |
++ if (ret) { |
11823 |
++ pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", |
11824 |
++ p, ret); |
11825 |
++ goto out; |
11826 |
++ } |
11827 |
++ if (token < 1) { |
11828 |
+ pr_warn("bad max cmd_per_lun parameter '%s'\n", |
11829 |
+ p); |
11830 |
++ ret = -EINVAL; |
11831 |
+ goto out; |
11832 |
+ } |
11833 |
+ target->scsi_host->cmd_per_lun = token; |
11834 |
+ break; |
11835 |
+ |
11836 |
+ case SRP_OPT_TARGET_CAN_QUEUE: |
11837 |
+- if (match_int(args, &token) || token < 1) { |
11838 |
++ ret = match_int(args, &token); |
11839 |
++ if (ret) { |
11840 |
++ pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", |
11841 |
++ p, ret); |
11842 |
++ goto out; |
11843 |
++ } |
11844 |
++ if (token < 1) { |
11845 |
+ pr_warn("bad max target_can_queue parameter '%s'\n", |
11846 |
+ p); |
11847 |
++ ret = -EINVAL; |
11848 |
+ goto out; |
11849 |
+ } |
11850 |
+ target->target_can_queue = token; |
11851 |
+ break; |
11852 |
+ |
11853 |
+ case SRP_OPT_IO_CLASS: |
11854 |
+- if (match_hex(args, &token)) { |
11855 |
++ ret = match_hex(args, &token); |
11856 |
++ if (ret) { |
11857 |
+ pr_warn("bad IO class parameter '%s'\n", p); |
11858 |
+ goto out; |
11859 |
+ } |
11860 |
+@@ -3504,6 +3528,7 @@ static int srp_parse_options(struct net *net, const char *buf, |
11861 |
+ pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", |
11862 |
+ token, SRP_REV10_IB_IO_CLASS, |
11863 |
+ SRP_REV16A_IB_IO_CLASS); |
11864 |
++ ret = -EINVAL; |
11865 |
+ goto out; |
11866 |
+ } |
11867 |
+ target->io_class = token; |
11868 |
+@@ -3526,16 +3551,24 @@ static int srp_parse_options(struct net *net, const char *buf, |
11869 |
+ break; |
11870 |
+ |
11871 |
+ case SRP_OPT_CMD_SG_ENTRIES: |
11872 |
+- if (match_int(args, &token) || token < 1 || token > 255) { |
11873 |
++ ret = match_int(args, &token); |
11874 |
++ if (ret) { |
11875 |
++ pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", |
11876 |
++ p, ret); |
11877 |
++ goto out; |
11878 |
++ } |
11879 |
++ if (token < 1 || token > 255) { |
11880 |
+ pr_warn("bad max cmd_sg_entries parameter '%s'\n", |
11881 |
+ p); |
11882 |
++ ret = -EINVAL; |
11883 |
+ goto out; |
11884 |
+ } |
11885 |
+ target->cmd_sg_cnt = token; |
11886 |
+ break; |
11887 |
+ |
11888 |
+ case SRP_OPT_ALLOW_EXT_SG: |
11889 |
+- if (match_int(args, &token)) { |
11890 |
++ ret = match_int(args, &token); |
11891 |
++ if (ret) { |
11892 |
+ pr_warn("bad allow_ext_sg parameter '%s'\n", p); |
11893 |
+ goto out; |
11894 |
+ } |
11895 |
+@@ -3543,43 +3576,77 @@ static int srp_parse_options(struct net *net, const char *buf, |
11896 |
+ break; |
11897 |
+ |
11898 |
+ case SRP_OPT_SG_TABLESIZE: |
11899 |
+- if (match_int(args, &token) || token < 1 || |
11900 |
+- token > SG_MAX_SEGMENTS) { |
11901 |
++ ret = match_int(args, &token); |
11902 |
++ if (ret) { |
11903 |
++ pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", |
11904 |
++ p, ret); |
11905 |
++ goto out; |
11906 |
++ } |
11907 |
++ if (token < 1 || token > SG_MAX_SEGMENTS) { |
11908 |
+ pr_warn("bad max sg_tablesize parameter '%s'\n", |
11909 |
+ p); |
11910 |
++ ret = -EINVAL; |
11911 |
+ goto out; |
11912 |
+ } |
11913 |
+ target->sg_tablesize = token; |
11914 |
+ break; |
11915 |
+ |
11916 |
+ case SRP_OPT_COMP_VECTOR: |
11917 |
+- if (match_int(args, &token) || token < 0) { |
11918 |
++ ret = match_int(args, &token); |
11919 |
++ if (ret) { |
11920 |
++ pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", |
11921 |
++ p, ret); |
11922 |
++ goto out; |
11923 |
++ } |
11924 |
++ if (token < 0) { |
11925 |
+ pr_warn("bad comp_vector parameter '%s'\n", p); |
11926 |
++ ret = -EINVAL; |
11927 |
+ goto out; |
11928 |
+ } |
11929 |
+ target->comp_vector = token; |
11930 |
+ break; |
11931 |
+ |
11932 |
+ case SRP_OPT_TL_RETRY_COUNT: |
11933 |
+- if (match_int(args, &token) || token < 2 || token > 7) { |
11934 |
++ ret = match_int(args, &token); |
11935 |
++ if (ret) { |
11936 |
++ pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", |
11937 |
++ p, ret); |
11938 |
++ goto out; |
11939 |
++ } |
11940 |
++ if (token < 2 || token > 7) { |
11941 |
+ pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", |
11942 |
+ p); |
11943 |
++ ret = -EINVAL; |
11944 |
+ goto out; |
11945 |
+ } |
11946 |
+ target->tl_retry_count = token; |
11947 |
+ break; |
11948 |
+ |
11949 |
+ case SRP_OPT_MAX_IT_IU_SIZE: |
11950 |
+- if (match_int(args, &token) || token < 0) { |
11951 |
++ ret = match_int(args, &token); |
11952 |
++ if (ret) { |
11953 |
++ pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", |
11954 |
++ p, ret); |
11955 |
++ goto out; |
11956 |
++ } |
11957 |
++ if (token < 0) { |
11958 |
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p); |
11959 |
++ ret = -EINVAL; |
11960 |
+ goto out; |
11961 |
+ } |
11962 |
+ target->max_it_iu_size = token; |
11963 |
+ break; |
11964 |
+ |
11965 |
+ case SRP_OPT_CH_COUNT: |
11966 |
+- if (match_int(args, &token) || token < 1) { |
11967 |
++ ret = match_int(args, &token); |
11968 |
++ if (ret) { |
11969 |
++ pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", |
11970 |
++ p, ret); |
11971 |
++ goto out; |
11972 |
++ } |
11973 |
++ if (token < 1) { |
11974 |
+ pr_warn("bad channel count %s\n", p); |
11975 |
++ ret = -EINVAL; |
11976 |
+ goto out; |
11977 |
+ } |
11978 |
+ target->ch_count = token; |
11979 |
+@@ -3588,6 +3655,7 @@ static int srp_parse_options(struct net *net, const char *buf, |
11980 |
+ default: |
11981 |
+ pr_warn("unknown parameter or missing value '%s' in target creation request\n", |
11982 |
+ p); |
11983 |
++ ret = -EINVAL; |
11984 |
+ goto out; |
11985 |
+ } |
11986 |
+ } |
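
Every srp_parse_options() hunk above applies the same fix: the return value of match_int()/match_hex() is captured in ret and propagated, and range failures set ret = -EINVAL explicitly, instead of jumping to out with whatever ret happened to contain. A stripped-down sketch of that token-parsing style, using the real <linux/parser.h> helpers but an invented option table:

#include <linux/parser.h>
#include <linux/errno.h>

enum { DEMO_OPT_QUEUE_SIZE, DEMO_OPT_ERR };

static const match_table_t demo_tokens = {
        { DEMO_OPT_QUEUE_SIZE, "queue_size=%d" },
        { DEMO_OPT_ERR, NULL }
};

static int demo_parse_option(char *p, int *queue_size)
{
        substring_t args[MAX_OPT_ARGS];
        int token, ret;

        switch (match_token(p, demo_tokens, args)) {
        case DEMO_OPT_QUEUE_SIZE:
                ret = match_int(args, &token);
                if (ret)                /* propagate the parser's own error */
                        return ret;
                if (token < 1)          /* range check gets its own errno */
                        return -EINVAL;
                *queue_size = token;
                return 0;
        default:
                return -EINVAL;
        }
}
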
11987 |
+diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig |
11988 |
+index 3b23078bc7b5b..db4135bbd279a 100644 |
11989 |
+--- a/drivers/input/joystick/Kconfig |
11990 |
++++ b/drivers/input/joystick/Kconfig |
11991 |
+@@ -46,6 +46,7 @@ config JOYSTICK_A3D |
11992 |
+ config JOYSTICK_ADC |
11993 |
+ tristate "Simple joystick connected over ADC" |
11994 |
+ depends on IIO |
11995 |
++ select IIO_BUFFER |
11996 |
+ select IIO_BUFFER_CB |
11997 |
+ help |
11998 |
+ Say Y here if you have a simple joystick connected over ADC. |
11999 |
+diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig |
12000 |
+index dd5227cf86964..b5b8ddb536be4 100644 |
12001 |
+--- a/drivers/input/misc/Kconfig |
12002 |
++++ b/drivers/input/misc/Kconfig |
12003 |
+@@ -330,7 +330,7 @@ config INPUT_CPCAP_PWRBUTTON |
12004 |
+ |
12005 |
+ config INPUT_WISTRON_BTNS |
12006 |
+ tristate "x86 Wistron laptop button interface" |
12007 |
+- depends on X86_32 |
12008 |
++ depends on X86_32 && !UML |
12009 |
+ select INPUT_SPARSEKMAP |
12010 |
+ select NEW_LEDS |
12011 |
+ select LEDS_CLASS |
12012 |
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c |
12013 |
+index b9e2219efbb8f..6858a3e20a0cc 100644 |
12014 |
+--- a/drivers/input/touchscreen/elants_i2c.c |
12015 |
++++ b/drivers/input/touchscreen/elants_i2c.c |
12016 |
+@@ -1329,14 +1329,12 @@ static int elants_i2c_power_on(struct elants_data *ts) |
12017 |
+ if (IS_ERR_OR_NULL(ts->reset_gpio)) |
12018 |
+ return 0; |
12019 |
+ |
12020 |
+- gpiod_set_value_cansleep(ts->reset_gpio, 1); |
12021 |
+- |
12022 |
+ error = regulator_enable(ts->vcc33); |
12023 |
+ if (error) { |
12024 |
+ dev_err(&ts->client->dev, |
12025 |
+ "failed to enable vcc33 regulator: %d\n", |
12026 |
+ error); |
12027 |
+- goto release_reset_gpio; |
12028 |
++ return error; |
12029 |
+ } |
12030 |
+ |
12031 |
+ error = regulator_enable(ts->vccio); |
12032 |
+@@ -1345,7 +1343,7 @@ static int elants_i2c_power_on(struct elants_data *ts) |
12033 |
+ "failed to enable vccio regulator: %d\n", |
12034 |
+ error); |
12035 |
+ regulator_disable(ts->vcc33); |
12036 |
+- goto release_reset_gpio; |
12037 |
++ return error; |
12038 |
+ } |
12039 |
+ |
12040 |
+ /* |
12041 |
+@@ -1354,7 +1352,6 @@ static int elants_i2c_power_on(struct elants_data *ts) |
12042 |
+ */ |
12043 |
+ udelay(ELAN_POWERON_DELAY_USEC); |
12044 |
+ |
12045 |
+-release_reset_gpio: |
12046 |
+ gpiod_set_value_cansleep(ts->reset_gpio, 0); |
12047 |
+ if (error) |
12048 |
+ return error; |
12049 |
+@@ -1462,7 +1459,7 @@ static int elants_i2c_probe(struct i2c_client *client) |
12050 |
+ return error; |
12051 |
+ } |
12052 |
+ |
12053 |
+- ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW); |
12054 |
++ ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH); |
12055 |
+ if (IS_ERR(ts->reset_gpio)) { |
12056 |
+ error = PTR_ERR(ts->reset_gpio); |
12057 |
+ |
12058 |
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c |
12059 |
+index a45c5536d2506..c96cf9b217197 100644 |
12060 |
+--- a/drivers/iommu/amd/iommu_v2.c |
12061 |
++++ b/drivers/iommu/amd/iommu_v2.c |
12062 |
+@@ -588,6 +588,7 @@ out_drop_state: |
12063 |
+ put_device_state(dev_state); |
12064 |
+ |
12065 |
+ out: |
12066 |
++ pci_dev_put(pdev); |
12067 |
+ return ret; |
12068 |
+ } |
12069 |
+ |
12070 |
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c |
12071 |
+index fc38b1fba7cff..bb5d253188a18 100644 |
12072 |
+--- a/drivers/iommu/fsl_pamu.c |
12073 |
++++ b/drivers/iommu/fsl_pamu.c |
12074 |
+@@ -865,7 +865,7 @@ static int fsl_pamu_probe(struct platform_device *pdev) |
12075 |
+ ret = create_csd(ppaact_phys, mem_size, csd_port_id); |
12076 |
+ if (ret) { |
12077 |
+ dev_err(dev, "could not create coherence subdomain\n"); |
12078 |
+- return ret; |
12079 |
++ goto error; |
12080 |
+ } |
12081 |
+ } |
12082 |
+ |
12083 |
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c |
12084 |
+index 7f23ad61c094f..823f1a7d8c6e2 100644 |
12085 |
+--- a/drivers/iommu/rockchip-iommu.c |
12086 |
++++ b/drivers/iommu/rockchip-iommu.c |
12087 |
+@@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot) |
12088 |
+ * 11:9 - Page address bit 34:32 |
12089 |
+ * 8:4 - Page address bit 39:35 |
12090 |
+ * 3 - Security |
12091 |
+- * 2 - Readable |
12092 |
+- * 1 - Writable |
12093 |
++ * 2 - Writable |
12094 |
++ * 1 - Readable |
12095 |
+ * 0 - 1 if Page @ Page address is valid |
12096 |
+ */ |
12097 |
+-#define RK_PTE_PAGE_READABLE_V2 BIT(2) |
12098 |
+-#define RK_PTE_PAGE_WRITABLE_V2 BIT(1) |
12099 |
+ |
12100 |
+ static u32 rk_mk_pte_v2(phys_addr_t page, int prot) |
12101 |
+ { |
12102 |
+ u32 flags = 0; |
12103 |
+ |
12104 |
+- flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0; |
12105 |
+- flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0; |
12106 |
++ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; |
12107 |
++ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0; |
12108 |
+ |
12109 |
+ return rk_mk_dte_v2(page) | flags; |
12110 |
+ } |
12111 |
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c |
12112 |
+index 92997021e188a..ed35741955997 100644 |
12113 |
+--- a/drivers/iommu/sun50i-iommu.c |
12114 |
++++ b/drivers/iommu/sun50i-iommu.c |
12115 |
+@@ -27,6 +27,7 @@ |
12116 |
+ #include <linux/types.h> |
12117 |
+ |
12118 |
+ #define IOMMU_RESET_REG 0x010 |
12119 |
++#define IOMMU_RESET_RELEASE_ALL 0xffffffff |
12120 |
+ #define IOMMU_ENABLE_REG 0x020 |
12121 |
+ #define IOMMU_ENABLE_ENABLE BIT(0) |
12122 |
+ |
12123 |
+@@ -270,7 +271,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot) |
12124 |
+ enum sun50i_iommu_aci aci; |
12125 |
+ u32 flags = 0; |
12126 |
+ |
12127 |
+- if (prot & (IOMMU_READ | IOMMU_WRITE)) |
12128 |
++ if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE)) |
12129 |
+ aci = SUN50I_IOMMU_ACI_RD_WR; |
12130 |
+ else if (prot & IOMMU_READ) |
12131 |
+ aci = SUN50I_IOMMU_ACI_RD; |
12132 |
+@@ -511,7 +512,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain, |
12133 |
+ sun50i_iommu_free_page_table(iommu, drop_pt); |
12134 |
+ } |
12135 |
+ |
12136 |
+- sun50i_table_flush(sun50i_domain, page_table, PT_SIZE); |
12137 |
++ sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES); |
12138 |
+ sun50i_table_flush(sun50i_domain, dte_addr, 1); |
12139 |
+ |
12140 |
+ return page_table; |
12141 |
+@@ -601,7 +602,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type) |
12142 |
+ struct sun50i_iommu_domain *sun50i_domain; |
12143 |
+ |
12144 |
+ if (type != IOMMU_DOMAIN_DMA && |
12145 |
+- type != IOMMU_DOMAIN_IDENTITY && |
12146 |
+ type != IOMMU_DOMAIN_UNMANAGED) |
12147 |
+ return NULL; |
12148 |
+ |
12149 |
+@@ -869,8 +869,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) |
12150 |
+ |
12151 |
+ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) |
12152 |
+ { |
12153 |
++ u32 status, l1_status, l2_status, resets; |
12154 |
+ struct sun50i_iommu *iommu = dev_id; |
12155 |
+- u32 status; |
12156 |
+ |
12157 |
+ spin_lock(&iommu->iommu_lock); |
12158 |
+ |
12159 |
+@@ -880,6 +880,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) |
12160 |
+ return IRQ_NONE; |
12161 |
+ } |
12162 |
+ |
12163 |
++ l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG); |
12164 |
++ l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG); |
12165 |
++ |
12166 |
+ if (status & IOMMU_INT_INVALID_L2PG) |
12167 |
+ sun50i_iommu_handle_pt_irq(iommu, |
12168 |
+ IOMMU_INT_ERR_ADDR_L2_REG, |
12169 |
+@@ -893,8 +896,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id) |
12170 |
+ |
12171 |
+ iommu_write(iommu, IOMMU_INT_CLR_REG, status); |
12172 |
+ |
12173 |
+- iommu_write(iommu, IOMMU_RESET_REG, ~status); |
12174 |
+- iommu_write(iommu, IOMMU_RESET_REG, status); |
12175 |
++ resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK; |
12176 |
++ iommu_write(iommu, IOMMU_RESET_REG, ~resets); |
12177 |
++ iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL); |
12178 |
+ |
12179 |
+ spin_unlock(&iommu->iommu_lock); |
12180 |
+ |
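
One of the sun50i hunks above tightens sun50i_mk_pte(): "prot & (IOMMU_READ | IOMMU_WRITE)" is non-zero as soon as either bit is set, so read-only and write-only mappings both took the read-write branch; the fix compares the masked value against the full mask. The distinction in miniature (the flag names here are arbitrary):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_PROT_READ  BIT(0)
#define DEMO_PROT_WRITE BIT(1)

/* Loose form: also true for read-only or write-only requests. */
static bool demo_is_rdwr_loose(u32 prot)
{
        return prot & (DEMO_PROT_READ | DEMO_PROT_WRITE);
}

/* Strict form: true only when both bits are present. */
static bool demo_is_rdwr_strict(u32 prot)
{
        return (prot & (DEMO_PROT_READ | DEMO_PROT_WRITE)) ==
               (DEMO_PROT_READ | DEMO_PROT_WRITE);
}
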
12181 |
+diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c |
12182 |
+index b60e1853593f4..3989d16f997b3 100644 |
12183 |
+--- a/drivers/irqchip/irq-gic-pm.c |
12184 |
++++ b/drivers/irqchip/irq-gic-pm.c |
12185 |
+@@ -102,7 +102,7 @@ static int gic_probe(struct platform_device *pdev) |
12186 |
+ |
12187 |
+ pm_runtime_enable(dev); |
12188 |
+ |
12189 |
+- ret = pm_runtime_get_sync(dev); |
12190 |
++ ret = pm_runtime_resume_and_get(dev); |
12191 |
+ if (ret < 0) |
12192 |
+ goto rpm_disable; |
12193 |
+ |
12194 |
+diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c |
12195 |
+index f3ac392d5bc87..36d0d0cf3fa25 100644 |
12196 |
+--- a/drivers/irqchip/irq-wpcm450-aic.c |
12197 |
++++ b/drivers/irqchip/irq-wpcm450-aic.c |
12198 |
+@@ -146,6 +146,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node, |
12199 |
+ aic->regs = of_iomap(node, 0); |
12200 |
+ if (!aic->regs) { |
12201 |
+ pr_err("Failed to map WPCM450 AIC registers\n"); |
12202 |
++ kfree(aic); |
12203 |
+ return -ENOMEM; |
12204 |
+ } |
12205 |
+ |
12206 |
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c |
12207 |
+index 4f7eaa17fb274..e840609c50eb7 100644 |
12208 |
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c |
12209 |
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c |
12210 |
+@@ -3217,6 +3217,7 @@ static int |
12211 |
+ hfcm_l1callback(struct dchannel *dch, u_int cmd) |
12212 |
+ { |
12213 |
+ struct hfc_multi *hc = dch->hw; |
12214 |
++ struct sk_buff_head free_queue; |
12215 |
+ u_long flags; |
12216 |
+ |
12217 |
+ switch (cmd) { |
12218 |
+@@ -3245,6 +3246,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) |
12219 |
+ l1_event(dch->l1, HW_POWERUP_IND); |
12220 |
+ break; |
12221 |
+ case HW_DEACT_REQ: |
12222 |
++ __skb_queue_head_init(&free_queue); |
12223 |
+ /* start deactivation */ |
12224 |
+ spin_lock_irqsave(&hc->lock, flags); |
12225 |
+ if (hc->ctype == HFC_TYPE_E1) { |
12226 |
+@@ -3264,20 +3266,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd) |
12227 |
+ plxsd_checksync(hc, 0); |
12228 |
+ } |
12229 |
+ } |
12230 |
+- skb_queue_purge(&dch->squeue); |
12231 |
++ skb_queue_splice_init(&dch->squeue, &free_queue); |
12232 |
+ if (dch->tx_skb) { |
12233 |
+- dev_kfree_skb(dch->tx_skb); |
12234 |
++ __skb_queue_tail(&free_queue, dch->tx_skb); |
12235 |
+ dch->tx_skb = NULL; |
12236 |
+ } |
12237 |
+ dch->tx_idx = 0; |
12238 |
+ if (dch->rx_skb) { |
12239 |
+- dev_kfree_skb(dch->rx_skb); |
12240 |
++ __skb_queue_tail(&free_queue, dch->rx_skb); |
12241 |
+ dch->rx_skb = NULL; |
12242 |
+ } |
12243 |
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); |
12244 |
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) |
12245 |
+ del_timer(&dch->timer); |
12246 |
+ spin_unlock_irqrestore(&hc->lock, flags); |
12247 |
++ __skb_queue_purge(&free_queue); |
12248 |
+ break; |
12249 |
+ case HW_POWERUP_REQ: |
12250 |
+ spin_lock_irqsave(&hc->lock, flags); |
12251 |
+@@ -3384,6 +3387,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) |
12252 |
+ case PH_DEACTIVATE_REQ: |
12253 |
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); |
12254 |
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) { |
12255 |
++ struct sk_buff_head free_queue; |
12256 |
++ |
12257 |
++ __skb_queue_head_init(&free_queue); |
12258 |
+ spin_lock_irqsave(&hc->lock, flags); |
12259 |
+ if (debug & DEBUG_HFCMULTI_MSG) |
12260 |
+ printk(KERN_DEBUG |
12261 |
+@@ -3405,14 +3411,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) |
12262 |
+ /* deactivate */ |
12263 |
+ dch->state = 1; |
12264 |
+ } |
12265 |
+- skb_queue_purge(&dch->squeue); |
12266 |
++ skb_queue_splice_init(&dch->squeue, &free_queue); |
12267 |
+ if (dch->tx_skb) { |
12268 |
+- dev_kfree_skb(dch->tx_skb); |
12269 |
++ __skb_queue_tail(&free_queue, dch->tx_skb); |
12270 |
+ dch->tx_skb = NULL; |
12271 |
+ } |
12272 |
+ dch->tx_idx = 0; |
12273 |
+ if (dch->rx_skb) { |
12274 |
+- dev_kfree_skb(dch->rx_skb); |
12275 |
++ __skb_queue_tail(&free_queue, dch->rx_skb); |
12276 |
+ dch->rx_skb = NULL; |
12277 |
+ } |
12278 |
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); |
12279 |
+@@ -3424,6 +3430,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) |
12280 |
+ #endif |
12281 |
+ ret = 0; |
12282 |
+ spin_unlock_irqrestore(&hc->lock, flags); |
12283 |
++ __skb_queue_purge(&free_queue); |
12284 |
+ } else |
12285 |
+ ret = l1_event(dch->l1, hh->prim); |
12286 |
+ break; |
12287 |
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c |
12288 |
+index af17459c1a5c0..eba58b99cd29d 100644 |
12289 |
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c |
12290 |
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c |
12291 |
+@@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) |
12292 |
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); |
12293 |
+ spin_lock_irqsave(&hc->lock, flags); |
12294 |
+ if (hc->hw.protocol == ISDN_P_NT_S0) { |
12295 |
++ struct sk_buff_head free_queue; |
12296 |
++ |
12297 |
++ __skb_queue_head_init(&free_queue); |
12298 |
+ /* prepare deactivation */ |
12299 |
+ Write_hfc(hc, HFCPCI_STATES, 0x40); |
12300 |
+- skb_queue_purge(&dch->squeue); |
12301 |
++ skb_queue_splice_init(&dch->squeue, &free_queue); |
12302 |
+ if (dch->tx_skb) { |
12303 |
+- dev_kfree_skb(dch->tx_skb); |
12304 |
++ __skb_queue_tail(&free_queue, dch->tx_skb); |
12305 |
+ dch->tx_skb = NULL; |
12306 |
+ } |
12307 |
+ dch->tx_idx = 0; |
12308 |
+ if (dch->rx_skb) { |
12309 |
+- dev_kfree_skb(dch->rx_skb); |
12310 |
++ __skb_queue_tail(&free_queue, dch->rx_skb); |
12311 |
+ dch->rx_skb = NULL; |
12312 |
+ } |
12313 |
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); |
12314 |
+@@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) |
12315 |
+ hc->hw.mst_m &= ~HFCPCI_MASTER; |
12316 |
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); |
12317 |
+ ret = 0; |
12318 |
++ spin_unlock_irqrestore(&hc->lock, flags); |
12319 |
++ __skb_queue_purge(&free_queue); |
12320 |
+ } else { |
12321 |
+ ret = l1_event(dch->l1, hh->prim); |
12322 |
++ spin_unlock_irqrestore(&hc->lock, flags); |
12323 |
+ } |
12324 |
+- spin_unlock_irqrestore(&hc->lock, flags); |
12325 |
+ break; |
12326 |
+ } |
12327 |
+ if (!ret) |
12328 |
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c |
12329 |
+index cd5642cef01fd..e8b37bd5e34a3 100644 |
12330 |
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c |
12331 |
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c |
12332 |
+@@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) |
12333 |
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); |
12334 |
+ |
12335 |
+ if (hw->protocol == ISDN_P_NT_S0) { |
12336 |
++ struct sk_buff_head free_queue; |
12337 |
++ |
12338 |
++ __skb_queue_head_init(&free_queue); |
12339 |
+ hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT); |
12340 |
+ spin_lock_irqsave(&hw->lock, flags); |
12341 |
+- skb_queue_purge(&dch->squeue); |
12342 |
++ skb_queue_splice_init(&dch->squeue, &free_queue); |
12343 |
+ if (dch->tx_skb) { |
12344 |
+- dev_kfree_skb(dch->tx_skb); |
12345 |
++ __skb_queue_tail(&free_queue, dch->tx_skb); |
12346 |
+ dch->tx_skb = NULL; |
12347 |
+ } |
12348 |
+ dch->tx_idx = 0; |
12349 |
+ if (dch->rx_skb) { |
12350 |
+- dev_kfree_skb(dch->rx_skb); |
12351 |
++ __skb_queue_tail(&free_queue, dch->rx_skb); |
12352 |
+ dch->rx_skb = NULL; |
12353 |
+ } |
12354 |
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); |
12355 |
+ spin_unlock_irqrestore(&hw->lock, flags); |
12356 |
++ __skb_queue_purge(&free_queue); |
12357 |
+ #ifdef FIXME |
12358 |
+ if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) |
12359 |
+ dchannel_sched_event(&hc->dch, D_CLEARBUSY); |
12360 |
+@@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb) |
12361 |
+ printk("\n"); |
12362 |
+ } |
12363 |
+ |
12364 |
+- dev_kfree_skb(tx_skb); |
12365 |
++ dev_consume_skb_irq(tx_skb); |
12366 |
+ tx_skb = NULL; |
12367 |
+ if (fifo->dch && get_next_dframe(fifo->dch)) |
12368 |
+ tx_skb = fifo->dch->tx_skb; |
12369 |
+diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c |
12370 |
+index dc634c2932fd3..dd24655861401 100644 |
12371 |
+--- a/drivers/macintosh/macio-adb.c |
12372 |
++++ b/drivers/macintosh/macio-adb.c |
12373 |
+@@ -105,6 +105,10 @@ int macio_init(void) |
12374 |
+ return -ENXIO; |
12375 |
+ } |
12376 |
+ adb = ioremap(r.start, sizeof(struct adb_regs)); |
12377 |
++ if (!adb) { |
12378 |
++ of_node_put(adbs); |
12379 |
++ return -ENOMEM; |
12380 |
++ } |
12381 |
+ |
12382 |
+ out_8(&adb->ctrl.r, 0); |
12383 |
+ out_8(&adb->intr.r, 0); |
12384 |
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c |
12385 |
+index c1fdf28960216..df69d648f6d0a 100644 |
12386 |
+--- a/drivers/macintosh/macio_asic.c |
12387 |
++++ b/drivers/macintosh/macio_asic.c |
12388 |
+@@ -423,7 +423,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, |
12389 |
+ if (of_device_register(&dev->ofdev) != 0) { |
12390 |
+ printk(KERN_DEBUG"macio: device registration error for %s!\n", |
12391 |
+ dev_name(&dev->ofdev.dev)); |
12392 |
+- kfree(dev); |
12393 |
++ put_device(&dev->ofdev.dev); |
12394 |
+ return NULL; |
12395 |
+ } |
12396 |
+ |
12397 |
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c |
12398 |
+index d997f8ebfa98c..3af15083a25af 100644 |
12399 |
+--- a/drivers/mailbox/arm_mhuv2.c |
12400 |
++++ b/drivers/mailbox/arm_mhuv2.c |
12401 |
+@@ -1061,8 +1061,8 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id) |
12402 |
+ int ret = -EINVAL; |
12403 |
+ |
12404 |
+ reg = devm_of_iomap(dev, dev->of_node, 0, NULL); |
12405 |
+- if (!reg) |
12406 |
+- return -ENOMEM; |
12407 |
++ if (IS_ERR(reg)) |
12408 |
++ return PTR_ERR(reg); |
12409 |
+ |
12410 |
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); |
12411 |
+ if (!mhu) |
12412 |
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c |
12413 |
+index cfacb3f320a64..853901acaeec2 100644 |
12414 |
+--- a/drivers/mailbox/mailbox-mpfs.c |
12415 |
++++ b/drivers/mailbox/mailbox-mpfs.c |
12416 |
+@@ -2,7 +2,7 @@ |
12417 |
+ /* |
12418 |
+ * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver |
12419 |
+ * |
12420 |
+- * Copyright (c) 2020 Microchip Corporation. All rights reserved. |
12421 |
++ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. |
12422 |
+ * |
12423 |
+ * Author: Conor Dooley <conor.dooley@×××××××××.com> |
12424 |
+ * |
12425 |
+@@ -56,7 +56,7 @@ |
12426 |
+ #define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY) |
12427 |
+ |
12428 |
+ #define SCB_STATUS_POS (16) |
12429 |
+-#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS) |
12430 |
++#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS) |
12431 |
+ |
12432 |
+ struct mpfs_mbox { |
12433 |
+ struct mbox_controller controller; |
12434 |
+@@ -130,13 +130,38 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan) |
12435 |
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv; |
12436 |
+ struct mpfs_mss_response *response = mbox->response; |
12437 |
+ u16 num_words = ALIGN((response->resp_size), (4)) / 4U; |
12438 |
+- u32 i; |
12439 |
++ u32 i, status; |
12440 |
+ |
12441 |
+ if (!response->resp_msg) { |
12442 |
+ dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM); |
12443 |
+ return; |
12444 |
+ } |
12445 |
+ |
12446 |
++ /* |
12447 |
++ * The status is stored in bits 31:16 of the SERVICES_SR register. |
12448 |
++ * It is only valid when BUSY == 0. |
12449 |
++ * We should *never* get an interrupt while the controller is |
12450 |
++ * still in the busy state. If we do, something has gone badly |
12451 |
++ * wrong & the content of the mailbox would not be valid. |
12452 |
++ */ |
12453 |
++ if (mpfs_mbox_busy(mbox)) { |
12454 |
++ dev_err(mbox->dev, "got an interrupt but system controller is busy\n"); |
12455 |
++ response->resp_status = 0xDEAD; |
12456 |
++ return; |
12457 |
++ } |
12458 |
++ |
12459 |
++ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET); |
12460 |
++ |
12461 |
++ /* |
12462 |
++ * If the status of the individual servers is non-zero, the service has |
12463 |
++ * failed. The contents of the mailbox at this point are not be valid, |
12464 |
++ * so don't bother reading them. Set the status so that the driver |
12465 |
++ * implementing the service can handle the result. |
12466 |
++ */ |
12467 |
++ response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS; |
12468 |
++ if (response->resp_status) |
12469 |
++ return; |
12470 |
++ |
12471 |
+ if (!mpfs_mbox_busy(mbox)) { |
12472 |
+ for (i = 0; i < num_words; i++) { |
12473 |
+ response->resp_msg[i] = |
12474 |
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c |
12475 |
+index f44079d62b1a7..527204c6d5cd0 100644 |
12476 |
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c |
12477 |
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c |
12478 |
+@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, |
12479 |
+ ret = device_register(&ipi_mbox->dev); |
12480 |
+ if (ret) { |
12481 |
+ dev_err(dev, "Failed to register ipi mbox dev.\n"); |
12482 |
++ put_device(&ipi_mbox->dev); |
12483 |
+ return ret; |
12484 |
+ } |
12485 |
+ mdev = &ipi_mbox->dev; |
12486 |
+@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) |
12487 |
+ ipi_mbox = &pdata->ipi_mboxes[i]; |
12488 |
+ if (ipi_mbox->dev.parent) { |
12489 |
+ mbox_controller_unregister(&ipi_mbox->mbox); |
12490 |
+- device_unregister(&ipi_mbox->dev); |
12491 |
++ if (device_is_registered(&ipi_mbox->dev)) |
12492 |
++ device_unregister(&ipi_mbox->dev); |
12493 |
+ } |
12494 |
+ } |
12495 |
+ } |
12496 |
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c |
12497 |
+index cf128b3471d78..0530db5482311 100644 |
12498 |
+--- a/drivers/mcb/mcb-core.c |
12499 |
++++ b/drivers/mcb/mcb-core.c |
12500 |
+@@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev) |
12501 |
+ |
12502 |
+ get_device(dev); |
12503 |
+ ret = mdrv->probe(mdev, found_id); |
12504 |
+- if (ret) |
12505 |
++ if (ret) { |
12506 |
+ module_put(carrier_mod); |
12507 |
++ put_device(dev); |
12508 |
++ } |
12509 |
+ |
12510 |
+ return ret; |
12511 |
+ } |
12512 |
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c |
12513 |
+index 0266bfddfbe27..aa6938da0db85 100644 |
12514 |
+--- a/drivers/mcb/mcb-parse.c |
12515 |
++++ b/drivers/mcb/mcb-parse.c |
12516 |
+@@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, |
12517 |
+ return 0; |
12518 |
+ |
12519 |
+ err: |
12520 |
+- mcb_free_dev(mdev); |
12521 |
++ put_device(&mdev->dev); |
12522 |
+ |
12523 |
+ return ret; |
12524 |
+ } |
12525 |
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c |
12526 |
+index 8cc11b1987ec8..650bfccd066fe 100644 |
12527 |
+--- a/drivers/md/md-bitmap.c |
12528 |
++++ b/drivers/md/md-bitmap.c |
12529 |
+@@ -2196,20 +2196,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, |
12530 |
+ |
12531 |
+ if (set) { |
12532 |
+ bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); |
12533 |
+- if (*bmc_new == 0) { |
12534 |
+- /* need to set on-disk bits too. */ |
12535 |
+- sector_t end = block + new_blocks; |
12536 |
+- sector_t start = block >> chunkshift; |
12537 |
+- start <<= chunkshift; |
12538 |
+- while (start < end) { |
12539 |
+- md_bitmap_file_set_bit(bitmap, block); |
12540 |
+- start += 1 << chunkshift; |
12541 |
++ if (bmc_new) { |
12542 |
++ if (*bmc_new == 0) { |
12543 |
++ /* need to set on-disk bits too. */ |
12544 |
++ sector_t end = block + new_blocks; |
12545 |
++ sector_t start = block >> chunkshift; |
12546 |
++ |
12547 |
++ start <<= chunkshift; |
12548 |
++ while (start < end) { |
12549 |
++ md_bitmap_file_set_bit(bitmap, block); |
12550 |
++ start += 1 << chunkshift; |
12551 |
++ } |
12552 |
++ *bmc_new = 2; |
12553 |
++ md_bitmap_count_page(&bitmap->counts, block, 1); |
12554 |
++ md_bitmap_set_pending(&bitmap->counts, block); |
12555 |
+ } |
12556 |
+- *bmc_new = 2; |
12557 |
+- md_bitmap_count_page(&bitmap->counts, block, 1); |
12558 |
+- md_bitmap_set_pending(&bitmap->counts, block); |
12559 |
++ *bmc_new |= NEEDED_MASK; |
12560 |
+ } |
12561 |
+- *bmc_new |= NEEDED_MASK; |
12562 |
+ if (new_blocks < old_blocks) |
12563 |
+ old_blocks = new_blocks; |
12564 |
+ } |
12565 |
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c |
12566 |
+index 9fa4794936426..783763f6845f4 100644 |
12567 |
+--- a/drivers/md/raid1.c |
12568 |
++++ b/drivers/md/raid1.c |
12569 |
+@@ -3141,6 +3141,7 @@ static int raid1_run(struct mddev *mddev) |
12570 |
+ * RAID1 needs at least one disk in active |
12571 |
+ */ |
12572 |
+ if (conf->raid_disks - mddev->degraded < 1) { |
12573 |
++ md_unregister_thread(&conf->thread); |
12574 |
+ ret = -EINVAL; |
12575 |
+ goto abort; |
12576 |
+ } |
12577 |
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c |
12578 |
+index 15a08d8c69ef8..c2d2792227f86 100644 |
12579 |
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c |
12580 |
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c |
12581 |
+@@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca) |
12582 |
+ { |
12583 |
+ unsigned int i; |
12584 |
+ |
12585 |
+- dvb_free_device(ca->dvbdev); |
12586 |
++ dvb_device_put(ca->dvbdev); |
12587 |
+ for (i = 0; i < ca->slot_count; i++) |
12588 |
+ vfree(ca->slot_info[i].rx_buffer.data); |
12589 |
+ |
12590 |
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c |
12591 |
+index 258637d762d64..09facc78d88aa 100644 |
12592 |
+--- a/drivers/media/dvb-core/dvb_frontend.c |
12593 |
++++ b/drivers/media/dvb-core/dvb_frontend.c |
12594 |
+@@ -136,7 +136,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) |
12595 |
+ struct dvb_frontend_private *fepriv = fe->frontend_priv; |
12596 |
+ |
12597 |
+ if (fepriv) |
12598 |
+- dvb_free_device(fepriv->dvbdev); |
12599 |
++ dvb_device_put(fepriv->dvbdev); |
12600 |
+ |
12601 |
+ dvb_frontend_invoke_release(fe, fe->ops.release); |
12602 |
+ |
12603 |
+@@ -2985,6 +2985,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb, |
12604 |
+ .name = fe->ops.info.name, |
12605 |
+ #endif |
12606 |
+ }; |
12607 |
++ int ret; |
12608 |
+ |
12609 |
+ dev_dbg(dvb->device, "%s:\n", __func__); |
12610 |
+ |
12611 |
+@@ -3018,8 +3019,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb, |
12612 |
+ "DVB: registering adapter %i frontend %i (%s)...\n", |
12613 |
+ fe->dvb->num, fe->id, fe->ops.info.name); |
12614 |
+ |
12615 |
+- dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template, |
12616 |
++ ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template, |
12617 |
+ fe, DVB_DEVICE_FRONTEND, 0); |
12618 |
++ if (ret) { |
12619 |
++ dvb_frontend_put(fe); |
12620 |
++ mutex_unlock(&frontend_mutex); |
12621 |
++ return ret; |
12622 |
++ } |
12623 |
+ |
12624 |
+ /* |
12625 |
+ * Initialize the cache to the proper values according with the |
12626 |
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c |
12627 |
+index 795d9bfaba5cf..828a0069a2968 100644 |
12628 |
+--- a/drivers/media/dvb-core/dvbdev.c |
12629 |
++++ b/drivers/media/dvb-core/dvbdev.c |
12630 |
+@@ -107,7 +107,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) |
12631 |
+ new_fops = fops_get(dvbdev->fops); |
12632 |
+ if (!new_fops) |
12633 |
+ goto fail; |
12634 |
+- file->private_data = dvbdev; |
12635 |
++ file->private_data = dvb_device_get(dvbdev); |
12636 |
+ replace_fops(file, new_fops); |
12637 |
+ if (file->f_op->open) |
12638 |
+ err = file->f_op->open(inode, file); |
12639 |
+@@ -171,6 +171,9 @@ int dvb_generic_release(struct inode *inode, struct file *file) |
12640 |
+ } |
12641 |
+ |
12642 |
+ dvbdev->users++; |
12643 |
++ |
12644 |
++ dvb_device_put(dvbdev); |
12645 |
++ |
12646 |
+ return 0; |
12647 |
+ } |
12648 |
+ EXPORT_SYMBOL(dvb_generic_release); |
12649 |
+@@ -488,6 +491,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, |
12650 |
+ } |
12651 |
+ |
12652 |
+ memcpy(dvbdev, template, sizeof(struct dvb_device)); |
12653 |
++ kref_init(&dvbdev->ref); |
12654 |
+ dvbdev->type = type; |
12655 |
+ dvbdev->id = id; |
12656 |
+ dvbdev->adapter = adap; |
12657 |
+@@ -518,7 +522,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, |
12658 |
+ #endif |
12659 |
+ |
12660 |
+ dvbdev->minor = minor; |
12661 |
+- dvb_minors[minor] = dvbdev; |
12662 |
++ dvb_minors[minor] = dvb_device_get(dvbdev); |
12663 |
+ up_write(&minor_rwsem); |
12664 |
+ |
12665 |
+ ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); |
12666 |
+@@ -563,6 +567,7 @@ void dvb_remove_device(struct dvb_device *dvbdev) |
12667 |
+ |
12668 |
+ down_write(&minor_rwsem); |
12669 |
+ dvb_minors[dvbdev->minor] = NULL; |
12670 |
++ dvb_device_put(dvbdev); |
12671 |
+ up_write(&minor_rwsem); |
12672 |
+ |
12673 |
+ dvb_media_device_free(dvbdev); |
12674 |
+@@ -574,21 +579,34 @@ void dvb_remove_device(struct dvb_device *dvbdev) |
12675 |
+ EXPORT_SYMBOL(dvb_remove_device); |
12676 |
+ |
12677 |
+ |
12678 |
+-void dvb_free_device(struct dvb_device *dvbdev) |
12679 |
++static void dvb_free_device(struct kref *ref) |
12680 |
+ { |
12681 |
+- if (!dvbdev) |
12682 |
+- return; |
12683 |
++ struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref); |
12684 |
+ |
12685 |
+ kfree (dvbdev->fops); |
12686 |
+ kfree (dvbdev); |
12687 |
+ } |
12688 |
+-EXPORT_SYMBOL(dvb_free_device); |
12689 |
++ |
12690 |
++ |
12691 |
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev) |
12692 |
++{ |
12693 |
++ kref_get(&dvbdev->ref); |
12694 |
++ return dvbdev; |
12695 |
++} |
12696 |
++EXPORT_SYMBOL(dvb_device_get); |
12697 |
++ |
12698 |
++ |
12699 |
++void dvb_device_put(struct dvb_device *dvbdev) |
12700 |
++{ |
12701 |
++ if (dvbdev) |
12702 |
++ kref_put(&dvbdev->ref, dvb_free_device); |
12703 |
++} |
12704 |
+ |
12705 |
+ |
12706 |
+ void dvb_unregister_device(struct dvb_device *dvbdev) |
12707 |
+ { |
12708 |
+ dvb_remove_device(dvbdev); |
12709 |
+- dvb_free_device(dvbdev); |
12710 |
++ dvb_device_put(dvbdev); |
12711 |
+ } |
12712 |
+ EXPORT_SYMBOL(dvb_unregister_device); |
12713 |
+ |
12714 |
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c |
12715 |
+index da0ff7b44da41..68b92b4419cff 100644 |
12716 |
+--- a/drivers/media/dvb-frontends/bcm3510.c |
12717 |
++++ b/drivers/media/dvb-frontends/bcm3510.c |
12718 |
+@@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe) |
12719 |
+ deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size); |
12720 |
+ if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) { |
12721 |
+ err("firmware download failed: %d\n",ret); |
12722 |
++ release_firmware(fw); |
12723 |
+ return ret; |
12724 |
+ } |
12725 |
+ i += 4 + len; |
12726 |
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c |
12727 |
+index 2958a46944614..07639ecc85aa8 100644 |
12728 |
+--- a/drivers/media/i2c/ad5820.c |
12729 |
++++ b/drivers/media/i2c/ad5820.c |
12730 |
+@@ -327,18 +327,18 @@ static int ad5820_probe(struct i2c_client *client, |
12731 |
+ |
12732 |
+ ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL); |
12733 |
+ if (ret < 0) |
12734 |
+- goto cleanup2; |
12735 |
++ goto clean_mutex; |
12736 |
+ |
12737 |
+ ret = v4l2_async_register_subdev(&coil->subdev); |
12738 |
+ if (ret < 0) |
12739 |
+- goto cleanup; |
12740 |
++ goto clean_entity; |
12741 |
+ |
12742 |
+ return ret; |
12743 |
+ |
12744 |
+-cleanup2: |
12745 |
+- mutex_destroy(&coil->power_lock); |
12746 |
+-cleanup: |
12747 |
++clean_entity: |
12748 |
+ media_entity_cleanup(&coil->subdev.entity); |
12749 |
++clean_mutex: |
12750 |
++ mutex_destroy(&coil->power_lock); |
12751 |
+ return ret; |
12752 |
+ } |
12753 |
+ |
12754 |
+diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c |
12755 |
+index 02eabe10ab970..00095c7762c24 100644 |
12756 |
+--- a/drivers/media/i2c/adv748x/adv748x-afe.c |
12757 |
++++ b/drivers/media/i2c/adv748x/adv748x-afe.c |
12758 |
+@@ -521,6 +521,10 @@ int adv748x_afe_init(struct adv748x_afe *afe) |
12759 |
+ } |
12760 |
+ } |
12761 |
+ |
12762 |
++ adv748x_afe_s_input(afe, afe->input); |
12763 |
++ |
12764 |
++ adv_dbg(state, "AFE Default input set to %d\n", afe->input); |
12765 |
++ |
12766 |
+ /* Entity pads and sinks are 0-indexed to match the pads */ |
12767 |
+ for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++) |
12768 |
+ afe->pads[i].flags = MEDIA_PAD_FL_SINK; |
12769 |
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c |
12770 |
+index 7973ae42873a6..c10997e2271d2 100644 |
12771 |
+--- a/drivers/media/pci/saa7164/saa7164-core.c |
12772 |
++++ b/drivers/media/pci/saa7164/saa7164-core.c |
12773 |
+@@ -1259,7 +1259,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev, |
12774 |
+ |
12775 |
+ if (saa7164_dev_setup(dev) < 0) { |
12776 |
+ err = -EINVAL; |
12777 |
+- goto fail_free; |
12778 |
++ goto fail_dev; |
12779 |
+ } |
12780 |
+ |
12781 |
+ /* print pci info */ |
12782 |
+@@ -1427,6 +1427,8 @@ fail_fw: |
12783 |
+ |
12784 |
+ fail_irq: |
12785 |
+ saa7164_dev_unregister(dev); |
12786 |
++fail_dev: |
12787 |
++ pci_disable_device(pci_dev); |
12788 |
+ fail_free: |
12789 |
+ v4l2_device_unregister(&dev->v4l2_dev); |
12790 |
+ kfree(dev); |
12791 |
+diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c |
12792 |
+index 4a546eeefe38f..6d87fbb0ee04a 100644 |
12793 |
+--- a/drivers/media/pci/solo6x10/solo6x10-core.c |
12794 |
++++ b/drivers/media/pci/solo6x10/solo6x10-core.c |
12795 |
+@@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev) |
12796 |
+ solo_dev->nr_chans); |
12797 |
+ |
12798 |
+ if (device_register(dev)) { |
12799 |
++ put_device(dev); |
12800 |
+ dev->parent = NULL; |
12801 |
+ return -ENOMEM; |
12802 |
+ } |
12803 |
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c |
12804 |
+index c484c008ab027..582a6c581f3c3 100644 |
12805 |
+--- a/drivers/media/platform/coda/coda-bit.c |
12806 |
++++ b/drivers/media/platform/coda/coda-bit.c |
12807 |
+@@ -852,7 +852,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) |
12808 |
+ /* Only H.264BP and H.263P3 are considered */ |
12809 |
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64); |
12810 |
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64); |
12811 |
+- if (!iram_info->buf_dbk_c_use) |
12812 |
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) |
12813 |
+ goto out; |
12814 |
+ iram_info->axi_sram_use |= dbk_bits; |
12815 |
+ |
12816 |
+@@ -876,7 +876,7 @@ static void coda_setup_iram(struct coda_ctx *ctx) |
12817 |
+ |
12818 |
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128); |
12819 |
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128); |
12820 |
+- if (!iram_info->buf_dbk_c_use) |
12821 |
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use) |
12822 |
+ goto out; |
12823 |
+ iram_info->axi_sram_use |= dbk_bits; |
12824 |
+ |
12825 |
+@@ -1082,10 +1082,16 @@ static int coda_start_encoding(struct coda_ctx *ctx) |
12826 |
+ } |
12827 |
+ |
12828 |
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG) { |
12829 |
+- if (!ctx->params.jpeg_qmat_tab[0]) |
12830 |
++ if (!ctx->params.jpeg_qmat_tab[0]) { |
12831 |
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); |
12832 |
+- if (!ctx->params.jpeg_qmat_tab[1]) |
12833 |
++ if (!ctx->params.jpeg_qmat_tab[0]) |
12834 |
++ return -ENOMEM; |
12835 |
++ } |
12836 |
++ if (!ctx->params.jpeg_qmat_tab[1]) { |
12837 |
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); |
12838 |
++ if (!ctx->params.jpeg_qmat_tab[1]) |
12839 |
++ return -ENOMEM; |
12840 |
++ } |
12841 |
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); |
12842 |
+ } |
12843 |
+ |
12844 |
+diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c |
12845 |
+index a72f4655e5ad5..b7bf529f18f77 100644 |
12846 |
+--- a/drivers/media/platform/coda/coda-jpeg.c |
12847 |
++++ b/drivers/media/platform/coda/coda-jpeg.c |
12848 |
+@@ -1052,10 +1052,16 @@ static int coda9_jpeg_start_encoding(struct coda_ctx *ctx) |
12849 |
+ v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n"); |
12850 |
+ return ret; |
12851 |
+ } |
12852 |
+- if (!ctx->params.jpeg_qmat_tab[0]) |
12853 |
++ if (!ctx->params.jpeg_qmat_tab[0]) { |
12854 |
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); |
12855 |
+- if (!ctx->params.jpeg_qmat_tab[1]) |
12856 |
++ if (!ctx->params.jpeg_qmat_tab[0]) |
12857 |
++ return -ENOMEM; |
12858 |
++ } |
12859 |
++ if (!ctx->params.jpeg_qmat_tab[1]) { |
12860 |
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); |
12861 |
++ if (!ctx->params.jpeg_qmat_tab[1]) |
12862 |
++ return -ENOMEM; |
12863 |
++ } |
12864 |
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); |
12865 |
+ |
12866 |
+ return 0; |
12867 |
+diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c |
12868 |
+index bfdee771cef9d..4afe0b9b17730 100644 |
12869 |
+--- a/drivers/media/platform/exynos4-is/fimc-core.c |
12870 |
++++ b/drivers/media/platform/exynos4-is/fimc-core.c |
12871 |
+@@ -1174,7 +1174,7 @@ int __init fimc_register_driver(void) |
12872 |
+ return platform_driver_register(&fimc_driver); |
12873 |
+ } |
12874 |
+ |
12875 |
+-void __exit fimc_unregister_driver(void) |
12876 |
++void fimc_unregister_driver(void) |
12877 |
+ { |
12878 |
+ platform_driver_unregister(&fimc_driver); |
12879 |
+ } |
12880 |
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c |
12881 |
+index fa648721eaab9..b19d7c8ddc06b 100644 |
12882 |
+--- a/drivers/media/platform/exynos4-is/media-dev.c |
12883 |
++++ b/drivers/media/platform/exynos4-is/media-dev.c |
12884 |
+@@ -1380,9 +1380,7 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier, |
12885 |
+ |
12886 |
+ /* Find platform data for this sensor subdev */ |
12887 |
+ for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++) |
12888 |
+- if (fmd->sensor[i].asd && |
12889 |
+- fmd->sensor[i].asd->match.fwnode == |
12890 |
+- of_fwnode_handle(subdev->dev->of_node)) |
12891 |
++ if (fmd->sensor[i].asd == asd) |
12892 |
+ si = &fmd->sensor[i]; |
12893 |
+ |
12894 |
+ if (si == NULL) |
12895 |
+@@ -1474,7 +1472,7 @@ static int fimc_md_probe(struct platform_device *pdev) |
12896 |
+ pinctrl = devm_pinctrl_get(dev); |
12897 |
+ if (IS_ERR(pinctrl)) { |
12898 |
+ ret = PTR_ERR(pinctrl); |
12899 |
+- if (ret != EPROBE_DEFER) |
12900 |
++ if (ret != -EPROBE_DEFER) |
12901 |
+ dev_err(dev, "Failed to get pinctrl: %d\n", ret); |
12902 |
+ goto err_clk; |
12903 |
+ } |
12904 |
+@@ -1586,7 +1584,11 @@ static int __init fimc_md_init(void) |
12905 |
+ if (ret) |
12906 |
+ return ret; |
12907 |
+ |
12908 |
+- return platform_driver_register(&fimc_md_driver); |
12909 |
++ ret = platform_driver_register(&fimc_md_driver); |
12910 |
++ if (ret) |
12911 |
++ fimc_unregister_driver(); |
12912 |
++ |
12913 |
++ return ret; |
12914 |
+ } |
12915 |
+ |
12916 |
+ static void __exit fimc_md_exit(void) |
12917 |
+diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c |
12918 |
+index 718b7b08f93e0..8936d5ce886c2 100644 |
12919 |
+--- a/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c |
12920 |
++++ b/drivers/media/platform/imx-jpeg/mxc-jpeg-hw.c |
12921 |
+@@ -76,12 +76,14 @@ void print_wrapper_info(struct device *dev, void __iomem *reg) |
12922 |
+ |
12923 |
+ void mxc_jpeg_enable_irq(void __iomem *reg, int slot) |
12924 |
+ { |
12925 |
+- writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); |
12926 |
++ writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS)); |
12927 |
++ writel(0xF0C, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); |
12928 |
+ } |
12929 |
+ |
12930 |
+ void mxc_jpeg_disable_irq(void __iomem *reg, int slot) |
12931 |
+ { |
12932 |
+ writel(0x0, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN)); |
12933 |
++ writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS)); |
12934 |
+ } |
12935 |
+ |
12936 |
+ void mxc_jpeg_sw_reset(void __iomem *reg) |
12937 |
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c |
12938 |
+index f282275af626f..5173b79995ee7 100644 |
12939 |
+--- a/drivers/media/platform/qcom/camss/camss-video.c |
12940 |
++++ b/drivers/media/platform/qcom/camss/camss-video.c |
12941 |
+@@ -493,7 +493,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) |
12942 |
+ |
12943 |
+ ret = media_pipeline_start(&vdev->entity, &video->pipe); |
12944 |
+ if (ret < 0) |
12945 |
+- return ret; |
12946 |
++ goto flush_buffers; |
12947 |
+ |
12948 |
+ ret = video_check_format(video); |
12949 |
+ if (ret < 0) |
12950 |
+@@ -522,6 +522,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) |
12951 |
+ error: |
12952 |
+ media_pipeline_stop(&vdev->entity); |
12953 |
+ |
12954 |
++flush_buffers: |
12955 |
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); |
12956 |
+ |
12957 |
+ return ret; |
12958 |
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c |
12959 |
+index 03fc82cb3fead..055513a7301f1 100644 |
12960 |
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c |
12961 |
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c |
12962 |
+@@ -869,8 +869,8 @@ static int vcodec_domains_get(struct venus_core *core) |
12963 |
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) { |
12964 |
+ pd = dev_pm_domain_attach_by_name(dev, |
12965 |
+ res->vcodec_pmdomains[i]); |
12966 |
+- if (IS_ERR(pd)) |
12967 |
+- return PTR_ERR(pd); |
12968 |
++ if (IS_ERR_OR_NULL(pd)) |
12969 |
++ return PTR_ERR(pd) ? : -ENODATA; |
12970 |
+ core->pmdomains[i] = pd; |
12971 |
+ } |
12972 |
+ |
12973 |
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
12974 |
+index 4fc135d9f38bd..4c511b026bd72 100644 |
12975 |
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c |
12976 |
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
12977 |
+@@ -1583,8 +1583,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = { |
12978 |
+ .port_num = MFC_NUM_PORTS_V7, |
12979 |
+ .buf_size = &buf_size_v7, |
12980 |
+ .fw_name[0] = "s5p-mfc-v7.fw", |
12981 |
+- .clk_names = {"mfc", "sclk_mfc"}, |
12982 |
+- .num_clocks = 2, |
12983 |
++ .clk_names = {"mfc"}, |
12984 |
++ .num_clocks = 1, |
12985 |
++}; |
12986 |
++ |
12987 |
++static struct s5p_mfc_variant mfc_drvdata_v7_3250 = { |
12988 |
++ .version = MFC_VERSION_V7, |
12989 |
++ .version_bit = MFC_V7_BIT, |
12990 |
++ .port_num = MFC_NUM_PORTS_V7, |
12991 |
++ .buf_size = &buf_size_v7, |
12992 |
++ .fw_name[0] = "s5p-mfc-v7.fw", |
12993 |
++ .clk_names = {"mfc", "sclk_mfc"}, |
12994 |
++ .num_clocks = 2, |
12995 |
+ }; |
12996 |
+ |
12997 |
+ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = { |
12998 |
+@@ -1654,6 +1664,9 @@ static const struct of_device_id exynos_mfc_match[] = { |
12999 |
+ }, { |
13000 |
+ .compatible = "samsung,mfc-v7", |
13001 |
+ .data = &mfc_drvdata_v7, |
13002 |
++ }, { |
13003 |
++ .compatible = "samsung,exynos3250-mfc", |
13004 |
++ .data = &mfc_drvdata_v7_3250, |
13005 |
+ }, { |
13006 |
+ .compatible = "samsung,mfc-v8", |
13007 |
+ .data = &mfc_drvdata_v8, |
13008 |
+diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c |
13009 |
+index 338b205ae3a79..88d0188397e7b 100644 |
13010 |
+--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c |
13011 |
++++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c |
13012 |
+@@ -940,6 +940,7 @@ static int configure_channels(struct c8sectpfei *fei) |
13013 |
+ if (ret) { |
13014 |
+ dev_err(fei->dev, |
13015 |
+ "configure_memdma_and_inputblock failed\n"); |
13016 |
++ of_node_put(child); |
13017 |
+ goto err_unmap; |
13018 |
+ } |
13019 |
+ index++; |
13020 |
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c |
13021 |
+index 3f8634a465730..1365ae732b799 100644 |
13022 |
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c |
13023 |
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c |
13024 |
+@@ -733,8 +733,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, |
13025 |
+ |
13026 |
+ /* start radio */ |
13027 |
+ retval = si470x_start_usb(radio); |
13028 |
+- if (retval < 0) |
13029 |
++ if (retval < 0 && !radio->int_in_running) |
13030 |
+ goto err_buf; |
13031 |
++ else if (retval < 0) /* in case of radio->int_in_running == 1 */ |
13032 |
++ goto err_all; |
13033 |
+ |
13034 |
+ /* set initial frequency */ |
13035 |
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ |
13036 |
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c |
13037 |
+index 97355e3ebdfd4..72e4bb0fb71ec 100644 |
13038 |
+--- a/drivers/media/rc/imon.c |
13039 |
++++ b/drivers/media/rc/imon.c |
13040 |
+@@ -646,15 +646,14 @@ static int send_packet(struct imon_context *ictx) |
13041 |
+ pr_err_ratelimited("error submitting urb(%d)\n", retval); |
13042 |
+ } else { |
13043 |
+ /* Wait for transmission to complete (or abort) */ |
13044 |
+- mutex_unlock(&ictx->lock); |
13045 |
+ retval = wait_for_completion_interruptible( |
13046 |
+ &ictx->tx.finished); |
13047 |
+ if (retval) { |
13048 |
+ usb_kill_urb(ictx->tx_urb); |
13049 |
+ pr_err_ratelimited("task interrupted\n"); |
13050 |
+ } |
13051 |
+- mutex_lock(&ictx->lock); |
13052 |
+ |
13053 |
++ ictx->tx.busy = false; |
13054 |
+ retval = ictx->tx.status; |
13055 |
+ if (retval) |
13056 |
+ pr_err_ratelimited("packet tx failed (%d)\n", retval); |
13057 |
+@@ -955,7 +954,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, |
13058 |
+ if (ictx->disconnected) |
13059 |
+ return -ENODEV; |
13060 |
+ |
13061 |
+- mutex_lock(&ictx->lock); |
13062 |
++ if (mutex_lock_interruptible(&ictx->lock)) |
13063 |
++ return -ERESTARTSYS; |
13064 |
+ |
13065 |
+ if (!ictx->dev_present_intf0) { |
13066 |
+ pr_err_ratelimited("no iMON device present\n"); |
13067 |
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c |
13068 |
+index 82620613d56b8..dff7265a42ca2 100644 |
13069 |
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c |
13070 |
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c |
13071 |
+@@ -459,26 +459,20 @@ fail_dmx_conn: |
13072 |
+ for (j = j - 1; j >= 0; --j) |
13073 |
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, |
13074 |
+ &dvb->dmx_fe[j]); |
13075 |
+-fail_dmx_dev: |
13076 |
+ dvb_dmxdev_release(&dvb->dmx_dev); |
13077 |
+-fail_dmx: |
13078 |
++fail_dmx_dev: |
13079 |
+ dvb_dmx_release(&dvb->demux); |
13080 |
++fail_dmx: |
13081 |
++fail_demod_probe: |
13082 |
++ for (i = i - 1; i >= 0; --i) { |
13083 |
++ dvb_unregister_frontend(dvb->fe[i]); |
13084 |
+ fail_fe: |
13085 |
+- for (j = i; j >= 0; --j) |
13086 |
+- dvb_unregister_frontend(dvb->fe[j]); |
13087 |
++ dvb_module_release(dvb->i2c_client_tuner[i]); |
13088 |
+ fail_tuner_probe: |
13089 |
+- for (j = i; j >= 0; --j) |
13090 |
+- if (dvb->i2c_client_tuner[j]) |
13091 |
+- dvb_module_release(dvb->i2c_client_tuner[j]); |
13092 |
+- |
13093 |
+-fail_demod_probe: |
13094 |
+- for (j = i; j >= 0; --j) |
13095 |
+- if (dvb->i2c_client_demod[j]) |
13096 |
+- dvb_module_release(dvb->i2c_client_demod[j]); |
13097 |
+- |
13098 |
++ dvb_module_release(dvb->i2c_client_demod[i]); |
13099 |
++ } |
13100 |
+ fail_adapter: |
13101 |
+ dvb_unregister_adapter(&dvb->adapter); |
13102 |
+- |
13103 |
+ fail_i2c: |
13104 |
+ i2c_del_adapter(&dvb->i2c_adapter); |
13105 |
+ |
13106 |
+diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c |
13107 |
+index 4b0ae6f51d765..857529ce3638a 100644 |
13108 |
+--- a/drivers/media/test-drivers/vimc/vimc-core.c |
13109 |
++++ b/drivers/media/test-drivers/vimc/vimc-core.c |
13110 |
+@@ -357,7 +357,7 @@ static int __init vimc_init(void) |
13111 |
+ if (ret) { |
13112 |
+ dev_err(&vimc_pdev.dev, |
13113 |
+ "platform driver registration failed (err=%d)\n", ret); |
13114 |
+- platform_driver_unregister(&vimc_pdrv); |
13115 |
++ platform_device_unregister(&vimc_pdev); |
13116 |
+ return ret; |
13117 |
+ } |
13118 |
+ |
13119 |
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c |
13120 |
+index 99139a8cd4c4f..331a3f4286d2e 100644 |
13121 |
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c |
13122 |
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c |
13123 |
+@@ -961,6 +961,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection |
13124 |
+ if (dev->has_compose_cap) { |
13125 |
+ v4l2_rect_set_min_size(compose, &min_rect); |
13126 |
+ v4l2_rect_set_max_size(compose, &max_rect); |
13127 |
++ v4l2_rect_map_inside(compose, &fmt); |
13128 |
+ } |
13129 |
+ dev->fmt_cap_rect = fmt; |
13130 |
+ tpg_s_buf_height(&dev->tpg, fmt.height); |
13131 |
+diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c |
13132 |
+index 86788771175b7..32b4ee65c2802 100644 |
13133 |
+--- a/drivers/media/usb/dvb-usb/az6027.c |
13134 |
++++ b/drivers/media/usb/dvb-usb/az6027.c |
13135 |
+@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n |
13136 |
+ if (msg[i].addr == 0x99) { |
13137 |
+ req = 0xBE; |
13138 |
+ index = 0; |
13139 |
++ if (msg[i].len < 1) { |
13140 |
++ i = -EOPNOTSUPP; |
13141 |
++ break; |
13142 |
++ } |
13143 |
+ value = msg[i].buf[0] & 0x00ff; |
13144 |
+ length = 1; |
13145 |
+ az6027_usb_out_op(d, req, value, index, data, length); |
13146 |
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c |
13147 |
+index 61439c8f33cab..58eea8ab54779 100644 |
13148 |
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c |
13149 |
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c |
13150 |
+@@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) |
13151 |
+ |
13152 |
+ ret = dvb_usb_adapter_stream_init(adap); |
13153 |
+ if (ret) |
13154 |
+- return ret; |
13155 |
++ goto stream_init_err; |
13156 |
+ |
13157 |
+ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs); |
13158 |
+ if (ret) |
13159 |
+@@ -114,6 +114,8 @@ frontend_init_err: |
13160 |
+ dvb_usb_adapter_dvb_exit(adap); |
13161 |
+ dvb_init_err: |
13162 |
+ dvb_usb_adapter_stream_exit(adap); |
13163 |
++stream_init_err: |
13164 |
++ kfree(adap->priv); |
13165 |
+ return ret; |
13166 |
+ } |
13167 |
+ |
13168 |
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c |
13169 |
+index 45a76f40deeb3..3798a57bbbd43 100644 |
13170 |
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c |
13171 |
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c |
13172 |
+@@ -1456,7 +1456,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, |
13173 |
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) |
13174 |
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); |
13175 |
+ |
13176 |
+- if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) { |
13177 |
++ if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { |
13178 |
+ handler_set_err(hdl, -EINVAL); |
13179 |
+ return NULL; |
13180 |
+ } |
13181 |
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c |
13182 |
+index 52312ce2ba056..f2c4393595574 100644 |
13183 |
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c |
13184 |
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c |
13185 |
+@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory { |
13186 |
+ |
13187 |
+ static int __videobuf_dc_alloc(struct device *dev, |
13188 |
+ struct videobuf_dma_contig_memory *mem, |
13189 |
+- unsigned long size, gfp_t flags) |
13190 |
++ unsigned long size) |
13191 |
+ { |
13192 |
+ mem->size = size; |
13193 |
+- mem->vaddr = dma_alloc_coherent(dev, mem->size, |
13194 |
+- &mem->dma_handle, flags); |
13195 |
+- |
13196 |
++ mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle, |
13197 |
++ GFP_KERNEL); |
13198 |
+ if (!mem->vaddr) { |
13199 |
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size); |
13200 |
+ return -ENOMEM; |
13201 |
+@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, |
13202 |
+ return videobuf_dma_contig_user_get(mem, vb); |
13203 |
+ |
13204 |
+ /* allocate memory for the read() method */ |
13205 |
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size), |
13206 |
+- GFP_KERNEL)) |
13207 |
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size))) |
13208 |
+ return -ENOMEM; |
13209 |
+ break; |
13210 |
+ case V4L2_MEMORY_OVERLAY: |
13211 |
+@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, |
13212 |
+ BUG_ON(!mem); |
13213 |
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); |
13214 |
+ |
13215 |
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize), |
13216 |
+- GFP_KERNEL | __GFP_COMP)) |
13217 |
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize))) |
13218 |
+ goto error; |
13219 |
+ |
13220 |
+- /* Try to remap memory */ |
13221 |
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
13222 |
+- |
13223 |
+ /* the "vm_pgoff" is just used in v4l2 to find the |
13224 |
+ * corresponding buffer data structure which is allocated |
13225 |
+ * earlier and it does not mean the offset from the physical |
13226 |
+ * buffer start address as usual. So set it to 0 to pass |
13227 |
+- * the sanity check in vm_iomap_memory(). |
13228 |
++ * the sanity check in dma_mmap_coherent(). |
13229 |
+ */ |
13230 |
+ vma->vm_pgoff = 0; |
13231 |
+- |
13232 |
+- retval = vm_iomap_memory(vma, mem->dma_handle, mem->size); |
13233 |
++ retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle, |
13234 |
++ mem->size); |
13235 |
+ if (retval) { |
13236 |
+ dev_err(q->dev, "mmap: remap failed with error %d. ", |
13237 |
+ retval); |
13238 |
+diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c |
13239 |
+index f854822f84d6c..7619c30b4ee10 100644 |
13240 |
+--- a/drivers/memstick/core/ms_block.c |
13241 |
++++ b/drivers/memstick/core/ms_block.c |
13242 |
+@@ -2150,6 +2150,11 @@ static int msb_init_disk(struct memstick_dev *card) |
13243 |
+ |
13244 |
+ msb->usage_count = 1; |
13245 |
+ msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); |
13246 |
++ if (!msb->io_queue) { |
13247 |
++ rc = -ENOMEM; |
13248 |
++ goto out_cleanup_disk; |
13249 |
++ } |
13250 |
++ |
13251 |
+ INIT_WORK(&msb->io_work, msb_io_work); |
13252 |
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); |
13253 |
+ |
13254 |
+@@ -2157,10 +2162,16 @@ static int msb_init_disk(struct memstick_dev *card) |
13255 |
+ set_disk_ro(msb->disk, 1); |
13256 |
+ |
13257 |
+ msb_start(card); |
13258 |
+- device_add_disk(&card->dev, msb->disk, NULL); |
13259 |
++ rc = device_add_disk(&card->dev, msb->disk, NULL); |
13260 |
++ if (rc) |
13261 |
++ goto out_destroy_workqueue; |
13262 |
+ dbg("Disk added"); |
13263 |
+ return 0; |
13264 |
+ |
13265 |
++out_destroy_workqueue: |
13266 |
++ destroy_workqueue(msb->io_queue); |
13267 |
++out_cleanup_disk: |
13268 |
++ blk_cleanup_disk(msb->disk); |
13269 |
+ out_free_tag_set: |
13270 |
+ blk_mq_free_tag_set(&msb->tag_set); |
13271 |
+ out_release_id: |
13272 |
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig |
13273 |
+index d2f3452455389..5dd7ea0ebd46c 100644 |
13274 |
+--- a/drivers/mfd/Kconfig |
13275 |
++++ b/drivers/mfd/Kconfig |
13276 |
+@@ -1995,6 +1995,7 @@ config MFD_ROHM_BD957XMUF |
13277 |
+ depends on I2C=y |
13278 |
+ depends on OF |
13279 |
+ select REGMAP_I2C |
13280 |
++ select REGMAP_IRQ |
13281 |
+ select MFD_CORE |
13282 |
+ help |
13283 |
+ Select this option to get support for the ROHM BD9576MUF and |
13284 |
+diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c |
13285 |
+index c472d7f8103c4..9f3c4a01b4c1c 100644 |
13286 |
+--- a/drivers/mfd/qcom-pm8008.c |
13287 |
++++ b/drivers/mfd/qcom-pm8008.c |
13288 |
+@@ -54,13 +54,6 @@ enum { |
13289 |
+ |
13290 |
+ #define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE) |
13291 |
+ |
13292 |
+-struct pm8008_data { |
13293 |
+- struct device *dev; |
13294 |
+- struct regmap *regmap; |
13295 |
+- int irq; |
13296 |
+- struct regmap_irq_chip_data *irq_data; |
13297 |
+-}; |
13298 |
+- |
13299 |
+ static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)}; |
13300 |
+ static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)}; |
13301 |
+ static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)}; |
13302 |
+@@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = { |
13303 |
+ .max_register = 0xFFFF, |
13304 |
+ }; |
13305 |
+ |
13306 |
+-static int pm8008_init(struct pm8008_data *chip) |
13307 |
++static int pm8008_init(struct regmap *regmap) |
13308 |
+ { |
13309 |
+ int rc; |
13310 |
+ |
13311 |
+@@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip) |
13312 |
+ * This is required to enable the writing of TYPE registers in |
13313 |
+ * regmap_irq_sync_unlock(). |
13314 |
+ */ |
13315 |
+- rc = regmap_write(chip->regmap, |
13316 |
+- (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), |
13317 |
+- BIT(0)); |
13318 |
++ rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); |
13319 |
+ if (rc) |
13320 |
+ return rc; |
13321 |
+ |
13322 |
+ /* Do the same for GPIO1 and GPIO2 peripherals */ |
13323 |
+- rc = regmap_write(chip->regmap, |
13324 |
+- (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); |
13325 |
++ rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); |
13326 |
+ if (rc) |
13327 |
+ return rc; |
13328 |
+ |
13329 |
+- rc = regmap_write(chip->regmap, |
13330 |
+- (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); |
13331 |
++ rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0)); |
13332 |
+ |
13333 |
+ return rc; |
13334 |
+ } |
13335 |
+ |
13336 |
+-static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, |
13337 |
++static int pm8008_probe_irq_peripherals(struct device *dev, |
13338 |
++ struct regmap *regmap, |
13339 |
+ int client_irq) |
13340 |
+ { |
13341 |
+ int rc, i; |
13342 |
+ struct regmap_irq_type *type; |
13343 |
+ struct regmap_irq_chip_data *irq_data; |
13344 |
+ |
13345 |
+- rc = pm8008_init(chip); |
13346 |
++ rc = pm8008_init(regmap); |
13347 |
+ if (rc) { |
13348 |
+- dev_err(chip->dev, "Init failed: %d\n", rc); |
13349 |
++ dev_err(dev, "Init failed: %d\n", rc); |
13350 |
+ return rc; |
13351 |
+ } |
13352 |
+ |
13353 |
+@@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, |
13354 |
+ IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW); |
13355 |
+ } |
13356 |
+ |
13357 |
+- rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq, |
13358 |
++ rc = devm_regmap_add_irq_chip(dev, regmap, client_irq, |
13359 |
+ IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data); |
13360 |
+ if (rc) { |
13361 |
+- dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc); |
13362 |
++ dev_err(dev, "Failed to add IRQ chip: %d\n", rc); |
13363 |
+ return rc; |
13364 |
+ } |
13365 |
+ |
13366 |
+@@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip, |
13367 |
+ static int pm8008_probe(struct i2c_client *client) |
13368 |
+ { |
13369 |
+ int rc; |
13370 |
+- struct pm8008_data *chip; |
13371 |
+- |
13372 |
+- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); |
13373 |
+- if (!chip) |
13374 |
+- return -ENOMEM; |
13375 |
++ struct device *dev; |
13376 |
++ struct regmap *regmap; |
13377 |
+ |
13378 |
+- chip->dev = &client->dev; |
13379 |
+- chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg); |
13380 |
+- if (!chip->regmap) |
13381 |
+- return -ENODEV; |
13382 |
++ dev = &client->dev; |
13383 |
++ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg); |
13384 |
++ if (IS_ERR(regmap)) |
13385 |
++ return PTR_ERR(regmap); |
13386 |
+ |
13387 |
+- i2c_set_clientdata(client, chip); |
13388 |
++ i2c_set_clientdata(client, regmap); |
13389 |
+ |
13390 |
+- if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) { |
13391 |
+- rc = pm8008_probe_irq_peripherals(chip, client->irq); |
13392 |
++ if (of_property_read_bool(dev->of_node, "interrupt-controller")) { |
13393 |
++ rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq); |
13394 |
+ if (rc) |
13395 |
+- dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc); |
13396 |
++ dev_err(dev, "Failed to probe irq periphs: %d\n", rc); |
13397 |
+ } |
13398 |
+ |
13399 |
+- return devm_of_platform_populate(chip->dev); |
13400 |
++ return devm_of_platform_populate(dev); |
13401 |
+ } |
13402 |
+ |
13403 |
+ static const struct of_device_id pm8008_match[] = { |
13404 |
+diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c |
13405 |
+index 71bc34b74bc9c..8fea0e511550a 100644 |
13406 |
+--- a/drivers/mfd/qcom_rpm.c |
13407 |
++++ b/drivers/mfd/qcom_rpm.c |
13408 |
+@@ -547,7 +547,7 @@ static int qcom_rpm_probe(struct platform_device *pdev) |
13409 |
+ init_completion(&rpm->ack); |
13410 |
+ |
13411 |
+ /* Enable message RAM clock */ |
13412 |
+- rpm->ramclk = devm_clk_get(&pdev->dev, "ram"); |
13413 |
++ rpm->ramclk = devm_clk_get_enabled(&pdev->dev, "ram"); |
13414 |
+ if (IS_ERR(rpm->ramclk)) { |
13415 |
+ ret = PTR_ERR(rpm->ramclk); |
13416 |
+ if (ret == -EPROBE_DEFER) |
13417 |
+@@ -558,7 +558,6 @@ static int qcom_rpm_probe(struct platform_device *pdev) |
13418 |
+ */ |
13419 |
+ rpm->ramclk = NULL; |
13420 |
+ } |
13421 |
+- clk_prepare_enable(rpm->ramclk); /* Accepts NULL */ |
13422 |
+ |
13423 |
+ irq_ack = platform_get_irq_byname(pdev, "ack"); |
13424 |
+ if (irq_ack < 0) |
13425 |
+@@ -673,22 +672,11 @@ static int qcom_rpm_probe(struct platform_device *pdev) |
13426 |
+ if (ret) |
13427 |
+ dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n"); |
13428 |
+ |
13429 |
+- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); |
13430 |
+-} |
13431 |
+- |
13432 |
+-static int qcom_rpm_remove(struct platform_device *pdev) |
13433 |
+-{ |
13434 |
+- struct qcom_rpm *rpm = dev_get_drvdata(&pdev->dev); |
13435 |
+- |
13436 |
+- of_platform_depopulate(&pdev->dev); |
13437 |
+- clk_disable_unprepare(rpm->ramclk); |
13438 |
+- |
13439 |
+- return 0; |
13440 |
++ return devm_of_platform_populate(&pdev->dev); |
13441 |
+ } |
13442 |
+ |
13443 |
+ static struct platform_driver qcom_rpm_driver = { |
13444 |
+ .probe = qcom_rpm_probe, |
13445 |
+- .remove = qcom_rpm_remove, |
13446 |
+ .driver = { |
13447 |
+ .name = "qcom_rpm", |
13448 |
+ .of_match_table = qcom_rpm_of_match, |
13449 |
+diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c |
13450 |
+index 186308f1f8eba..6334376826a92 100644 |
13451 |
+--- a/drivers/misc/cxl/guest.c |
13452 |
++++ b/drivers/misc/cxl/guest.c |
13453 |
+@@ -959,10 +959,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n |
13454 |
+ * if it returns an error! |
13455 |
+ */ |
13456 |
+ if ((rc = cxl_register_afu(afu))) |
13457 |
+- goto err_put1; |
13458 |
++ goto err_put_dev; |
13459 |
+ |
13460 |
+ if ((rc = cxl_sysfs_afu_add(afu))) |
13461 |
+- goto err_put1; |
13462 |
++ goto err_del_dev; |
13463 |
+ |
13464 |
+ /* |
13465 |
+ * pHyp doesn't expose the programming models supported by the |
13466 |
+@@ -978,7 +978,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n |
13467 |
+ afu->modes_supported = CXL_MODE_DIRECTED; |
13468 |
+ |
13469 |
+ if ((rc = cxl_afu_select_best_mode(afu))) |
13470 |
+- goto err_put2; |
13471 |
++ goto err_remove_sysfs; |
13472 |
+ |
13473 |
+ adapter->afu[afu->slice] = afu; |
13474 |
+ |
13475 |
+@@ -998,10 +998,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n |
13476 |
+ |
13477 |
+ return 0; |
13478 |
+ |
13479 |
+-err_put2: |
13480 |
++err_remove_sysfs: |
13481 |
+ cxl_sysfs_afu_remove(afu); |
13482 |
+-err_put1: |
13483 |
+- device_unregister(&afu->dev); |
13484 |
++err_del_dev: |
13485 |
++ device_del(&afu->dev); |
13486 |
++err_put_dev: |
13487 |
++ put_device(&afu->dev); |
13488 |
+ free = false; |
13489 |
+ guest_release_serr_irq(afu); |
13490 |
+ err2: |
13491 |
+@@ -1135,18 +1137,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic |
13492 |
+ * even if it returns an error! |
13493 |
+ */ |
13494 |
+ if ((rc = cxl_register_adapter(adapter))) |
13495 |
+- goto err_put1; |
13496 |
++ goto err_put_dev; |
13497 |
+ |
13498 |
+ if ((rc = cxl_sysfs_adapter_add(adapter))) |
13499 |
+- goto err_put1; |
13500 |
++ goto err_del_dev; |
13501 |
+ |
13502 |
+ /* release the context lock as the adapter is configured */ |
13503 |
+ cxl_adapter_context_unlock(adapter); |
13504 |
+ |
13505 |
+ return adapter; |
13506 |
+ |
13507 |
+-err_put1: |
13508 |
+- device_unregister(&adapter->dev); |
13509 |
++err_del_dev: |
13510 |
++ device_del(&adapter->dev); |
13511 |
++err_put_dev: |
13512 |
++ put_device(&adapter->dev); |
13513 |
+ free = false; |
13514 |
+ cxl_guest_remove_chardev(adapter); |
13515 |
+ err1: |
13516 |
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c |
13517 |
+index 2ba899f5659ff..d183836d80e3f 100644 |
13518 |
+--- a/drivers/misc/cxl/pci.c |
13519 |
++++ b/drivers/misc/cxl/pci.c |
13520 |
+@@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, |
13521 |
+ rc = get_phb_index(np, phb_index); |
13522 |
+ if (rc) { |
13523 |
+ pr_err("cxl: invalid phb index\n"); |
13524 |
++ of_node_put(np); |
13525 |
+ return rc; |
13526 |
+ } |
13527 |
+ |
13528 |
+@@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) |
13529 |
+ * if it returns an error! |
13530 |
+ */ |
13531 |
+ if ((rc = cxl_register_afu(afu))) |
13532 |
+- goto err_put1; |
13533 |
++ goto err_put_dev; |
13534 |
+ |
13535 |
+ if ((rc = cxl_sysfs_afu_add(afu))) |
13536 |
+- goto err_put1; |
13537 |
++ goto err_del_dev; |
13538 |
+ |
13539 |
+ adapter->afu[afu->slice] = afu; |
13540 |
+ |
13541 |
+@@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) |
13542 |
+ |
13543 |
+ return 0; |
13544 |
+ |
13545 |
+-err_put1: |
13546 |
++err_del_dev: |
13547 |
++ device_del(&afu->dev); |
13548 |
++err_put_dev: |
13549 |
+ pci_deconfigure_afu(afu); |
13550 |
+ cxl_debugfs_afu_remove(afu); |
13551 |
+- device_unregister(&afu->dev); |
13552 |
++ put_device(&afu->dev); |
13553 |
+ return rc; |
13554 |
+ |
13555 |
+ err_free_native: |
13556 |
+@@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) |
13557 |
+ * even if it returns an error! |
13558 |
+ */ |
13559 |
+ if ((rc = cxl_register_adapter(adapter))) |
13560 |
+- goto err_put1; |
13561 |
++ goto err_put_dev; |
13562 |
+ |
13563 |
+ if ((rc = cxl_sysfs_adapter_add(adapter))) |
13564 |
+- goto err_put1; |
13565 |
++ goto err_del_dev; |
13566 |
+ |
13567 |
+ /* Release the context lock as adapter is configured */ |
13568 |
+ cxl_adapter_context_unlock(adapter); |
13569 |
+ |
13570 |
+ return adapter; |
13571 |
+ |
13572 |
+-err_put1: |
13573 |
++err_del_dev: |
13574 |
++ device_del(&adapter->dev); |
13575 |
++err_put_dev: |
13576 |
+ /* This should mirror cxl_remove_adapter, except without the |
13577 |
+ * sysfs parts |
13578 |
+ */ |
13579 |
+ cxl_debugfs_adapter_remove(adapter); |
13580 |
+ cxl_deconfigure_adapter(adapter); |
13581 |
+- device_unregister(&adapter->dev); |
13582 |
++ put_device(&adapter->dev); |
13583 |
+ return ERR_PTR(rc); |
13584 |
+ |
13585 |
+ err_release: |
13586 |
+diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c |
13587 |
+index a68738f382521..f1f669efe050d 100644 |
13588 |
+--- a/drivers/misc/ocxl/config.c |
13589 |
++++ b/drivers/misc/ocxl/config.c |
13590 |
+@@ -204,6 +204,18 @@ static int read_dvsec_vendor(struct pci_dev *dev) |
13591 |
+ return 0; |
13592 |
+ } |
13593 |
+ |
13594 |
++/** |
13595 |
++ * get_dvsec_vendor0() - Find a related PCI device (function 0) |
13596 |
++ * @dev: PCI device to match |
13597 |
++ * @dev0: The PCI device (function 0) found |
13598 |
++ * @out_pos: The position of PCI device (function 0) |
13599 |
++ * |
13600 |
++ * Returns 0 on success, negative on failure. |
13601 |
++ * |
13602 |
++ * NOTE: If it's successful, the reference of dev0 is increased, |
13603 |
++ * so after using it, the callers must call pci_dev_put() to give |
13604 |
++ * up the reference. |
13605 |
++ */ |
13606 |
+ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, |
13607 |
+ int *out_pos) |
13608 |
+ { |
13609 |
+@@ -213,10 +225,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, |
13610 |
+ dev = get_function_0(dev); |
13611 |
+ if (!dev) |
13612 |
+ return -1; |
13613 |
++ } else { |
13614 |
++ dev = pci_dev_get(dev); |
13615 |
+ } |
13616 |
+ pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); |
13617 |
+- if (!pos) |
13618 |
++ if (!pos) { |
13619 |
++ pci_dev_put(dev); |
13620 |
+ return -1; |
13621 |
++ } |
13622 |
+ *dev0 = dev; |
13623 |
+ *out_pos = pos; |
13624 |
+ return 0; |
13625 |
+@@ -233,6 +249,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) |
13626 |
+ |
13627 |
+ pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, |
13628 |
+ &reset_reload); |
13629 |
++ pci_dev_put(dev0); |
13630 |
+ *val = !!(reset_reload & BIT(0)); |
13631 |
+ return 0; |
13632 |
+ } |
13633 |
+@@ -254,6 +271,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) |
13634 |
+ reset_reload &= ~BIT(0); |
13635 |
+ pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, |
13636 |
+ reset_reload); |
13637 |
++ pci_dev_put(dev0); |
13638 |
+ return 0; |
13639 |
+ } |
13640 |
+ |
13641 |
+diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c |
13642 |
+index 134806c2e67ec..a199c7ce3f81d 100644 |
13643 |
+--- a/drivers/misc/ocxl/file.c |
13644 |
++++ b/drivers/misc/ocxl/file.c |
13645 |
+@@ -543,8 +543,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) |
13646 |
+ goto err_put; |
13647 |
+ |
13648 |
+ rc = device_register(&info->dev); |
13649 |
+- if (rc) |
13650 |
+- goto err_put; |
13651 |
++ if (rc) { |
13652 |
++ free_minor(info); |
13653 |
++ put_device(&info->dev); |
13654 |
++ return rc; |
13655 |
++ } |
13656 |
+ |
13657 |
+ rc = ocxl_sysfs_register_afu(info); |
13658 |
+ if (rc) |
13659 |
+diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c |
13660 |
+index d7ef61e602ede..b836936e97471 100644 |
13661 |
+--- a/drivers/misc/sgi-gru/grufault.c |
13662 |
++++ b/drivers/misc/sgi-gru/grufault.c |
13663 |
+@@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb) |
13664 |
+ if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) |
13665 |
+ return -EINVAL; |
13666 |
+ |
13667 |
++again: |
13668 |
+ gts = gru_find_lock_gts(cb); |
13669 |
+ if (!gts) |
13670 |
+ return -EINVAL; |
13671 |
+@@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb) |
13672 |
+ if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) |
13673 |
+ goto exit; |
13674 |
+ |
13675 |
+- gru_check_context_placement(gts); |
13676 |
++ if (gru_check_context_placement(gts)) { |
13677 |
++ gru_unlock_gts(gts); |
13678 |
++ gru_unload_context(gts, 1); |
13679 |
++ goto again; |
13680 |
++ } |
13681 |
+ |
13682 |
+ /* |
13683 |
+ * CCH may contain stale data if ts_force_cch_reload is set. |
13684 |
+@@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg) |
13685 |
+ } else { |
13686 |
+ gts->ts_user_blade_id = req.val1; |
13687 |
+ gts->ts_user_chiplet_id = req.val0; |
13688 |
+- gru_check_context_placement(gts); |
13689 |
++ if (gru_check_context_placement(gts)) { |
13690 |
++ gru_unlock_gts(gts); |
13691 |
++ gru_unload_context(gts, 1); |
13692 |
++ return ret; |
13693 |
++ } |
13694 |
+ } |
13695 |
+ break; |
13696 |
+ case sco_gseg_owner: |
13697 |
+diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c |
13698 |
+index 9afda47efbf2e..3a16eb8e03f73 100644 |
13699 |
+--- a/drivers/misc/sgi-gru/grumain.c |
13700 |
++++ b/drivers/misc/sgi-gru/grumain.c |
13701 |
+@@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru, |
13702 |
+ * chiplet. Misassignment can occur if the process migrates to a different |
13703 |
+ * blade or if the user changes the selected blade/chiplet. |
13704 |
+ */ |
13705 |
+-void gru_check_context_placement(struct gru_thread_state *gts) |
13706 |
++int gru_check_context_placement(struct gru_thread_state *gts) |
13707 |
+ { |
13708 |
+ struct gru_state *gru; |
13709 |
++ int ret = 0; |
13710 |
+ |
13711 |
+ /* |
13712 |
+ * If the current task is the context owner, verify that the |
13713 |
+@@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts) |
13714 |
+ * references. Pthread apps use non-owner references to the CBRs. |
13715 |
+ */ |
13716 |
+ gru = gts->ts_gru; |
13717 |
++ /* |
13718 |
++ * If gru or gts->ts_tgid_owner isn't initialized properly, return |
13719 |
++ * success to indicate that the caller does not need to unload the |
13720 |
++ * gru context. The caller is responsible for their inspection and |
13721 |
++ * reinitialization if needed. |
13722 |
++ */ |
13723 |
+ if (!gru || gts->ts_tgid_owner != current->tgid) |
13724 |
+- return; |
13725 |
++ return ret; |
13726 |
+ |
13727 |
+ if (!gru_check_chiplet_assignment(gru, gts)) { |
13728 |
+ STAT(check_context_unload); |
13729 |
+- gru_unload_context(gts, 1); |
13730 |
++ ret = -EINVAL; |
13731 |
+ } else if (gru_retarget_intr(gts)) { |
13732 |
+ STAT(check_context_retarget_intr); |
13733 |
+ } |
13734 |
++ |
13735 |
++ return ret; |
13736 |
+ } |
13737 |
+ |
13738 |
+ |
13739 |
+@@ -934,7 +943,12 @@ again: |
13740 |
+ mutex_lock(&gts->ts_ctxlock); |
13741 |
+ preempt_disable(); |
13742 |
+ |
13743 |
+- gru_check_context_placement(gts); |
13744 |
++ if (gru_check_context_placement(gts)) { |
13745 |
++ preempt_enable(); |
13746 |
++ mutex_unlock(&gts->ts_ctxlock); |
13747 |
++ gru_unload_context(gts, 1); |
13748 |
++ return VM_FAULT_NOPAGE; |
13749 |
++ } |
13750 |
+ |
13751 |
+ if (!gts->ts_gru) { |
13752 |
+ STAT(load_user_context); |
13753 |
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h |
13754 |
+index e4c067c61251b..5c9783150cdfa 100644 |
13755 |
+--- a/drivers/misc/sgi-gru/grutables.h |
13756 |
++++ b/drivers/misc/sgi-gru/grutables.h |
13757 |
+@@ -638,7 +638,7 @@ extern int gru_user_flush_tlb(unsigned long arg); |
13758 |
+ extern int gru_user_unload_context(unsigned long arg); |
13759 |
+ extern int gru_get_exception_detail(unsigned long arg); |
13760 |
+ extern int gru_set_context_option(unsigned long address); |
13761 |
+-extern void gru_check_context_placement(struct gru_thread_state *gts); |
13762 |
++extern int gru_check_context_placement(struct gru_thread_state *gts); |
13763 |
+ extern int gru_cpu_fault_map_id(void); |
13764 |
+ extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); |
13765 |
+ extern void gru_flush_all_tlb(struct gru_state *gru); |
13766 |
+diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c |
13767 |
+index 228f2eb1d4762..2aebbfda104d8 100644 |
13768 |
+--- a/drivers/misc/tifm_7xx1.c |
13769 |
++++ b/drivers/misc/tifm_7xx1.c |
13770 |
+@@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work) |
13771 |
+ spin_unlock_irqrestore(&fm->lock, flags); |
13772 |
+ } |
13773 |
+ if (sock) |
13774 |
+- tifm_free_device(&sock->dev); |
13775 |
++ put_device(&sock->dev); |
13776 |
+ } |
13777 |
+ spin_lock_irqsave(&fm->lock, flags); |
13778 |
+ } |
13779 |
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c |
13780 |
+index 86a8a1f565839..592166e53dce8 100644 |
13781 |
+--- a/drivers/mmc/core/sd.c |
13782 |
++++ b/drivers/mmc/core/sd.c |
13783 |
+@@ -1252,7 +1252,7 @@ static int sd_read_ext_regs(struct mmc_card *card) |
13784 |
+ */ |
13785 |
+ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf); |
13786 |
+ if (err) { |
13787 |
+- pr_warn("%s: error %d reading general info of SD ext reg\n", |
13788 |
++ pr_err("%s: error %d reading general info of SD ext reg\n", |
13789 |
+ mmc_hostname(card->host), err); |
13790 |
+ goto out; |
13791 |
+ } |
13792 |
+@@ -1266,7 +1266,12 @@ static int sd_read_ext_regs(struct mmc_card *card) |
13793 |
+ /* Number of extensions to be find. */ |
13794 |
+ num_ext = gen_info_buf[4]; |
13795 |
+ |
13796 |
+- /* We support revision 0, but limit it to 512 bytes for simplicity. */ |
13797 |
++ /* |
13798 |
++ * We only support revision 0 and limit it to 512 bytes for simplicity. |
13799 |
++ * No matter what, let's return zero to allow us to continue using the |
13800 |
++ * card, even if we can't support the features from the SD function |
13801 |
++ * extensions registers. |
13802 |
++ */ |
13803 |
+ if (rev != 0 || len > 512) { |
13804 |
+ pr_warn("%s: non-supported SD ext reg layout\n", |
13805 |
+ mmc_hostname(card->host)); |
13806 |
+@@ -1281,7 +1286,7 @@ static int sd_read_ext_regs(struct mmc_card *card) |
13807 |
+ for (i = 0; i < num_ext; i++) { |
13808 |
+ err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr); |
13809 |
+ if (err) { |
13810 |
+- pr_warn("%s: error %d parsing SD ext reg\n", |
13811 |
++ pr_err("%s: error %d parsing SD ext reg\n", |
13812 |
+ mmc_hostname(card->host), err); |
13813 |
+ goto out; |
13814 |
+ } |
13815 |
+diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c |
13816 |
+index bfb8efeb7eb80..d01df01d4b4d1 100644 |
13817 |
+--- a/drivers/mmc/host/alcor.c |
13818 |
++++ b/drivers/mmc/host/alcor.c |
13819 |
+@@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) |
13820 |
+ alcor_hw_init(host); |
13821 |
+ |
13822 |
+ dev_set_drvdata(&pdev->dev, host); |
13823 |
+- mmc_add_host(mmc); |
13824 |
++ ret = mmc_add_host(mmc); |
13825 |
++ if (ret) |
13826 |
++ goto free_host; |
13827 |
++ |
13828 |
+ return 0; |
13829 |
+ |
13830 |
+ free_host: |
13831 |
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c |
13832 |
+index 807177c953f3d..6f971a3e7e494 100644 |
13833 |
+--- a/drivers/mmc/host/atmel-mci.c |
13834 |
++++ b/drivers/mmc/host/atmel-mci.c |
13835 |
+@@ -2223,6 +2223,7 @@ static int atmci_init_slot(struct atmel_mci *host, |
13836 |
+ { |
13837 |
+ struct mmc_host *mmc; |
13838 |
+ struct atmel_mci_slot *slot; |
13839 |
++ int ret; |
13840 |
+ |
13841 |
+ mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); |
13842 |
+ if (!mmc) |
13843 |
+@@ -2306,11 +2307,13 @@ static int atmci_init_slot(struct atmel_mci *host, |
13844 |
+ |
13845 |
+ host->slot[id] = slot; |
13846 |
+ mmc_regulator_get_supply(mmc); |
13847 |
+- mmc_add_host(mmc); |
13848 |
++ ret = mmc_add_host(mmc); |
13849 |
++ if (ret) { |
13850 |
++ mmc_free_host(mmc); |
13851 |
++ return ret; |
13852 |
++ } |
13853 |
+ |
13854 |
+ if (gpio_is_valid(slot->detect_pin)) { |
13855 |
+- int ret; |
13856 |
+- |
13857 |
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0); |
13858 |
+ |
13859 |
+ ret = request_irq(gpio_to_irq(slot->detect_pin), |
13860 |
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c |
13861 |
+index 9b2e2548bd18b..753f9ea254d49 100644 |
13862 |
+--- a/drivers/mmc/host/meson-gx-mmc.c |
13863 |
++++ b/drivers/mmc/host/meson-gx-mmc.c |
13864 |
+@@ -1291,7 +1291,9 @@ static int meson_mmc_probe(struct platform_device *pdev) |
13865 |
+ } |
13866 |
+ |
13867 |
+ mmc->ops = &meson_mmc_ops; |
13868 |
+- mmc_add_host(mmc); |
13869 |
++ ret = mmc_add_host(mmc); |
13870 |
++ if (ret) |
13871 |
++ goto err_free_irq; |
13872 |
+ |
13873 |
+ return 0; |
13874 |
+ |
13875 |
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c |
13876 |
+index 3765e2f4ad98a..2c4eda83ca181 100644 |
13877 |
+--- a/drivers/mmc/host/mmci.c |
13878 |
++++ b/drivers/mmc/host/mmci.c |
13879 |
+@@ -2254,7 +2254,9 @@ static int mmci_probe(struct amba_device *dev, |
13880 |
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50); |
13881 |
+ pm_runtime_use_autosuspend(&dev->dev); |
13882 |
+ |
13883 |
+- mmc_add_host(mmc); |
13884 |
++ ret = mmc_add_host(mmc); |
13885 |
++ if (ret) |
13886 |
++ goto clk_disable; |
13887 |
+ |
13888 |
+ pm_runtime_put(&dev->dev); |
13889 |
+ return 0; |
13890 |
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c |
13891 |
+index dfc3ffd5b1f8c..52ed30f2d9f4f 100644 |
13892 |
+--- a/drivers/mmc/host/moxart-mmc.c |
13893 |
++++ b/drivers/mmc/host/moxart-mmc.c |
13894 |
+@@ -665,7 +665,9 @@ static int moxart_probe(struct platform_device *pdev) |
13895 |
+ goto out; |
13896 |
+ |
13897 |
+ dev_set_drvdata(dev, mmc); |
13898 |
+- mmc_add_host(mmc); |
13899 |
++ ret = mmc_add_host(mmc); |
13900 |
++ if (ret) |
13901 |
++ goto out; |
13902 |
+ |
13903 |
+ dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); |
13904 |
+ |
13905 |
+diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c |
13906 |
+index 9bf95ba217fac..97227ad717150 100644 |
13907 |
+--- a/drivers/mmc/host/mxcmmc.c |
13908 |
++++ b/drivers/mmc/host/mxcmmc.c |
13909 |
+@@ -1143,7 +1143,9 @@ static int mxcmci_probe(struct platform_device *pdev) |
13910 |
+ |
13911 |
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0); |
13912 |
+ |
13913 |
+- mmc_add_host(mmc); |
13914 |
++ ret = mmc_add_host(mmc); |
13915 |
++ if (ret) |
13916 |
++ goto out_free_dma; |
13917 |
+ |
13918 |
+ return 0; |
13919 |
+ |
13920 |
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c |
13921 |
+index 2f8038d69f677..eb0bd46b7e81e 100644 |
13922 |
+--- a/drivers/mmc/host/omap_hsmmc.c |
13923 |
++++ b/drivers/mmc/host/omap_hsmmc.c |
13924 |
+@@ -1987,7 +1987,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) |
13925 |
+ if (!ret) |
13926 |
+ mmc->caps |= MMC_CAP_SDIO_IRQ; |
13927 |
+ |
13928 |
+- mmc_add_host(mmc); |
13929 |
++ ret = mmc_add_host(mmc); |
13930 |
++ if (ret) |
13931 |
++ goto err_irq; |
13932 |
+ |
13933 |
+ if (mmc_pdata(host)->name != NULL) { |
13934 |
+ ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); |
13935 |
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c |
13936 |
+index 55868b6b86583..e25e9bb34eb39 100644 |
13937 |
+--- a/drivers/mmc/host/pxamci.c |
13938 |
++++ b/drivers/mmc/host/pxamci.c |
13939 |
+@@ -763,7 +763,12 @@ static int pxamci_probe(struct platform_device *pdev) |
13940 |
+ dev_warn(dev, "gpio_ro and get_ro() both defined\n"); |
13941 |
+ } |
13942 |
+ |
13943 |
+- mmc_add_host(mmc); |
13944 |
++ ret = mmc_add_host(mmc); |
13945 |
++ if (ret) { |
13946 |
++ if (host->pdata && host->pdata->exit) |
13947 |
++ host->pdata->exit(dev, mmc); |
13948 |
++ goto out; |
13949 |
++ } |
13950 |
+ |
13951 |
+ return 0; |
13952 |
+ |
13953 |
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c |
13954 |
+index 387f2a4f693a0..12921fba4f52b 100644 |
13955 |
+--- a/drivers/mmc/host/renesas_sdhi_core.c |
13956 |
++++ b/drivers/mmc/host/renesas_sdhi_core.c |
13957 |
+@@ -520,7 +520,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, |
13958 |
+ SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & |
13959 |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); |
13960 |
+ |
13961 |
+- if (priv->adjust_hs400_calib_table) |
13962 |
++ if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps)) |
13963 |
+ renesas_sdhi_adjust_hs400_mode_disable(host); |
13964 |
+ |
13965 |
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | |
13966 |
+@@ -1037,11 +1037,14 @@ int renesas_sdhi_probe(struct platform_device *pdev, |
13967 |
+ if (ver >= SDHI_VER_GEN3_SD) |
13968 |
+ host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles; |
13969 |
+ |
13970 |
++ /* Check for SCC so we can reset it if needed */ |
13971 |
++ if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104) |
13972 |
++ priv->scc_ctl = host->ctl + of_data->scc_offset; |
13973 |
++ |
13974 |
+ /* Enable tuning iff we have an SCC and a supported mode */ |
13975 |
+- if (of_data && of_data->scc_offset && |
13976 |
+- (host->mmc->caps & MMC_CAP_UHS_SDR104 || |
13977 |
+- host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | |
13978 |
+- MMC_CAP2_HS400_1_8V))) { |
13979 |
++ if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 || |
13980 |
++ host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | |
13981 |
++ MMC_CAP2_HS400_1_8V))) { |
13982 |
+ const struct renesas_sdhi_scc *taps = of_data->taps; |
13983 |
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; |
13984 |
+ bool hit = false; |
13985 |
+@@ -1061,7 +1064,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, |
13986 |
+ if (!hit) |
13987 |
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); |
13988 |
+ |
13989 |
+- priv->scc_ctl = host->ctl + of_data->scc_offset; |
13990 |
+ host->check_retune = renesas_sdhi_check_scc_error; |
13991 |
+ host->ops.execute_tuning = renesas_sdhi_execute_tuning; |
13992 |
+ host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; |
13993 |
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c |
13994 |
+index e1580f78c6b2d..8098726dcc0bf 100644 |
13995 |
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c |
13996 |
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c |
13997 |
+@@ -1474,6 +1474,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) |
13998 |
+ struct realtek_pci_sdmmc *host; |
13999 |
+ struct rtsx_pcr *pcr; |
14000 |
+ struct pcr_handle *handle = pdev->dev.platform_data; |
14001 |
++ int ret; |
14002 |
+ |
14003 |
+ if (!handle) |
14004 |
+ return -ENXIO; |
14005 |
+@@ -1511,7 +1512,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) |
14006 |
+ pm_runtime_mark_last_busy(&pdev->dev); |
14007 |
+ pm_runtime_use_autosuspend(&pdev->dev); |
14008 |
+ |
14009 |
+- mmc_add_host(mmc); |
14010 |
++ ret = mmc_add_host(mmc); |
14011 |
++ if (ret) { |
14012 |
++ pm_runtime_dont_use_autosuspend(&pdev->dev); |
14013 |
++ pm_runtime_disable(&pdev->dev); |
14014 |
++ mmc_free_host(mmc); |
14015 |
++ return ret; |
14016 |
++ } |
14017 |
+ |
14018 |
+ return 0; |
14019 |
+ } |
14020 |
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c |
14021 |
+index 5fe4528e296e6..1be3a355f10d5 100644 |
14022 |
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c |
14023 |
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c |
14024 |
+@@ -1332,6 +1332,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) |
14025 |
+ #ifdef RTSX_USB_USE_LEDS_CLASS |
14026 |
+ int err; |
14027 |
+ #endif |
14028 |
++ int ret; |
14029 |
+ |
14030 |
+ ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); |
14031 |
+ if (!ucr) |
14032 |
+@@ -1368,7 +1369,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) |
14033 |
+ INIT_WORK(&host->led_work, rtsx_usb_update_led); |
14034 |
+ |
14035 |
+ #endif |
14036 |
+- mmc_add_host(mmc); |
14037 |
++ ret = mmc_add_host(mmc); |
14038 |
++ if (ret) { |
14039 |
++#ifdef RTSX_USB_USE_LEDS_CLASS |
14040 |
++ led_classdev_unregister(&host->led); |
14041 |
++#endif |
14042 |
++ mmc_free_host(mmc); |
14043 |
++ pm_runtime_disable(&pdev->dev); |
14044 |
++ return ret; |
14045 |
++ } |
14046 |
+ |
14047 |
+ return 0; |
14048 |
+ } |
14049 |
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c |
14050 |
+index 3f5977979cf25..6c4f43e112826 100644 |
14051 |
+--- a/drivers/mmc/host/sdhci_f_sdh30.c |
14052 |
++++ b/drivers/mmc/host/sdhci_f_sdh30.c |
14053 |
+@@ -168,6 +168,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) |
14054 |
+ if (reg & SDHCI_CAN_DO_8BIT) |
14055 |
+ priv->vendor_hs200 = F_SDH30_EMMC_HS200; |
14056 |
+ |
14057 |
++ if (!(reg & SDHCI_TIMEOUT_CLK_MASK)) |
14058 |
++ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; |
14059 |
++ |
14060 |
+ ret = sdhci_add_host(host); |
14061 |
+ if (ret) |
14062 |
+ goto err_add_host; |
14063 |
+diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c |
14064 |
+index 8d037c2071abc..497791ffada6d 100644 |
14065 |
+--- a/drivers/mmc/host/toshsd.c |
14066 |
++++ b/drivers/mmc/host/toshsd.c |
14067 |
+@@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
14068 |
+ if (ret) |
14069 |
+ goto unmap; |
14070 |
+ |
14071 |
+- mmc_add_host(mmc); |
14072 |
++ ret = mmc_add_host(mmc); |
14073 |
++ if (ret) |
14074 |
++ goto free_irq; |
14075 |
+ |
14076 |
+ base = pci_resource_start(pdev, 0); |
14077 |
+ dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq); |
14078 |
+@@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
14079 |
+ |
14080 |
+ return 0; |
14081 |
+ |
14082 |
++free_irq: |
14083 |
++ free_irq(pdev->irq, host); |
14084 |
+ unmap: |
14085 |
+ pci_iounmap(pdev, host->ioaddr); |
14086 |
+ release: |
14087 |
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c |
14088 |
+index 88662a90ed960..a2b0d9461665b 100644 |
14089 |
+--- a/drivers/mmc/host/via-sdmmc.c |
14090 |
++++ b/drivers/mmc/host/via-sdmmc.c |
14091 |
+@@ -1151,7 +1151,9 @@ static int via_sd_probe(struct pci_dev *pcidev, |
14092 |
+ pcidev->subsystem_device == 0x3891) |
14093 |
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY; |
14094 |
+ |
14095 |
+- mmc_add_host(mmc); |
14096 |
++ ret = mmc_add_host(mmc); |
14097 |
++ if (ret) |
14098 |
++ goto unmap; |
14099 |
+ |
14100 |
+ return 0; |
14101 |
+ |
14102 |
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c |
14103 |
+index 97beece62fec4..ab36ec4797478 100644 |
14104 |
+--- a/drivers/mmc/host/vub300.c |
14105 |
++++ b/drivers/mmc/host/vub300.c |
14106 |
+@@ -2299,14 +2299,14 @@ static int vub300_probe(struct usb_interface *interface, |
14107 |
+ 0x0000, 0x0000, &vub300->system_port_status, |
14108 |
+ sizeof(vub300->system_port_status), 1000); |
14109 |
+ if (retval < 0) { |
14110 |
+- goto error4; |
14111 |
++ goto error5; |
14112 |
+ } else if (sizeof(vub300->system_port_status) == retval) { |
14113 |
+ vub300->card_present = |
14114 |
+ (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; |
14115 |
+ vub300->read_only = |
14116 |
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; |
14117 |
+ } else { |
14118 |
+- goto error4; |
14119 |
++ goto error5; |
14120 |
+ } |
14121 |
+ usb_set_intfdata(interface, vub300); |
14122 |
+ INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); |
14123 |
+@@ -2329,8 +2329,13 @@ static int vub300_probe(struct usb_interface *interface, |
14124 |
+ "USB vub300 remote SDIO host controller[%d]" |
14125 |
+ "connected with no SD/SDIO card inserted\n", |
14126 |
+ interface_to_InterfaceNumber(interface)); |
14127 |
+- mmc_add_host(mmc); |
14128 |
++ retval = mmc_add_host(mmc); |
14129 |
++ if (retval) |
14130 |
++ goto error6; |
14131 |
++ |
14132 |
+ return 0; |
14133 |
++error6: |
14134 |
++ del_timer_sync(&vub300->inactivity_timer); |
14135 |
+ error5: |
14136 |
+ mmc_free_host(mmc); |
14137 |
+ /* |
14138 |
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c |
14139 |
+index 67ecd342fe5f1..7c7ec8d10232b 100644 |
14140 |
+--- a/drivers/mmc/host/wbsd.c |
14141 |
++++ b/drivers/mmc/host/wbsd.c |
14142 |
+@@ -1698,7 +1698,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, |
14143 |
+ */ |
14144 |
+ wbsd_init_device(host); |
14145 |
+ |
14146 |
+- mmc_add_host(mmc); |
14147 |
++ ret = mmc_add_host(mmc); |
14148 |
++ if (ret) { |
14149 |
++ if (!pnp) |
14150 |
++ wbsd_chip_poweroff(host); |
14151 |
++ |
14152 |
++ wbsd_release_resources(host); |
14153 |
++ wbsd_free_mmc(dev); |
14154 |
++ |
14155 |
++ mmc_free_host(mmc); |
14156 |
++ return ret; |
14157 |
++ } |
14158 |
+ |
14159 |
+ pr_info("%s: W83L51xD", mmc_hostname(mmc)); |
14160 |
+ if (host->chip_id != 0) |
14161 |
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c |
14162 |
+index 8df722ec57edc..3933195488575 100644 |
14163 |
+--- a/drivers/mmc/host/wmt-sdmmc.c |
14164 |
++++ b/drivers/mmc/host/wmt-sdmmc.c |
14165 |
+@@ -859,11 +859,15 @@ static int wmt_mci_probe(struct platform_device *pdev) |
14166 |
+ /* configure the controller to a known 'ready' state */ |
14167 |
+ wmt_reset_hardware(mmc); |
14168 |
+ |
14169 |
+- mmc_add_host(mmc); |
14170 |
++ ret = mmc_add_host(mmc); |
14171 |
++ if (ret) |
14172 |
++ goto fail7; |
14173 |
+ |
14174 |
+ dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); |
14175 |
+ |
14176 |
+ return 0; |
14177 |
++fail7: |
14178 |
++ clk_disable_unprepare(priv->clk_sdmmc); |
14179 |
+ fail6: |
14180 |
+ clk_put(priv->clk_sdmmc); |
14181 |
+ fail5_and_a_half: |
14182 |
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c |
14183 |
+index 72f5c7b300790..add4386f99f00 100644 |
14184 |
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c |
14185 |
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c |
14186 |
+@@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) |
14187 |
+ |
14188 |
+ /* lpddr2_nvm address range */ |
14189 |
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
14190 |
++ if (!add_range) |
14191 |
++ return -ENODEV; |
14192 |
+ |
14193 |
+ /* Populate map_info data structure */ |
14194 |
+ *map = (struct map_info) { |
14195 |
+diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c |
14196 |
+index 7d96758a8f04e..6e5e557559704 100644 |
14197 |
+--- a/drivers/mtd/maps/pxa2xx-flash.c |
14198 |
++++ b/drivers/mtd/maps/pxa2xx-flash.c |
14199 |
+@@ -66,6 +66,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) |
14200 |
+ if (!info->map.virt) { |
14201 |
+ printk(KERN_WARNING "Failed to ioremap %s\n", |
14202 |
+ info->map.name); |
14203 |
++ kfree(info); |
14204 |
+ return -ENOMEM; |
14205 |
+ } |
14206 |
+ info->map.cached = ioremap_cache(info->map.phys, info->map.size); |
14207 |
+@@ -87,6 +88,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev) |
14208 |
+ iounmap((void *)info->map.virt); |
14209 |
+ if (info->map.cached) |
14210 |
+ iounmap(info->map.cached); |
14211 |
++ kfree(info); |
14212 |
+ return -EIO; |
14213 |
+ } |
14214 |
+ info->mtd->dev.parent = &pdev->dev; |
14215 |
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c |
14216 |
+index 61f236e0378a6..3abaac109e75e 100644 |
14217 |
+--- a/drivers/mtd/mtdcore.c |
14218 |
++++ b/drivers/mtd/mtdcore.c |
14219 |
+@@ -671,8 +671,10 @@ int add_mtd_device(struct mtd_info *mtd) |
14220 |
+ dev_set_drvdata(&mtd->dev, mtd); |
14221 |
+ of_node_get(mtd_get_of_node(mtd)); |
14222 |
+ error = device_register(&mtd->dev); |
14223 |
+- if (error) |
14224 |
++ if (error) { |
14225 |
++ put_device(&mtd->dev); |
14226 |
+ goto fail_added; |
14227 |
++ } |
14228 |
+ |
14229 |
+ /* Add the nvmem provider */ |
14230 |
+ error = mtd_nvmem_add(mtd); |
14231 |
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c |
14232 |
+index eb5d7b3d18609..aad7076ae0202 100644 |
14233 |
+--- a/drivers/mtd/spi-nor/core.c |
14234 |
++++ b/drivers/mtd/spi-nor/core.c |
14235 |
+@@ -2155,7 +2155,8 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor, |
14236 |
+ spi_nor_spimem_setup_op(nor, &op, read->proto); |
14237 |
+ |
14238 |
+ /* convert the dummy cycles to the number of bytes */ |
14239 |
+- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; |
14240 |
++ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * |
14241 |
++ op.dummy.buswidth / 8; |
14242 |
+ if (spi_nor_protocol_is_dtr(nor->read_proto)) |
14243 |
+ op.dummy.nbytes *= 2; |
14244 |
+ |
14245 |
+diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c |
14246 |
+index 9aec9d8a98ada..4c3b351aef245 100644 |
14247 |
+--- a/drivers/mtd/spi-nor/sysfs.c |
14248 |
++++ b/drivers/mtd/spi-nor/sysfs.c |
14249 |
+@@ -67,6 +67,19 @@ static struct bin_attribute *spi_nor_sysfs_bin_entries[] = { |
14250 |
+ NULL |
14251 |
+ }; |
14252 |
+ |
14253 |
++static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj, |
14254 |
++ struct attribute *attr, int n) |
14255 |
++{ |
14256 |
++ struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); |
14257 |
++ struct spi_mem *spimem = spi_get_drvdata(spi); |
14258 |
++ struct spi_nor *nor = spi_mem_get_drvdata(spimem); |
14259 |
++ |
14260 |
++ if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len) |
14261 |
++ return 0; |
14262 |
++ |
14263 |
++ return 0444; |
14264 |
++} |
14265 |
++ |
14266 |
+ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, |
14267 |
+ struct bin_attribute *attr, int n) |
14268 |
+ { |
14269 |
+@@ -82,6 +95,7 @@ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, |
14270 |
+ |
14271 |
+ static const struct attribute_group spi_nor_sysfs_group = { |
14272 |
+ .name = "spi-nor", |
14273 |
++ .is_visible = spi_nor_sysfs_is_visible, |
14274 |
+ .is_bin_visible = spi_nor_sysfs_is_bin_visible, |
14275 |
+ .attrs = spi_nor_sysfs_entries, |
14276 |
+ .bin_attrs = spi_nor_sysfs_bin_entries, |
14277 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
14278 |
+index 402dffc508efb..456298919d541 100644 |
14279 |
+--- a/drivers/net/bonding/bond_main.c |
14280 |
++++ b/drivers/net/bonding/bond_main.c |
14281 |
+@@ -2499,12 +2499,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in |
14282 |
+ /* called with rcu_read_lock() */ |
14283 |
+ static int bond_miimon_inspect(struct bonding *bond) |
14284 |
+ { |
14285 |
++ bool ignore_updelay = false; |
14286 |
+ int link_state, commit = 0; |
14287 |
+ struct list_head *iter; |
14288 |
+ struct slave *slave; |
14289 |
+- bool ignore_updelay; |
14290 |
+ |
14291 |
+- ignore_updelay = !rcu_dereference(bond->curr_active_slave); |
14292 |
++ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { |
14293 |
++ ignore_updelay = !rcu_dereference(bond->curr_active_slave); |
14294 |
++ } else { |
14295 |
++ struct bond_up_slave *usable_slaves; |
14296 |
++ |
14297 |
++ usable_slaves = rcu_dereference(bond->usable_slaves); |
14298 |
++ |
14299 |
++ if (usable_slaves && usable_slaves->count == 0) |
14300 |
++ ignore_updelay = true; |
14301 |
++ } |
14302 |
+ |
14303 |
+ bond_for_each_slave_rcu(bond, slave, iter) { |
14304 |
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE); |
14305 |
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c |
14306 |
+index 46ab6155795c0..e027229c1955b 100644 |
14307 |
+--- a/drivers/net/can/m_can/m_can.c |
14308 |
++++ b/drivers/net/can/m_can/m_can.c |
14309 |
+@@ -1248,10 +1248,17 @@ static int m_can_set_bittiming(struct net_device *dev) |
14310 |
+ * - setup bittiming |
14311 |
+ * - configure timestamp generation |
14312 |
+ */ |
14313 |
+-static void m_can_chip_config(struct net_device *dev) |
14314 |
++static int m_can_chip_config(struct net_device *dev) |
14315 |
+ { |
14316 |
+ struct m_can_classdev *cdev = netdev_priv(dev); |
14317 |
+ u32 cccr, test; |
14318 |
++ int err; |
14319 |
++ |
14320 |
++ err = m_can_init_ram(cdev); |
14321 |
++ if (err) { |
14322 |
++ dev_err(cdev->dev, "Message RAM configuration failed\n"); |
14323 |
++ return err; |
14324 |
++ } |
14325 |
+ |
14326 |
+ m_can_config_endisable(cdev, true); |
14327 |
+ |
14328 |
+@@ -1375,18 +1382,25 @@ static void m_can_chip_config(struct net_device *dev) |
14329 |
+ |
14330 |
+ if (cdev->ops->init) |
14331 |
+ cdev->ops->init(cdev); |
14332 |
++ |
14333 |
++ return 0; |
14334 |
+ } |
14335 |
+ |
14336 |
+-static void m_can_start(struct net_device *dev) |
14337 |
++static int m_can_start(struct net_device *dev) |
14338 |
+ { |
14339 |
+ struct m_can_classdev *cdev = netdev_priv(dev); |
14340 |
++ int ret; |
14341 |
+ |
14342 |
+ /* basic m_can configuration */ |
14343 |
+- m_can_chip_config(dev); |
14344 |
++ ret = m_can_chip_config(dev); |
14345 |
++ if (ret) |
14346 |
++ return ret; |
14347 |
+ |
14348 |
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE; |
14349 |
+ |
14350 |
+ m_can_enable_all_interrupts(cdev); |
14351 |
++ |
14352 |
++ return 0; |
14353 |
+ } |
14354 |
+ |
14355 |
+ static int m_can_set_mode(struct net_device *dev, enum can_mode mode) |
14356 |
+@@ -1824,7 +1838,9 @@ static int m_can_open(struct net_device *dev) |
14357 |
+ } |
14358 |
+ |
14359 |
+ /* start the m_can controller */ |
14360 |
+- m_can_start(dev); |
14361 |
++ err = m_can_start(dev); |
14362 |
++ if (err) |
14363 |
++ goto exit_irq_fail; |
14364 |
+ |
14365 |
+ can_led_event(dev, CAN_LED_EVENT_OPEN); |
14366 |
+ |
14367 |
+@@ -2082,9 +2098,13 @@ int m_can_class_resume(struct device *dev) |
14368 |
+ ret = m_can_clk_start(cdev); |
14369 |
+ if (ret) |
14370 |
+ return ret; |
14371 |
++ ret = m_can_start(ndev); |
14372 |
++ if (ret) { |
14373 |
++ m_can_clk_stop(cdev); |
14374 |
++ |
14375 |
++ return ret; |
14376 |
++ } |
14377 |
+ |
14378 |
+- m_can_init_ram(cdev); |
14379 |
+- m_can_start(ndev); |
14380 |
+ netif_device_attach(ndev); |
14381 |
+ netif_start_queue(ndev); |
14382 |
+ } |
14383 |
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c |
14384 |
+index eee47bad05920..de6d8e01bf2e8 100644 |
14385 |
+--- a/drivers/net/can/m_can/m_can_platform.c |
14386 |
++++ b/drivers/net/can/m_can/m_can_platform.c |
14387 |
+@@ -140,10 +140,6 @@ static int m_can_plat_probe(struct platform_device *pdev) |
14388 |
+ |
14389 |
+ platform_set_drvdata(pdev, mcan_class); |
14390 |
+ |
14391 |
+- ret = m_can_init_ram(mcan_class); |
14392 |
+- if (ret) |
14393 |
+- goto probe_fail; |
14394 |
+- |
14395 |
+ pm_runtime_enable(mcan_class->dev); |
14396 |
+ ret = m_can_class_register(mcan_class); |
14397 |
+ if (ret) |
14398 |
+diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c |
14399 |
+index 04687b15b250e..c83b347be1cfd 100644 |
14400 |
+--- a/drivers/net/can/m_can/tcan4x5x-core.c |
14401 |
++++ b/drivers/net/can/m_can/tcan4x5x-core.c |
14402 |
+@@ -10,7 +10,7 @@ |
14403 |
+ #define TCAN4X5X_DEV_ID1 0x04 |
14404 |
+ #define TCAN4X5X_REV 0x08 |
14405 |
+ #define TCAN4X5X_STATUS 0x0C |
14406 |
+-#define TCAN4X5X_ERROR_STATUS 0x10 |
14407 |
++#define TCAN4X5X_ERROR_STATUS_MASK 0x10 |
14408 |
+ #define TCAN4X5X_CONTROL 0x14 |
14409 |
+ |
14410 |
+ #define TCAN4X5X_CONFIG 0x800 |
14411 |
+@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) |
14412 |
+ if (ret) |
14413 |
+ return ret; |
14414 |
+ |
14415 |
+- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG, |
14416 |
+- TCAN4X5X_ENABLE_MCAN_INT); |
14417 |
+- if (ret) |
14418 |
+- return ret; |
14419 |
+- |
14420 |
+- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, |
14421 |
+- TCAN4X5X_CLEAR_ALL_INT); |
14422 |
+- if (ret) |
14423 |
+- return ret; |
14424 |
+- |
14425 |
+- return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, |
14426 |
++ return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS, |
14427 |
+ TCAN4X5X_CLEAR_ALL_INT); |
14428 |
+ } |
14429 |
+ |
14430 |
+@@ -234,8 +224,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) |
14431 |
+ if (ret) |
14432 |
+ return ret; |
14433 |
+ |
14434 |
+- /* Zero out the MCAN buffers */ |
14435 |
+- ret = m_can_init_ram(cdev); |
14436 |
++ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK, |
14437 |
++ TCAN4X5X_CLEAR_ALL_INT); |
14438 |
+ if (ret) |
14439 |
+ return ret; |
14440 |
+ |
14441 |
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h |
14442 |
+index 62958f04a2f20..5699531f87873 100644 |
14443 |
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h |
14444 |
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h |
14445 |
+@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context { |
14446 |
+ int dlc; |
14447 |
+ }; |
14448 |
+ |
14449 |
++struct kvaser_usb_busparams { |
14450 |
++ __le32 bitrate; |
14451 |
++ u8 tseg1; |
14452 |
++ u8 tseg2; |
14453 |
++ u8 sjw; |
14454 |
++ u8 nsamples; |
14455 |
++} __packed; |
14456 |
++ |
14457 |
+ struct kvaser_usb { |
14458 |
+ struct usb_device *udev; |
14459 |
+ struct usb_interface *intf; |
14460 |
+@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv { |
14461 |
+ struct can_priv can; |
14462 |
+ struct can_berr_counter bec; |
14463 |
+ |
14464 |
++ /* subdriver-specific data */ |
14465 |
++ void *sub_priv; |
14466 |
++ |
14467 |
+ struct kvaser_usb *dev; |
14468 |
+ struct net_device *netdev; |
14469 |
+ int channel; |
14470 |
+ |
14471 |
+- struct completion start_comp, stop_comp, flush_comp; |
14472 |
++ struct completion start_comp, stop_comp, flush_comp, |
14473 |
++ get_busparams_comp; |
14474 |
+ struct usb_anchor tx_submitted; |
14475 |
+ |
14476 |
++ struct kvaser_usb_busparams busparams_nominal, busparams_data; |
14477 |
++ |
14478 |
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ |
14479 |
+ int active_tx_contexts; |
14480 |
+ struct kvaser_usb_tx_urb_context tx_contexts[]; |
14481 |
+@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv { |
14482 |
+ * struct kvaser_usb_dev_ops - Device specific functions |
14483 |
+ * @dev_set_mode: used for can.do_set_mode |
14484 |
+ * @dev_set_bittiming: used for can.do_set_bittiming |
14485 |
++ * @dev_get_busparams: readback arbitration busparams |
14486 |
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming |
14487 |
++ * @dev_get_data_busparams: readback data busparams |
14488 |
+ * @dev_get_berr_counter: used for can.do_get_berr_counter |
14489 |
+ * |
14490 |
+ * @dev_setup_endpoints: setup USB in and out endpoints |
14491 |
+ * @dev_init_card: initialize card |
14492 |
++ * @dev_init_channel: initialize channel |
14493 |
++ * @dev_remove_channel: uninitialize channel |
14494 |
+ * @dev_get_software_info: get software info |
14495 |
+ * @dev_get_software_details: get software details |
14496 |
+ * @dev_get_card_info: get card info |
14497 |
+@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv { |
14498 |
+ */ |
14499 |
+ struct kvaser_usb_dev_ops { |
14500 |
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); |
14501 |
+- int (*dev_set_bittiming)(struct net_device *netdev); |
14502 |
+- int (*dev_set_data_bittiming)(struct net_device *netdev); |
14503 |
++ int (*dev_set_bittiming)(const struct net_device *netdev, |
14504 |
++ const struct kvaser_usb_busparams *busparams); |
14505 |
++ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv); |
14506 |
++ int (*dev_set_data_bittiming)(const struct net_device *netdev, |
14507 |
++ const struct kvaser_usb_busparams *busparams); |
14508 |
++ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv); |
14509 |
+ int (*dev_get_berr_counter)(const struct net_device *netdev, |
14510 |
+ struct can_berr_counter *bec); |
14511 |
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev); |
14512 |
+ int (*dev_init_card)(struct kvaser_usb *dev); |
14513 |
++ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv); |
14514 |
++ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv); |
14515 |
+ int (*dev_get_software_info)(struct kvaser_usb *dev); |
14516 |
+ int (*dev_get_software_details)(struct kvaser_usb *dev); |
14517 |
+ int (*dev_get_card_info)(struct kvaser_usb *dev); |
14518 |
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c |
14519 |
+index bdcaccf8e2b28..09dbc51347d70 100644 |
14520 |
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c |
14521 |
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c |
14522 |
+@@ -443,10 +443,6 @@ static int kvaser_usb_open(struct net_device *netdev) |
14523 |
+ if (err) |
14524 |
+ return err; |
14525 |
+ |
14526 |
+- err = kvaser_usb_setup_rx_urbs(dev); |
14527 |
+- if (err) |
14528 |
+- goto error; |
14529 |
+- |
14530 |
+ err = ops->dev_set_opt_mode(priv); |
14531 |
+ if (err) |
14532 |
+ goto error; |
14533 |
+@@ -537,6 +533,93 @@ static int kvaser_usb_close(struct net_device *netdev) |
14534 |
+ return 0; |
14535 |
+ } |
14536 |
+ |
14537 |
++static int kvaser_usb_set_bittiming(struct net_device *netdev) |
14538 |
++{ |
14539 |
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
14540 |
++ struct kvaser_usb *dev = priv->dev; |
14541 |
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; |
14542 |
++ struct can_bittiming *bt = &priv->can.bittiming; |
14543 |
++ |
14544 |
++ struct kvaser_usb_busparams busparams; |
14545 |
++ int tseg1 = bt->prop_seg + bt->phase_seg1; |
14546 |
++ int tseg2 = bt->phase_seg2; |
14547 |
++ int sjw = bt->sjw; |
14548 |
++ int err = -EOPNOTSUPP; |
14549 |
++ |
14550 |
++ busparams.bitrate = cpu_to_le32(bt->bitrate); |
14551 |
++ busparams.sjw = (u8)sjw; |
14552 |
++ busparams.tseg1 = (u8)tseg1; |
14553 |
++ busparams.tseg2 = (u8)tseg2; |
14554 |
++ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) |
14555 |
++ busparams.nsamples = 3; |
14556 |
++ else |
14557 |
++ busparams.nsamples = 1; |
14558 |
++ |
14559 |
++ err = ops->dev_set_bittiming(netdev, &busparams); |
14560 |
++ if (err) |
14561 |
++ return err; |
14562 |
++ |
14563 |
++ err = kvaser_usb_setup_rx_urbs(priv->dev); |
14564 |
++ if (err) |
14565 |
++ return err; |
14566 |
++ |
14567 |
++ err = ops->dev_get_busparams(priv); |
14568 |
++ if (err) { |
14569 |
++ /* Treat EOPNOTSUPP as success */ |
14570 |
++ if (err == -EOPNOTSUPP) |
14571 |
++ err = 0; |
14572 |
++ return err; |
14573 |
++ } |
14574 |
++ |
14575 |
++ if (memcmp(&busparams, &priv->busparams_nominal, |
14576 |
++ sizeof(priv->busparams_nominal)) != 0) |
14577 |
++ err = -EINVAL; |
14578 |
++ |
14579 |
++ return err; |
14580 |
++} |
14581 |
++ |
14582 |
++static int kvaser_usb_set_data_bittiming(struct net_device *netdev) |
14583 |
++{ |
14584 |
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
14585 |
++ struct kvaser_usb *dev = priv->dev; |
14586 |
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; |
14587 |
++ struct can_bittiming *dbt = &priv->can.data_bittiming; |
14588 |
++ |
14589 |
++ struct kvaser_usb_busparams busparams; |
14590 |
++ int tseg1 = dbt->prop_seg + dbt->phase_seg1; |
14591 |
++ int tseg2 = dbt->phase_seg2; |
14592 |
++ int sjw = dbt->sjw; |
14593 |
++ int err; |
14594 |
++ |
14595 |
++ if (!ops->dev_set_data_bittiming || |
14596 |
++ !ops->dev_get_data_busparams) |
14597 |
++ return -EOPNOTSUPP; |
14598 |
++ |
14599 |
++ busparams.bitrate = cpu_to_le32(dbt->bitrate); |
14600 |
++ busparams.sjw = (u8)sjw; |
14601 |
++ busparams.tseg1 = (u8)tseg1; |
14602 |
++ busparams.tseg2 = (u8)tseg2; |
14603 |
++ busparams.nsamples = 1; |
14604 |
++ |
14605 |
++ err = ops->dev_set_data_bittiming(netdev, &busparams); |
14606 |
++ if (err) |
14607 |
++ return err; |
14608 |
++ |
14609 |
++ err = kvaser_usb_setup_rx_urbs(priv->dev); |
14610 |
++ if (err) |
14611 |
++ return err; |
14612 |
++ |
14613 |
++ err = ops->dev_get_data_busparams(priv); |
14614 |
++ if (err) |
14615 |
++ return err; |
14616 |
++ |
14617 |
++ if (memcmp(&busparams, &priv->busparams_data, |
14618 |
++ sizeof(priv->busparams_data)) != 0) |
14619 |
++ err = -EINVAL; |
14620 |
++ |
14621 |
++ return err; |
14622 |
++} |
14623 |
++ |
14624 |
+ static void kvaser_usb_write_bulk_callback(struct urb *urb) |
14625 |
+ { |
14626 |
+ struct kvaser_usb_tx_urb_context *context = urb->context; |
14627 |
+@@ -672,6 +755,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = { |
14628 |
+ |
14629 |
+ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) |
14630 |
+ { |
14631 |
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; |
14632 |
+ int i; |
14633 |
+ |
14634 |
+ for (i = 0; i < dev->nchannels; i++) { |
14635 |
+@@ -687,6 +771,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) |
14636 |
+ if (!dev->nets[i]) |
14637 |
+ continue; |
14638 |
+ |
14639 |
++ if (ops->dev_remove_channel) |
14640 |
++ ops->dev_remove_channel(dev->nets[i]); |
14641 |
++ |
14642 |
+ free_candev(dev->nets[i]->netdev); |
14643 |
+ } |
14644 |
+ } |
14645 |
+@@ -718,6 +805,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) |
14646 |
+ init_completion(&priv->start_comp); |
14647 |
+ init_completion(&priv->stop_comp); |
14648 |
+ init_completion(&priv->flush_comp); |
14649 |
++ init_completion(&priv->get_busparams_comp); |
14650 |
+ priv->can.ctrlmode_supported = 0; |
14651 |
+ |
14652 |
+ priv->dev = dev; |
14653 |
+@@ -730,7 +818,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) |
14654 |
+ priv->can.state = CAN_STATE_STOPPED; |
14655 |
+ priv->can.clock.freq = dev->cfg->clock.freq; |
14656 |
+ priv->can.bittiming_const = dev->cfg->bittiming_const; |
14657 |
+- priv->can.do_set_bittiming = ops->dev_set_bittiming; |
14658 |
++ priv->can.do_set_bittiming = kvaser_usb_set_bittiming; |
14659 |
+ priv->can.do_set_mode = ops->dev_set_mode; |
14660 |
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || |
14661 |
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) |
14662 |
+@@ -742,7 +830,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) |
14663 |
+ |
14664 |
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { |
14665 |
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; |
14666 |
+- priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; |
14667 |
++ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; |
14668 |
+ } |
14669 |
+ |
14670 |
+ netdev->flags |= IFF_ECHO; |
14671 |
+@@ -754,17 +842,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) |
14672 |
+ |
14673 |
+ dev->nets[channel] = priv; |
14674 |
+ |
14675 |
++ if (ops->dev_init_channel) { |
14676 |
++ err = ops->dev_init_channel(priv); |
14677 |
++ if (err) |
14678 |
++ goto err; |
14679 |
++ } |
14680 |
++ |
14681 |
+ err = register_candev(netdev); |
14682 |
+ if (err) { |
14683 |
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n"); |
14684 |
+- free_candev(netdev); |
14685 |
+- dev->nets[channel] = NULL; |
14686 |
+- return err; |
14687 |
++ goto err; |
14688 |
+ } |
14689 |
+ |
14690 |
+ netdev_dbg(netdev, "device registered\n"); |
14691 |
+ |
14692 |
+ return 0; |
14693 |
++ |
14694 |
++err: |
14695 |
++ free_candev(netdev); |
14696 |
++ dev->nets[channel] = NULL; |
14697 |
++ return err; |
14698 |
+ } |
14699 |
+ |
14700 |
+ static int kvaser_usb_probe(struct usb_interface *intf, |
14701 |
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c |
14702 |
+index 3ff2cd9828d29..6cc65bf28d03e 100644 |
14703 |
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c |
14704 |
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c |
14705 |
+@@ -22,6 +22,7 @@ |
14706 |
+ #include <linux/spinlock.h> |
14707 |
+ #include <linux/string.h> |
14708 |
+ #include <linux/types.h> |
14709 |
++#include <linux/units.h> |
14710 |
+ #include <linux/usb.h> |
14711 |
+ |
14712 |
+ #include <linux/can.h> |
14713 |
+@@ -44,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt; |
14714 |
+ |
14715 |
+ /* Minihydra command IDs */ |
14716 |
+ #define CMD_SET_BUSPARAMS_REQ 16 |
14717 |
++#define CMD_GET_BUSPARAMS_REQ 17 |
14718 |
++#define CMD_GET_BUSPARAMS_RESP 18 |
14719 |
+ #define CMD_GET_CHIP_STATE_REQ 19 |
14720 |
+ #define CMD_CHIP_STATE_EVENT 20 |
14721 |
+ #define CMD_SET_DRIVERMODE_REQ 21 |
14722 |
+@@ -195,21 +198,26 @@ struct kvaser_cmd_chip_state_event { |
14723 |
+ #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 |
14724 |
+ #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 |
14725 |
+ struct kvaser_cmd_set_busparams { |
14726 |
+- __le32 bitrate; |
14727 |
+- u8 tseg1; |
14728 |
+- u8 tseg2; |
14729 |
+- u8 sjw; |
14730 |
+- u8 nsamples; |
14731 |
++ struct kvaser_usb_busparams busparams_nominal; |
14732 |
+ u8 reserved0[4]; |
14733 |
+- __le32 bitrate_d; |
14734 |
+- u8 tseg1_d; |
14735 |
+- u8 tseg2_d; |
14736 |
+- u8 sjw_d; |
14737 |
+- u8 nsamples_d; |
14738 |
++ struct kvaser_usb_busparams busparams_data; |
14739 |
+ u8 canfd_mode; |
14740 |
+ u8 reserved1[7]; |
14741 |
+ } __packed; |
14742 |
+ |
14743 |
++/* Busparam type */ |
14744 |
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 |
14745 |
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 |
14746 |
++struct kvaser_cmd_get_busparams_req { |
14747 |
++ u8 type; |
14748 |
++ u8 reserved[27]; |
14749 |
++} __packed; |
14750 |
++ |
14751 |
++struct kvaser_cmd_get_busparams_res { |
14752 |
++ struct kvaser_usb_busparams busparams; |
14753 |
++ u8 reserved[20]; |
14754 |
++} __packed; |
14755 |
++ |
14756 |
+ /* Ctrl modes */ |
14757 |
+ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 |
14758 |
+ #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 |
14759 |
+@@ -280,6 +288,8 @@ struct kvaser_cmd { |
14760 |
+ struct kvaser_cmd_error_event error_event; |
14761 |
+ |
14762 |
+ struct kvaser_cmd_set_busparams set_busparams_req; |
14763 |
++ struct kvaser_cmd_get_busparams_req get_busparams_req; |
14764 |
++ struct kvaser_cmd_get_busparams_res get_busparams_res; |
14765 |
+ |
14766 |
+ struct kvaser_cmd_chip_state_event chip_state_event; |
14767 |
+ |
14768 |
+@@ -295,6 +305,7 @@ struct kvaser_cmd { |
14769 |
+ #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) |
14770 |
+ #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) |
14771 |
+ #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) |
14772 |
++#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) |
14773 |
+ /* CAN frame flags. Used in ext_rx_can and ext_tx_can */ |
14774 |
+ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) |
14775 |
+ #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) |
14776 |
+@@ -361,6 +372,10 @@ struct kvaser_cmd_ext { |
14777 |
+ } __packed; |
14778 |
+ } __packed; |
14779 |
+ |
14780 |
++struct kvaser_usb_net_hydra_priv { |
14781 |
++ int pending_get_busparams_type; |
14782 |
++}; |
14783 |
++ |
14784 |
+ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { |
14785 |
+ .name = "kvaser_usb_kcan", |
14786 |
+ .tseg1_min = 1, |
14787 |
+@@ -838,6 +853,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, |
14788 |
+ complete(&priv->flush_comp); |
14789 |
+ } |
14790 |
+ |
14791 |
++static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, |
14792 |
++ const struct kvaser_cmd *cmd) |
14793 |
++{ |
14794 |
++ struct kvaser_usb_net_priv *priv; |
14795 |
++ struct kvaser_usb_net_hydra_priv *hydra; |
14796 |
++ |
14797 |
++ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); |
14798 |
++ if (!priv) |
14799 |
++ return; |
14800 |
++ |
14801 |
++ hydra = priv->sub_priv; |
14802 |
++ if (!hydra) |
14803 |
++ return; |
14804 |
++ |
14805 |
++ switch (hydra->pending_get_busparams_type) { |
14806 |
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: |
14807 |
++ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, |
14808 |
++ sizeof(priv->busparams_nominal)); |
14809 |
++ break; |
14810 |
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: |
14811 |
++ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, |
14812 |
++ sizeof(priv->busparams_nominal)); |
14813 |
++ break; |
14814 |
++ default: |
14815 |
++ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", |
14816 |
++ hydra->pending_get_busparams_type); |
14817 |
++ break; |
14818 |
++ } |
14819 |
++ hydra->pending_get_busparams_type = -1; |
14820 |
++ |
14821 |
++ complete(&priv->get_busparams_comp); |
14822 |
++} |
14823 |
++ |
14824 |
+ static void |
14825 |
+ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, |
14826 |
+ u8 bus_status, |
14827 |
+@@ -1125,6 +1173,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, |
14828 |
+ struct kvaser_usb_net_priv *priv; |
14829 |
+ unsigned long irq_flags; |
14830 |
+ bool one_shot_fail = false; |
14831 |
++ bool is_err_frame = false; |
14832 |
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); |
14833 |
+ |
14834 |
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); |
14835 |
+@@ -1143,10 +1192,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, |
14836 |
+ kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); |
14837 |
+ one_shot_fail = true; |
14838 |
+ } |
14839 |
++ |
14840 |
++ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && |
14841 |
++ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; |
14842 |
+ } |
14843 |
+ |
14844 |
+ context = &priv->tx_contexts[transid % dev->max_tx_urbs]; |
14845 |
+- if (!one_shot_fail) { |
14846 |
++ if (!one_shot_fail && !is_err_frame) { |
14847 |
+ struct net_device_stats *stats = &priv->netdev->stats; |
14848 |
+ |
14849 |
+ stats->tx_packets++; |
14850 |
+@@ -1320,6 +1372,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, |
14851 |
+ kvaser_usb_hydra_state_event(dev, cmd); |
14852 |
+ break; |
14853 |
+ |
14854 |
++ case CMD_GET_BUSPARAMS_RESP: |
14855 |
++ kvaser_usb_hydra_get_busparams_reply(dev, cmd); |
14856 |
++ break; |
14857 |
++ |
14858 |
+ case CMD_ERROR_EVENT: |
14859 |
+ kvaser_usb_hydra_error_event(dev, cmd); |
14860 |
+ break; |
14861 |
+@@ -1520,15 +1576,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev, |
14862 |
+ return err; |
14863 |
+ } |
14864 |
+ |
14865 |
+-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) |
14866 |
++static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, |
14867 |
++ int busparams_type) |
14868 |
++{ |
14869 |
++ struct kvaser_usb *dev = priv->dev; |
14870 |
++ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; |
14871 |
++ struct kvaser_cmd *cmd; |
14872 |
++ int err; |
14873 |
++ |
14874 |
++ if (!hydra) |
14875 |
++ return -EINVAL; |
14876 |
++ |
14877 |
++ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); |
14878 |
++ if (!cmd) |
14879 |
++ return -ENOMEM; |
14880 |
++ |
14881 |
++ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; |
14882 |
++ kvaser_usb_hydra_set_cmd_dest_he |
14883 |
++ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); |
14884 |
++ kvaser_usb_hydra_set_cmd_transid |
14885 |
++ (cmd, kvaser_usb_hydra_get_next_transid(dev)); |
14886 |
++ cmd->get_busparams_req.type = busparams_type; |
14887 |
++ hydra->pending_get_busparams_type = busparams_type; |
14888 |
++ |
14889 |
++ reinit_completion(&priv->get_busparams_comp); |
14890 |
++ |
14891 |
++ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); |
14892 |
++ if (err) |
14893 |
++ return err; |
14894 |
++ |
14895 |
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp, |
14896 |
++ msecs_to_jiffies(KVASER_USB_TIMEOUT))) |
14897 |
++ return -ETIMEDOUT; |
14898 |
++ |
14899 |
++ return err; |
14900 |
++} |
14901 |
++ |
14902 |
++static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) |
14903 |
++{ |
14904 |
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); |
14905 |
++} |
14906 |
++ |
14907 |
++static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) |
14908 |
++{ |
14909 |
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); |
14910 |
++} |
14911 |
++ |
14912 |
++static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, |
14913 |
++ const struct kvaser_usb_busparams *busparams) |
14914 |
+ { |
14915 |
+ struct kvaser_cmd *cmd; |
14916 |
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
14917 |
+- struct can_bittiming *bt = &priv->can.bittiming; |
14918 |
+ struct kvaser_usb *dev = priv->dev; |
14919 |
+- int tseg1 = bt->prop_seg + bt->phase_seg1; |
14920 |
+- int tseg2 = bt->phase_seg2; |
14921 |
+- int sjw = bt->sjw; |
14922 |
+ int err; |
14923 |
+ |
14924 |
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); |
14925 |
+@@ -1536,11 +1635,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) |
14926 |
+ return -ENOMEM; |
14927 |
+ |
14928 |
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; |
14929 |
+- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); |
14930 |
+- cmd->set_busparams_req.sjw = (u8)sjw; |
14931 |
+- cmd->set_busparams_req.tseg1 = (u8)tseg1; |
14932 |
+- cmd->set_busparams_req.tseg2 = (u8)tseg2; |
14933 |
+- cmd->set_busparams_req.nsamples = 1; |
14934 |
++ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, |
14935 |
++ sizeof(cmd->set_busparams_req.busparams_nominal)); |
14936 |
+ |
14937 |
+ kvaser_usb_hydra_set_cmd_dest_he |
14938 |
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); |
14939 |
+@@ -1554,15 +1650,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) |
14940 |
+ return err; |
14941 |
+ } |
14942 |
+ |
14943 |
+-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) |
14944 |
++static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, |
14945 |
++ const struct kvaser_usb_busparams *busparams) |
14946 |
+ { |
14947 |
+ struct kvaser_cmd *cmd; |
14948 |
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
14949 |
+- struct can_bittiming *dbt = &priv->can.data_bittiming; |
14950 |
+ struct kvaser_usb *dev = priv->dev; |
14951 |
+- int tseg1 = dbt->prop_seg + dbt->phase_seg1; |
14952 |
+- int tseg2 = dbt->phase_seg2; |
14953 |
+- int sjw = dbt->sjw; |
14954 |
+ int err; |
14955 |
+ |
14956 |
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); |
14957 |
+@@ -1570,11 +1663,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) |
14958 |
+ return -ENOMEM; |
14959 |
+ |
14960 |
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; |
14961 |
+- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); |
14962 |
+- cmd->set_busparams_req.sjw_d = (u8)sjw; |
14963 |
+- cmd->set_busparams_req.tseg1_d = (u8)tseg1; |
14964 |
+- cmd->set_busparams_req.tseg2_d = (u8)tseg2; |
14965 |
+- cmd->set_busparams_req.nsamples_d = 1; |
14966 |
++ memcpy(&cmd->set_busparams_req.busparams_data, busparams, |
14967 |
++ sizeof(cmd->set_busparams_req.busparams_data)); |
14968 |
+ |
14969 |
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { |
14970 |
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) |
14971 |
+@@ -1681,6 +1771,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) |
14972 |
+ return 0; |
14973 |
+ } |
14974 |
+ |
14975 |
++static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) |
14976 |
++{ |
14977 |
++ struct kvaser_usb_net_hydra_priv *hydra; |
14978 |
++ |
14979 |
++ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); |
14980 |
++ if (!hydra) |
14981 |
++ return -ENOMEM; |
14982 |
++ |
14983 |
++ priv->sub_priv = hydra; |
14984 |
++ |
14985 |
++ return 0; |
14986 |
++} |
14987 |
++ |
14988 |
+ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) |
14989 |
+ { |
14990 |
+ struct kvaser_cmd cmd; |
14991 |
+@@ -2025,10 +2128,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, |
14992 |
+ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { |
14993 |
+ .dev_set_mode = kvaser_usb_hydra_set_mode, |
14994 |
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, |
14995 |
++ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, |
14996 |
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, |
14997 |
++ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, |
14998 |
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, |
14999 |
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, |
15000 |
+ .dev_init_card = kvaser_usb_hydra_init_card, |
15001 |
++ .dev_init_channel = kvaser_usb_hydra_init_channel, |
15002 |
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info, |
15003 |
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details, |
15004 |
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info, |
15005 |
+@@ -2044,7 +2150,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { |
15006 |
+ |
15007 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { |
15008 |
+ .clock = { |
15009 |
+- .freq = 80000000, |
15010 |
++ .freq = 80 * MEGA /* Hz */, |
15011 |
+ }, |
15012 |
+ .timestamp_freq = 80, |
15013 |
+ .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, |
15014 |
+@@ -2053,7 +2159,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { |
15015 |
+ |
15016 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { |
15017 |
+ .clock = { |
15018 |
+- .freq = 24000000, |
15019 |
++ .freq = 24 * MEGA /* Hz */, |
15020 |
+ }, |
15021 |
+ .timestamp_freq = 1, |
15022 |
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const, |
15023 |
+@@ -2061,7 +2167,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { |
15024 |
+ |
15025 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { |
15026 |
+ .clock = { |
15027 |
+- .freq = 80000000, |
15028 |
++ .freq = 80 * MEGA /* Hz */, |
15029 |
+ }, |
15030 |
+ .timestamp_freq = 24, |
15031 |
+ .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, |
15032 |
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
15033 |
+index 7a71097281c20..ad3103391c793 100644 |
15034 |
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
15035 |
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
15036 |
+@@ -19,7 +19,9 @@ |
15037 |
+ #include <linux/spinlock.h> |
15038 |
+ #include <linux/string.h> |
15039 |
+ #include <linux/types.h> |
15040 |
++#include <linux/units.h> |
15041 |
+ #include <linux/usb.h> |
15042 |
++#include <linux/workqueue.h> |
15043 |
+ |
15044 |
+ #include <linux/can.h> |
15045 |
+ #include <linux/can/dev.h> |
15046 |
+@@ -55,6 +57,9 @@ |
15047 |
+ #define CMD_RX_EXT_MESSAGE 14 |
15048 |
+ #define CMD_TX_EXT_MESSAGE 15 |
15049 |
+ #define CMD_SET_BUS_PARAMS 16 |
15050 |
++#define CMD_GET_BUS_PARAMS 17 |
15051 |
++#define CMD_GET_BUS_PARAMS_REPLY 18 |
15052 |
++#define CMD_GET_CHIP_STATE 19 |
15053 |
+ #define CMD_CHIP_STATE_EVENT 20 |
15054 |
+ #define CMD_SET_CTRL_MODE 21 |
15055 |
+ #define CMD_RESET_CHIP 24 |
15056 |
+@@ -69,10 +74,13 @@ |
15057 |
+ #define CMD_GET_CARD_INFO_REPLY 35 |
15058 |
+ #define CMD_GET_SOFTWARE_INFO 38 |
15059 |
+ #define CMD_GET_SOFTWARE_INFO_REPLY 39 |
15060 |
++#define CMD_ERROR_EVENT 45 |
15061 |
+ #define CMD_FLUSH_QUEUE 48 |
15062 |
+ #define CMD_TX_ACKNOWLEDGE 50 |
15063 |
+ #define CMD_CAN_ERROR_EVENT 51 |
15064 |
+ #define CMD_FLUSH_QUEUE_REPLY 68 |
15065 |
++#define CMD_GET_CAPABILITIES_REQ 95 |
15066 |
++#define CMD_GET_CAPABILITIES_RESP 96 |
15067 |
+ |
15068 |
+ #define CMD_LEAF_LOG_MESSAGE 106 |
15069 |
+ |
15070 |
+@@ -82,6 +90,8 @@ |
15071 |
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) |
15072 |
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) |
15073 |
+ |
15074 |
++#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) |
15075 |
++ |
15076 |
+ /* error factors */ |
15077 |
+ #define M16C_EF_ACKE BIT(0) |
15078 |
+ #define M16C_EF_CRCE BIT(1) |
15079 |
+@@ -156,11 +166,7 @@ struct usbcan_cmd_softinfo { |
15080 |
+ struct kvaser_cmd_busparams { |
15081 |
+ u8 tid; |
15082 |
+ u8 channel; |
15083 |
+- __le32 bitrate; |
15084 |
+- u8 tseg1; |
15085 |
+- u8 tseg2; |
15086 |
+- u8 sjw; |
15087 |
+- u8 no_samp; |
15088 |
++ struct kvaser_usb_busparams busparams; |
15089 |
+ } __packed; |
15090 |
+ |
15091 |
+ struct kvaser_cmd_tx_can { |
15092 |
+@@ -229,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header { |
15093 |
+ u8 tid; |
15094 |
+ } __packed; |
15095 |
+ |
15096 |
+-struct leaf_cmd_error_event { |
15097 |
++struct leaf_cmd_can_error_event { |
15098 |
+ u8 tid; |
15099 |
+ u8 flags; |
15100 |
+ __le16 time[3]; |
15101 |
+@@ -241,7 +247,7 @@ struct leaf_cmd_error_event { |
15102 |
+ u8 error_factor; |
15103 |
+ } __packed; |
15104 |
+ |
15105 |
+-struct usbcan_cmd_error_event { |
15106 |
++struct usbcan_cmd_can_error_event { |
15107 |
+ u8 tid; |
15108 |
+ u8 padding; |
15109 |
+ u8 tx_errors_count_ch0; |
15110 |
+@@ -253,6 +259,28 @@ struct usbcan_cmd_error_event { |
15111 |
+ __le16 time; |
15112 |
+ } __packed; |
15113 |
+ |
15114 |
++/* CMD_ERROR_EVENT error codes */ |
15115 |
++#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 |
15116 |
++#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 |
15117 |
++ |
15118 |
++struct leaf_cmd_error_event { |
15119 |
++ u8 tid; |
15120 |
++ u8 error_code; |
15121 |
++ __le16 timestamp[3]; |
15122 |
++ __le16 padding; |
15123 |
++ __le16 info1; |
15124 |
++ __le16 info2; |
15125 |
++} __packed; |
15126 |
++ |
15127 |
++struct usbcan_cmd_error_event { |
15128 |
++ u8 tid; |
15129 |
++ u8 error_code; |
15130 |
++ __le16 info1; |
15131 |
++ __le16 info2; |
15132 |
++ __le16 timestamp; |
15133 |
++ __le16 padding; |
15134 |
++} __packed; |
15135 |
++ |
15136 |
+ struct kvaser_cmd_ctrl_mode { |
15137 |
+ u8 tid; |
15138 |
+ u8 channel; |
15139 |
+@@ -277,6 +305,28 @@ struct leaf_cmd_log_message { |
15140 |
+ u8 data[8]; |
15141 |
+ } __packed; |
15142 |
+ |
15143 |
++/* Sub commands for cap_req and cap_res */ |
15144 |
++#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 |
15145 |
++#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 |
15146 |
++struct kvaser_cmd_cap_req { |
15147 |
++ __le16 padding0; |
15148 |
++ __le16 cap_cmd; |
15149 |
++ __le16 padding1; |
15150 |
++ __le16 channel; |
15151 |
++} __packed; |
15152 |
++ |
15153 |
++/* Status codes for cap_res */ |
15154 |
++#define KVASER_USB_LEAF_CAP_STAT_OK 0x00 |
15155 |
++#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 |
15156 |
++#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 |
15157 |
++struct kvaser_cmd_cap_res { |
15158 |
++ __le16 padding; |
15159 |
++ __le16 cap_cmd; |
15160 |
++ __le16 status; |
15161 |
++ __le32 mask; |
15162 |
++ __le32 value; |
15163 |
++} __packed; |
15164 |
++ |
15165 |
+ struct kvaser_cmd { |
15166 |
+ u8 len; |
15167 |
+ u8 id; |
15168 |
+@@ -292,14 +342,18 @@ struct kvaser_cmd { |
15169 |
+ struct leaf_cmd_softinfo softinfo; |
15170 |
+ struct leaf_cmd_rx_can rx_can; |
15171 |
+ struct leaf_cmd_chip_state_event chip_state_event; |
15172 |
+- struct leaf_cmd_error_event error_event; |
15173 |
++ struct leaf_cmd_can_error_event can_error_event; |
15174 |
+ struct leaf_cmd_log_message log_message; |
15175 |
++ struct leaf_cmd_error_event error_event; |
15176 |
++ struct kvaser_cmd_cap_req cap_req; |
15177 |
++ struct kvaser_cmd_cap_res cap_res; |
15178 |
+ } __packed leaf; |
15179 |
+ |
15180 |
+ union { |
15181 |
+ struct usbcan_cmd_softinfo softinfo; |
15182 |
+ struct usbcan_cmd_rx_can rx_can; |
15183 |
+ struct usbcan_cmd_chip_state_event chip_state_event; |
15184 |
++ struct usbcan_cmd_can_error_event can_error_event; |
15185 |
+ struct usbcan_cmd_error_event error_event; |
15186 |
+ } __packed usbcan; |
15187 |
+ |
15188 |
+@@ -322,7 +376,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { |
15189 |
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), |
15190 |
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), |
15191 |
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), |
15192 |
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), |
15193 |
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), |
15194 |
++ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), |
15195 |
++ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), |
15196 |
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), |
15197 |
+ /* ignored events: */ |
15198 |
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, |
15199 |
+ }; |
15200 |
+@@ -336,7 +393,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { |
15201 |
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), |
15202 |
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), |
15203 |
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), |
15204 |
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), |
15205 |
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), |
15206 |
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), |
15207 |
+ /* ignored events: */ |
15208 |
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, |
15209 |
+ }; |
15210 |
+@@ -364,6 +422,12 @@ struct kvaser_usb_err_summary { |
15211 |
+ }; |
15212 |
+ }; |
15213 |
+ |
15214 |
++struct kvaser_usb_net_leaf_priv { |
15215 |
++ struct kvaser_usb_net_priv *net; |
15216 |
++ |
15217 |
++ struct delayed_work chip_state_req_work; |
15218 |
++}; |
15219 |
++ |
15220 |
+ static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { |
15221 |
+ .name = "kvaser_usb_ucii", |
15222 |
+ .tseg1_min = 4, |
15223 |
+@@ -390,7 +454,7 @@ static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { |
15224 |
+ |
15225 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { |
15226 |
+ .clock = { |
15227 |
+- .freq = 8000000, |
15228 |
++ .freq = 8 * MEGA /* Hz */, |
15229 |
+ }, |
15230 |
+ .timestamp_freq = 1, |
15231 |
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, |
15232 |
+@@ -398,7 +462,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { |
15233 |
+ |
15234 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { |
15235 |
+ .clock = { |
15236 |
+- .freq = 16000000, |
15237 |
++ .freq = 16 * MEGA /* Hz */, |
15238 |
+ }, |
15239 |
+ .timestamp_freq = 1, |
15240 |
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, |
15241 |
+@@ -414,7 +478,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { |
15242 |
+ |
15243 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { |
15244 |
+ .clock = { |
15245 |
+- .freq = 24000000, |
15246 |
++ .freq = 24 * MEGA /* Hz */, |
15247 |
+ }, |
15248 |
+ .timestamp_freq = 1, |
15249 |
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const, |
15250 |
+@@ -422,7 +486,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { |
15251 |
+ |
15252 |
+ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { |
15253 |
+ .clock = { |
15254 |
+- .freq = 32000000, |
15255 |
++ .freq = 32 * MEGA /* Hz */, |
15256 |
+ }, |
15257 |
+ .timestamp_freq = 1, |
15258 |
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const, |
15259 |
+@@ -607,6 +671,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, |
15260 |
+ dev->fw_version = le32_to_cpu(softinfo->fw_version); |
15261 |
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); |
15262 |
+ |
15263 |
++ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) |
15264 |
++ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; |
15265 |
++ |
15266 |
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { |
15267 |
+ /* Firmware expects bittiming parameters calculated for 16MHz |
15268 |
+ * clock, regardless of the actual clock |
15269 |
+@@ -694,6 +761,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) |
15270 |
+ return 0; |
15271 |
+ } |
15272 |
+ |
15273 |
++static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, |
15274 |
++ u16 cap_cmd_req, u16 *status) |
15275 |
++{ |
15276 |
++ struct kvaser_usb_dev_card_data *card_data = &dev->card_data; |
15277 |
++ struct kvaser_cmd *cmd; |
15278 |
++ u32 value = 0; |
15279 |
++ u32 mask = 0; |
15280 |
++ u16 cap_cmd_res; |
15281 |
++ int err; |
15282 |
++ int i; |
15283 |
++ |
15284 |
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
15285 |
++ if (!cmd) |
15286 |
++ return -ENOMEM; |
15287 |
++ |
15288 |
++ cmd->id = CMD_GET_CAPABILITIES_REQ; |
15289 |
++ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); |
15290 |
++ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); |
15291 |
++ |
15292 |
++ err = kvaser_usb_send_cmd(dev, cmd, cmd->len); |
15293 |
++ if (err) |
15294 |
++ goto end; |
15295 |
++ |
15296 |
++ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); |
15297 |
++ if (err) |
15298 |
++ goto end; |
15299 |
++ |
15300 |
++ *status = le16_to_cpu(cmd->u.leaf.cap_res.status); |
15301 |
++ |
15302 |
++ if (*status != KVASER_USB_LEAF_CAP_STAT_OK) |
15303 |
++ goto end; |
15304 |
++ |
15305 |
++ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); |
15306 |
++ switch (cap_cmd_res) { |
15307 |
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: |
15308 |
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: |
15309 |
++ value = le32_to_cpu(cmd->u.leaf.cap_res.value); |
15310 |
++ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); |
15311 |
++ break; |
15312 |
++ default: |
15313 |
++ dev_warn(&dev->intf->dev, "Unknown capability command %u\n", |
15314 |
++ cap_cmd_res); |
15315 |
++ break; |
15316 |
++ } |
15317 |
++ |
15318 |
++ for (i = 0; i < dev->nchannels; i++) { |
15319 |
++ if (BIT(i) & (value & mask)) { |
15320 |
++ switch (cap_cmd_res) { |
15321 |
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: |
15322 |
++ card_data->ctrlmode_supported |= |
15323 |
++ CAN_CTRLMODE_LISTENONLY; |
15324 |
++ break; |
15325 |
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: |
15326 |
++ card_data->capabilities |= |
15327 |
++ KVASER_USB_CAP_BERR_CAP; |
15328 |
++ break; |
15329 |
++ } |
15330 |
++ } |
15331 |
++ } |
15332 |
++ |
15333 |
++end: |
15334 |
++ kfree(cmd); |
15335 |
++ |
15336 |
++ return err; |
15337 |
++} |
15338 |
++ |
15339 |
++static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) |
15340 |
++{ |
15341 |
++ int err; |
15342 |
++ u16 status; |
15343 |
++ |
15344 |
++ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { |
15345 |
++ dev_info(&dev->intf->dev, |
15346 |
++ "No extended capability support. Upgrade device firmware.\n"); |
15347 |
++ return 0; |
15348 |
++ } |
15349 |
++ |
15350 |
++ err = kvaser_usb_leaf_get_single_capability(dev, |
15351 |
++ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, |
15352 |
++ &status); |
15353 |
++ if (err) |
15354 |
++ return err; |
15355 |
++ if (status) |
15356 |
++ dev_info(&dev->intf->dev, |
15357 |
++ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", |
15358 |
++ status); |
15359 |
++ |
15360 |
++ err = kvaser_usb_leaf_get_single_capability(dev, |
15361 |
++ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, |
15362 |
++ &status); |
15363 |
++ if (err) |
15364 |
++ return err; |
15365 |
++ if (status) |
15366 |
++ dev_info(&dev->intf->dev, |
15367 |
++ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", |
15368 |
++ status); |
15369 |
++ |
15370 |
++ return 0; |
15371 |
++} |
15372 |
++ |
15373 |
++static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) |
15374 |
++{ |
15375 |
++ int err = 0; |
15376 |
++ |
15377 |
++ if (dev->driver_info->family == KVASER_LEAF) |
15378 |
++ err = kvaser_usb_leaf_get_capabilities_leaf(dev); |
15379 |
++ |
15380 |
++ return err; |
15381 |
++} |
15382 |
++ |
15383 |
+ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, |
15384 |
+ const struct kvaser_cmd *cmd) |
15385 |
+ { |
15386 |
+@@ -722,7 +899,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, |
15387 |
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs]; |
15388 |
+ |
15389 |
+ /* Sometimes the state change doesn't come after a bus-off event */ |
15390 |
+- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { |
15391 |
++ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { |
15392 |
+ struct sk_buff *skb; |
15393 |
+ struct can_frame *cf; |
15394 |
+ |
15395 |
+@@ -778,6 +955,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, |
15396 |
+ return err; |
15397 |
+ } |
15398 |
+ |
15399 |
++static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) |
15400 |
++{ |
15401 |
++ struct kvaser_usb_net_leaf_priv *leaf = |
15402 |
++ container_of(work, struct kvaser_usb_net_leaf_priv, |
15403 |
++ chip_state_req_work.work); |
15404 |
++ struct kvaser_usb_net_priv *priv = leaf->net; |
15405 |
++ |
15406 |
++ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); |
15407 |
++} |
15408 |
++ |
15409 |
+ static void |
15410 |
+ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, |
15411 |
+ const struct kvaser_usb_err_summary *es, |
15412 |
+@@ -796,20 +983,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, |
15413 |
+ new_state = CAN_STATE_BUS_OFF; |
15414 |
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) { |
15415 |
+ new_state = CAN_STATE_ERROR_PASSIVE; |
15416 |
+- } else if (es->status & M16C_STATE_BUS_ERROR) { |
15417 |
++ } else if ((es->status & M16C_STATE_BUS_ERROR) && |
15418 |
++ cur_state >= CAN_STATE_BUS_OFF) { |
15419 |
+ /* Guard against spurious error events after a busoff */ |
15420 |
+- if (cur_state < CAN_STATE_BUS_OFF) { |
15421 |
+- if (es->txerr >= 128 || es->rxerr >= 128) |
15422 |
+- new_state = CAN_STATE_ERROR_PASSIVE; |
15423 |
+- else if (es->txerr >= 96 || es->rxerr >= 96) |
15424 |
+- new_state = CAN_STATE_ERROR_WARNING; |
15425 |
+- else if (cur_state > CAN_STATE_ERROR_ACTIVE) |
15426 |
+- new_state = CAN_STATE_ERROR_ACTIVE; |
15427 |
+- } |
15428 |
+- } |
15429 |
+- |
15430 |
+- if (!es->status) |
15431 |
++ } else if (es->txerr >= 128 || es->rxerr >= 128) { |
15432 |
++ new_state = CAN_STATE_ERROR_PASSIVE; |
15433 |
++ } else if (es->txerr >= 96 || es->rxerr >= 96) { |
15434 |
++ new_state = CAN_STATE_ERROR_WARNING; |
15435 |
++ } else { |
15436 |
+ new_state = CAN_STATE_ERROR_ACTIVE; |
15437 |
++ } |
15438 |
+ |
15439 |
+ if (new_state != cur_state) { |
15440 |
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0; |
15441 |
+@@ -819,7 +1002,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, |
15442 |
+ } |
15443 |
+ |
15444 |
+ if (priv->can.restart_ms && |
15445 |
+- cur_state >= CAN_STATE_BUS_OFF && |
15446 |
++ cur_state == CAN_STATE_BUS_OFF && |
15447 |
+ new_state < CAN_STATE_BUS_OFF) |
15448 |
+ priv->can.can_stats.restarts++; |
15449 |
+ |
15450 |
+@@ -853,6 +1036,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, |
15451 |
+ struct sk_buff *skb; |
15452 |
+ struct net_device_stats *stats; |
15453 |
+ struct kvaser_usb_net_priv *priv; |
15454 |
++ struct kvaser_usb_net_leaf_priv *leaf; |
15455 |
+ enum can_state old_state, new_state; |
15456 |
+ |
15457 |
+ if (es->channel >= dev->nchannels) { |
15458 |
+@@ -862,8 +1046,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, |
15459 |
+ } |
15460 |
+ |
15461 |
+ priv = dev->nets[es->channel]; |
15462 |
++ leaf = priv->sub_priv; |
15463 |
+ stats = &priv->netdev->stats; |
15464 |
+ |
15465 |
++ /* Ignore e.g. state change to bus-off reported just after stopping */ |
15466 |
++ if (!netif_running(priv->netdev)) |
15467 |
++ return; |
15468 |
++ |
15469 |
+ /* Update all of the CAN interface's state and error counters before |
15470 |
+ * trying any memory allocation that can actually fail with -ENOMEM. |
15471 |
+ * |
15472 |
+@@ -878,6 +1067,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, |
15473 |
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); |
15474 |
+ new_state = priv->can.state; |
15475 |
+ |
15476 |
++ /* If there are errors, request status updates periodically as we do |
15477 |
++ * not get automatic notifications of improved state. |
15478 |
++ */ |
15479 |
++ if (new_state < CAN_STATE_BUS_OFF && |
15480 |
++ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE)) |
15481 |
++ schedule_delayed_work(&leaf->chip_state_req_work, |
15482 |
++ msecs_to_jiffies(500)); |
15483 |
++ |
15484 |
+ skb = alloc_can_err_skb(priv->netdev, &cf); |
15485 |
+ if (!skb) { |
15486 |
+ stats->rx_dropped++; |
15487 |
+@@ -895,7 +1092,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, |
15488 |
+ } |
15489 |
+ |
15490 |
+ if (priv->can.restart_ms && |
15491 |
+- old_state >= CAN_STATE_BUS_OFF && |
15492 |
++ old_state == CAN_STATE_BUS_OFF && |
15493 |
+ new_state < CAN_STATE_BUS_OFF) { |
15494 |
+ cf->can_id |= CAN_ERR_RESTARTED; |
15495 |
+ netif_carrier_on(priv->netdev); |
15496 |
+@@ -995,11 +1192,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, |
15497 |
+ |
15498 |
+ case CMD_CAN_ERROR_EVENT: |
15499 |
+ es.channel = 0; |
15500 |
+- es.status = cmd->u.usbcan.error_event.status_ch0; |
15501 |
+- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; |
15502 |
+- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; |
15503 |
++ es.status = cmd->u.usbcan.can_error_event.status_ch0; |
15504 |
++ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; |
15505 |
++ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; |
15506 |
+ es.usbcan.other_ch_status = |
15507 |
+- cmd->u.usbcan.error_event.status_ch1; |
15508 |
++ cmd->u.usbcan.can_error_event.status_ch1; |
15509 |
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); |
15510 |
+ |
15511 |
+ /* The USBCAN firmware supports up to 2 channels. |
15512 |
+@@ -1007,13 +1204,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, |
15513 |
+ */ |
15514 |
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { |
15515 |
+ es.channel = 1; |
15516 |
+- es.status = cmd->u.usbcan.error_event.status_ch1; |
15517 |
++ es.status = cmd->u.usbcan.can_error_event.status_ch1; |
15518 |
+ es.txerr = |
15519 |
+- cmd->u.usbcan.error_event.tx_errors_count_ch1; |
15520 |
++ cmd->u.usbcan.can_error_event.tx_errors_count_ch1; |
15521 |
+ es.rxerr = |
15522 |
+- cmd->u.usbcan.error_event.rx_errors_count_ch1; |
15523 |
++ cmd->u.usbcan.can_error_event.rx_errors_count_ch1; |
15524 |
+ es.usbcan.other_ch_status = |
15525 |
+- cmd->u.usbcan.error_event.status_ch0; |
15526 |
++ cmd->u.usbcan.can_error_event.status_ch0; |
15527 |
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); |
15528 |
+ } |
15529 |
+ break; |
15530 |
+@@ -1030,11 +1227,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, |
15531 |
+ |
15532 |
+ switch (cmd->id) { |
15533 |
+ case CMD_CAN_ERROR_EVENT: |
15534 |
+- es.channel = cmd->u.leaf.error_event.channel; |
15535 |
+- es.status = cmd->u.leaf.error_event.status; |
15536 |
+- es.txerr = cmd->u.leaf.error_event.tx_errors_count; |
15537 |
+- es.rxerr = cmd->u.leaf.error_event.rx_errors_count; |
15538 |
+- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; |
15539 |
++ es.channel = cmd->u.leaf.can_error_event.channel; |
15540 |
++ es.status = cmd->u.leaf.can_error_event.status; |
15541 |
++ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; |
15542 |
++ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; |
15543 |
++ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; |
15544 |
+ break; |
15545 |
+ case CMD_LEAF_LOG_MESSAGE: |
15546 |
+ es.channel = cmd->u.leaf.log_message.channel; |
15547 |
+@@ -1166,6 +1363,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, |
15548 |
+ netif_rx(skb); |
15549 |
+ } |
15550 |
+ |
15551 |
++static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, |
15552 |
++ const struct kvaser_cmd *cmd) |
15553 |
++{ |
15554 |
++ u16 info1 = 0; |
15555 |
++ |
15556 |
++ switch (dev->driver_info->family) { |
15557 |
++ case KVASER_LEAF: |
15558 |
++ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); |
15559 |
++ break; |
15560 |
++ case KVASER_USBCAN: |
15561 |
++ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); |
15562 |
++ break; |
15563 |
++ } |
15564 |
++ |
15565 |
++ /* info1 will contain the offending cmd_no */ |
15566 |
++ switch (info1) { |
15567 |
++ case CMD_SET_CTRL_MODE: |
15568 |
++ dev_warn(&dev->intf->dev, |
15569 |
++ "CMD_SET_CTRL_MODE error in parameter\n"); |
15570 |
++ break; |
15571 |
++ |
15572 |
++ case CMD_SET_BUS_PARAMS: |
15573 |
++ dev_warn(&dev->intf->dev, |
15574 |
++ "CMD_SET_BUS_PARAMS error in parameter\n"); |
15575 |
++ break; |
15576 |
++ |
15577 |
++ default: |
15578 |
++ dev_warn(&dev->intf->dev, |
15579 |
++ "Unhandled parameter error event cmd_no (%u)\n", |
15580 |
++ info1); |
15581 |
++ break; |
15582 |
++ } |
15583 |
++} |
15584 |
++ |
15585 |
++static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, |
15586 |
++ const struct kvaser_cmd *cmd) |
15587 |
++{ |
15588 |
++ u8 error_code = 0; |
15589 |
++ |
15590 |
++ switch (dev->driver_info->family) { |
15591 |
++ case KVASER_LEAF: |
15592 |
++ error_code = cmd->u.leaf.error_event.error_code; |
15593 |
++ break; |
15594 |
++ case KVASER_USBCAN: |
15595 |
++ error_code = cmd->u.usbcan.error_event.error_code; |
15596 |
++ break; |
15597 |
++ } |
15598 |
++ |
15599 |
++ switch (error_code) { |
15600 |
++ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: |
15601 |
++ /* Received additional CAN message, when firmware TX queue is |
15602 |
++ * already full. Something is wrong with the driver. |
15603 |
++ * This should never happen! |
15604 |
++ */ |
15605 |
++ dev_err(&dev->intf->dev, |
15606 |
++ "Received error event TX_QUEUE_FULL\n"); |
15607 |
++ break; |
15608 |
++ case KVASER_USB_LEAF_ERROR_EVENT_PARAM: |
15609 |
++ kvaser_usb_leaf_error_event_parameter(dev, cmd); |
15610 |
++ break; |
15611 |
++ |
15612 |
++ default: |
15613 |
++ dev_warn(&dev->intf->dev, |
15614 |
++ "Unhandled error event (%d)\n", error_code); |
15615 |
++ break; |
15616 |
++ } |
15617 |
++} |
15618 |
++ |
15619 |
+ static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, |
15620 |
+ const struct kvaser_cmd *cmd) |
15621 |
+ { |
15622 |
+@@ -1206,6 +1471,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, |
15623 |
+ complete(&priv->stop_comp); |
15624 |
+ } |
15625 |
+ |
15626 |
++static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, |
15627 |
++ const struct kvaser_cmd *cmd) |
15628 |
++{ |
15629 |
++ struct kvaser_usb_net_priv *priv; |
15630 |
++ u8 channel = cmd->u.busparams.channel; |
15631 |
++ |
15632 |
++ if (channel >= dev->nchannels) { |
15633 |
++ dev_err(&dev->intf->dev, |
15634 |
++ "Invalid channel number (%d)\n", channel); |
15635 |
++ return; |
15636 |
++ } |
15637 |
++ |
15638 |
++ priv = dev->nets[channel]; |
15639 |
++ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, |
15640 |
++ sizeof(priv->busparams_nominal)); |
15641 |
++ |
15642 |
++ complete(&priv->get_busparams_comp); |
15643 |
++} |
15644 |
++ |
15645 |
+ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, |
15646 |
+ const struct kvaser_cmd *cmd) |
15647 |
+ { |
15648 |
+@@ -1244,6 +1528,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, |
15649 |
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd); |
15650 |
+ break; |
15651 |
+ |
15652 |
++ case CMD_ERROR_EVENT: |
15653 |
++ kvaser_usb_leaf_error_event(dev, cmd); |
15654 |
++ break; |
15655 |
++ |
15656 |
++ case CMD_GET_BUS_PARAMS_REPLY: |
15657 |
++ kvaser_usb_leaf_get_busparams_reply(dev, cmd); |
15658 |
++ break; |
15659 |
++ |
15660 |
+ /* Ignored commands */ |
15661 |
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: |
15662 |
+ if (dev->driver_info->family != KVASER_USBCAN) |
15663 |
+@@ -1340,10 +1632,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) |
15664 |
+ |
15665 |
+ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) |
15666 |
+ { |
15667 |
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; |
15668 |
+ int err; |
15669 |
+ |
15670 |
+ reinit_completion(&priv->stop_comp); |
15671 |
+ |
15672 |
++ cancel_delayed_work(&leaf->chip_state_req_work); |
15673 |
++ |
15674 |
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, |
15675 |
+ priv->channel); |
15676 |
+ if (err) |
15677 |
+@@ -1390,10 +1685,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) |
15678 |
+ return 0; |
15679 |
+ } |
15680 |
+ |
15681 |
+-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) |
15682 |
++static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) |
15683 |
++{ |
15684 |
++ struct kvaser_usb_net_leaf_priv *leaf; |
15685 |
++ |
15686 |
++ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); |
15687 |
++ if (!leaf) |
15688 |
++ return -ENOMEM; |
15689 |
++ |
15690 |
++ leaf->net = priv; |
15691 |
++ INIT_DELAYED_WORK(&leaf->chip_state_req_work, |
15692 |
++ kvaser_usb_leaf_chip_state_req_work); |
15693 |
++ |
15694 |
++ priv->sub_priv = leaf; |
15695 |
++ |
15696 |
++ return 0; |
15697 |
++} |
15698 |
++ |
15699 |
++static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) |
15700 |
++{ |
15701 |
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; |
15702 |
++ |
15703 |
++ if (leaf) |
15704 |
++ cancel_delayed_work_sync(&leaf->chip_state_req_work); |
15705 |
++} |
15706 |
++ |
15707 |
++static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, |
15708 |
++ const struct kvaser_usb_busparams *busparams) |
15709 |
+ { |
15710 |
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
15711 |
+- struct can_bittiming *bt = &priv->can.bittiming; |
15712 |
+ struct kvaser_usb *dev = priv->dev; |
15713 |
+ struct kvaser_cmd *cmd; |
15714 |
+ int rc; |
15715 |
+@@ -1406,15 +1726,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) |
15716 |
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); |
15717 |
+ cmd->u.busparams.channel = priv->channel; |
15718 |
+ cmd->u.busparams.tid = 0xff; |
15719 |
+- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); |
15720 |
+- cmd->u.busparams.sjw = bt->sjw; |
15721 |
+- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; |
15722 |
+- cmd->u.busparams.tseg2 = bt->phase_seg2; |
15723 |
+- |
15724 |
+- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) |
15725 |
+- cmd->u.busparams.no_samp = 3; |
15726 |
+- else |
15727 |
+- cmd->u.busparams.no_samp = 1; |
15728 |
++ memcpy(&cmd->u.busparams.busparams, busparams, |
15729 |
++ sizeof(cmd->u.busparams.busparams)); |
15730 |
+ |
15731 |
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); |
15732 |
+ |
15733 |
+@@ -1422,6 +1735,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) |
15734 |
+ return rc; |
15735 |
+ } |
15736 |
+ |
15737 |
++static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) |
15738 |
++{ |
15739 |
++ int err; |
15740 |
++ |
15741 |
++ if (priv->dev->driver_info->family == KVASER_USBCAN) |
15742 |
++ return -EOPNOTSUPP; |
15743 |
++ |
15744 |
++ reinit_completion(&priv->get_busparams_comp); |
15745 |
++ |
15746 |
++ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, |
15747 |
++ priv->channel); |
15748 |
++ if (err) |
15749 |
++ return err; |
15750 |
++ |
15751 |
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp, |
15752 |
++ msecs_to_jiffies(KVASER_USB_TIMEOUT))) |
15753 |
++ return -ETIMEDOUT; |
15754 |
++ |
15755 |
++ return 0; |
15756 |
++} |
15757 |
++ |
15758 |
+ static int kvaser_usb_leaf_set_mode(struct net_device *netdev, |
15759 |
+ enum can_mode mode) |
15760 |
+ { |
15761 |
+@@ -1483,14 +1817,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) |
15762 |
+ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { |
15763 |
+ .dev_set_mode = kvaser_usb_leaf_set_mode, |
15764 |
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, |
15765 |
++ .dev_get_busparams = kvaser_usb_leaf_get_busparams, |
15766 |
+ .dev_set_data_bittiming = NULL, |
15767 |
++ .dev_get_data_busparams = NULL, |
15768 |
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, |
15769 |
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, |
15770 |
+ .dev_init_card = kvaser_usb_leaf_init_card, |
15771 |
++ .dev_init_channel = kvaser_usb_leaf_init_channel, |
15772 |
++ .dev_remove_channel = kvaser_usb_leaf_remove_channel, |
15773 |
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info, |
15774 |
+ .dev_get_software_details = NULL, |
15775 |
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info, |
15776 |
+- .dev_get_capabilities = NULL, |
15777 |
++ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, |
15778 |
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, |
15779 |
+ .dev_start_chip = kvaser_usb_leaf_start_chip, |
15780 |
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip, |
15781 |
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c |
15782 |
+index 59a803e3c8d0c..22547b10dfe50 100644 |
15783 |
+--- a/drivers/net/dsa/lan9303-core.c |
15784 |
++++ b/drivers/net/dsa/lan9303-core.c |
15785 |
+@@ -1003,9 +1003,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, |
15786 |
+ ret = lan9303_read_switch_port( |
15787 |
+ chip, port, lan9303_mib[u].offset, ®); |
15788 |
+ |
15789 |
+- if (ret) |
15790 |
++ if (ret) { |
15791 |
+ dev_warn(chip->dev, "Reading status port %d reg %u failed\n", |
15792 |
+ port, lan9303_mib[u].offset); |
15793 |
++ reg = 0; |
15794 |
++ } |
15795 |
+ data[u] = reg; |
15796 |
+ } |
15797 |
+ } |
15798 |
+diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c |
15799 |
+index 9d2f49fd945ed..a0971ed00453c 100644 |
15800 |
+--- a/drivers/net/ethernet/amd/atarilance.c |
15801 |
++++ b/drivers/net/ethernet/amd/atarilance.c |
15802 |
+@@ -821,7 +821,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev) |
15803 |
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); |
15804 |
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; |
15805 |
+ dev->stats.tx_bytes += skb->len; |
15806 |
+- dev_kfree_skb( skb ); |
15807 |
++ dev_consume_skb_irq(skb); |
15808 |
+ lp->cur_tx++; |
15809 |
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { |
15810 |
+ lp->cur_tx -= TX_RING_SIZE; |
15811 |
+diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c |
15812 |
+index 945bf1d875072..6c2d72024e218 100644 |
15813 |
+--- a/drivers/net/ethernet/amd/lance.c |
15814 |
++++ b/drivers/net/ethernet/amd/lance.c |
15815 |
+@@ -999,7 +999,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, |
15816 |
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); |
15817 |
+ lp->tx_ring[entry].base = |
15818 |
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; |
15819 |
+- dev_kfree_skb(skb); |
15820 |
++ dev_consume_skb_irq(skb); |
15821 |
+ } else { |
15822 |
+ lp->tx_skbuff[entry] = skb; |
15823 |
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; |
15824 |
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
15825 |
+index a7166cd1179f2..97e32c0490f8a 100644 |
15826 |
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
15827 |
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
15828 |
+@@ -189,6 +189,7 @@ enum xgbe_sfp_cable { |
15829 |
+ XGBE_SFP_CABLE_UNKNOWN = 0, |
15830 |
+ XGBE_SFP_CABLE_ACTIVE, |
15831 |
+ XGBE_SFP_CABLE_PASSIVE, |
15832 |
++ XGBE_SFP_CABLE_FIBER, |
15833 |
+ }; |
15834 |
+ |
15835 |
+ enum xgbe_sfp_base { |
15836 |
+@@ -236,10 +237,7 @@ enum xgbe_sfp_speed { |
15837 |
+ |
15838 |
+ #define XGBE_SFP_BASE_BR 12 |
15839 |
+ #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a |
15840 |
+-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d |
15841 |
+ #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 |
15842 |
+-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 |
15843 |
+-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78 |
15844 |
+ |
15845 |
+ #define XGBE_SFP_BASE_CU_CABLE_LEN 18 |
15846 |
+ |
15847 |
+@@ -826,29 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) |
15848 |
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, |
15849 |
+ enum xgbe_sfp_speed sfp_speed) |
15850 |
+ { |
15851 |
+- u8 *sfp_base, min, max; |
15852 |
++ u8 *sfp_base, min; |
15853 |
+ |
15854 |
+ sfp_base = sfp_eeprom->base; |
15855 |
+ |
15856 |
+ switch (sfp_speed) { |
15857 |
+ case XGBE_SFP_SPEED_1000: |
15858 |
+ min = XGBE_SFP_BASE_BR_1GBE_MIN; |
15859 |
+- max = XGBE_SFP_BASE_BR_1GBE_MAX; |
15860 |
+ break; |
15861 |
+ case XGBE_SFP_SPEED_10000: |
15862 |
+ min = XGBE_SFP_BASE_BR_10GBE_MIN; |
15863 |
+- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], |
15864 |
+- XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0) |
15865 |
+- max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX; |
15866 |
+- else |
15867 |
+- max = XGBE_SFP_BASE_BR_10GBE_MAX; |
15868 |
+ break; |
15869 |
+ default: |
15870 |
+ return false; |
15871 |
+ } |
15872 |
+ |
15873 |
+- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) && |
15874 |
+- (sfp_base[XGBE_SFP_BASE_BR] <= max)); |
15875 |
++ return sfp_base[XGBE_SFP_BASE_BR] >= min; |
15876 |
+ } |
15877 |
+ |
15878 |
+ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) |
15879 |
+@@ -1149,16 +1140,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) |
15880 |
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); |
15881 |
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); |
15882 |
+ |
15883 |
+- /* Assume ACTIVE cable unless told it is PASSIVE */ |
15884 |
++ /* Assume FIBER cable unless told otherwise */ |
15885 |
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { |
15886 |
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; |
15887 |
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN]; |
15888 |
+- } else { |
15889 |
++ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) { |
15890 |
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; |
15891 |
++ } else { |
15892 |
++ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER; |
15893 |
+ } |
15894 |
+ |
15895 |
+ /* Determine the type of SFP */ |
15896 |
+- if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE && |
15897 |
++ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER && |
15898 |
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) |
15899 |
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; |
15900 |
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) |
15901 |
+diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c |
15902 |
+index a989d2df59ad0..7a966361d83f7 100644 |
15903 |
+--- a/drivers/net/ethernet/apple/bmac.c |
15904 |
++++ b/drivers/net/ethernet/apple/bmac.c |
15905 |
+@@ -1511,7 +1511,7 @@ static void bmac_tx_timeout(struct timer_list *t) |
15906 |
+ i = bp->tx_empty; |
15907 |
+ ++dev->stats.tx_errors; |
15908 |
+ if (i != bp->tx_fill) { |
15909 |
+- dev_kfree_skb(bp->tx_bufs[i]); |
15910 |
++ dev_kfree_skb_irq(bp->tx_bufs[i]); |
15911 |
+ bp->tx_bufs[i] = NULL; |
15912 |
+ if (++i >= N_TX_RING) i = 0; |
15913 |
+ bp->tx_empty = i; |
15914 |
+diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c |
15915 |
+index 4b80e3a52a199..44037e9e197fa 100644 |
15916 |
+--- a/drivers/net/ethernet/apple/mace.c |
15917 |
++++ b/drivers/net/ethernet/apple/mace.c |
15918 |
+@@ -841,7 +841,7 @@ static void mace_tx_timeout(struct timer_list *t) |
15919 |
+ if (mp->tx_bad_runt) { |
15920 |
+ mp->tx_bad_runt = 0; |
15921 |
+ } else if (i != mp->tx_fill) { |
15922 |
+- dev_kfree_skb(mp->tx_bufs[i]); |
15923 |
++ dev_kfree_skb_irq(mp->tx_bufs[i]); |
15924 |
+ if (++i >= N_TX_RING) |
15925 |
+ i = 0; |
15926 |
+ mp->tx_empty = i; |
15927 |
+diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c |
15928 |
+index 6c51cf991dad5..14dc2e13bf038 100644 |
15929 |
+--- a/drivers/net/ethernet/dnet.c |
15930 |
++++ b/drivers/net/ethernet/dnet.c |
15931 |
+@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
15932 |
+ |
15933 |
+ skb_tx_timestamp(skb); |
15934 |
+ |
15935 |
++ spin_unlock_irqrestore(&bp->lock, flags); |
15936 |
++ |
15937 |
+ /* free the buffer */ |
15938 |
+ dev_kfree_skb(skb); |
15939 |
+ |
15940 |
+- spin_unlock_irqrestore(&bp->lock, flags); |
15941 |
+- |
15942 |
+ return NETDEV_TX_OK; |
15943 |
+ } |
15944 |
+ |
15945 |
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c |
15946 |
+index 222a250fba84a..adccb14c1644e 100644 |
15947 |
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c |
15948 |
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c |
15949 |
+@@ -1220,23 +1220,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, |
15950 |
+ rx_ring->stats.xdp_drops++; |
15951 |
+ } |
15952 |
+ |
15953 |
+-static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, |
15954 |
+- int rx_ring_last) |
15955 |
+-{ |
15956 |
+- while (rx_ring_first != rx_ring_last) { |
15957 |
+- struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; |
15958 |
+- |
15959 |
+- if (rx_swbd->page) { |
15960 |
+- dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, |
15961 |
+- rx_swbd->dir); |
15962 |
+- __free_page(rx_swbd->page); |
15963 |
+- rx_swbd->page = NULL; |
15964 |
+- } |
15965 |
+- enetc_bdr_idx_inc(rx_ring, &rx_ring_first); |
15966 |
+- } |
15967 |
+- rx_ring->stats.xdp_redirect_failures++; |
15968 |
+-} |
15969 |
+- |
15970 |
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, |
15971 |
+ struct napi_struct *napi, int work_limit, |
15972 |
+ struct bpf_prog *prog) |
15973 |
+@@ -1258,8 +1241,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, |
15974 |
+ int orig_i, orig_cleaned_cnt; |
15975 |
+ struct xdp_buff xdp_buff; |
15976 |
+ struct sk_buff *skb; |
15977 |
+- int tmp_orig_i, err; |
15978 |
+ u32 bd_status; |
15979 |
++ int err; |
15980 |
+ |
15981 |
+ rxbd = enetc_rxbd(rx_ring, i); |
15982 |
+ bd_status = le32_to_cpu(rxbd->r.lstatus); |
15983 |
+@@ -1346,18 +1329,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, |
15984 |
+ break; |
15985 |
+ } |
15986 |
+ |
15987 |
+- tmp_orig_i = orig_i; |
15988 |
+- |
15989 |
+- while (orig_i != i) { |
15990 |
+- enetc_flip_rx_buff(rx_ring, |
15991 |
+- &rx_ring->rx_swbd[orig_i]); |
15992 |
+- enetc_bdr_idx_inc(rx_ring, &orig_i); |
15993 |
+- } |
15994 |
+- |
15995 |
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); |
15996 |
+ if (unlikely(err)) { |
15997 |
+- enetc_xdp_free(rx_ring, tmp_orig_i, i); |
15998 |
++ enetc_xdp_drop(rx_ring, orig_i, i); |
15999 |
++ rx_ring->stats.xdp_redirect_failures++; |
16000 |
+ } else { |
16001 |
++ while (orig_i != i) { |
16002 |
++ enetc_flip_rx_buff(rx_ring, |
16003 |
++ &rx_ring->rx_swbd[orig_i]); |
16004 |
++ enetc_bdr_idx_inc(rx_ring, &orig_i); |
16005 |
++ } |
16006 |
+ xdp_redirect_frm_cnt++; |
16007 |
+ rx_ring->stats.xdp_redirect++; |
16008 |
+ } |
16009 |
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
16010 |
+index ed2c961902b6c..c013d86559af9 100644 |
16011 |
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
16012 |
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
16013 |
+@@ -3548,6 +3548,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) |
16014 |
+ return err; |
16015 |
+ } |
16016 |
+ |
16017 |
++/** |
16018 |
++ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length |
16019 |
++ * |
16020 |
++ * @vsi: VSI to calculate rx_buf_len from |
16021 |
++ */ |
16022 |
++static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) |
16023 |
++{ |
16024 |
++ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) |
16025 |
++ return I40E_RXBUFFER_2048; |
16026 |
++ |
16027 |
++#if (PAGE_SIZE < 8192) |
16028 |
++ if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN) |
16029 |
++ return I40E_RXBUFFER_1536 - NET_IP_ALIGN; |
16030 |
++#endif |
16031 |
++ |
16032 |
++ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; |
16033 |
++} |
16034 |
++ |
16035 |
+ /** |
16036 |
+ * i40e_vsi_configure_rx - Configure the VSI for Rx |
16037 |
+ * @vsi: the VSI being configured |
16038 |
+@@ -3559,20 +3577,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) |
16039 |
+ int err = 0; |
16040 |
+ u16 i; |
16041 |
+ |
16042 |
+- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { |
16043 |
+- vsi->max_frame = I40E_MAX_RXBUFFER; |
16044 |
+- vsi->rx_buf_len = I40E_RXBUFFER_2048; |
16045 |
++ vsi->max_frame = I40E_MAX_RXBUFFER; |
16046 |
++ vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); |
16047 |
++ |
16048 |
+ #if (PAGE_SIZE < 8192) |
16049 |
+- } else if (!I40E_2K_TOO_SMALL_WITH_PADDING && |
16050 |
+- (vsi->netdev->mtu <= ETH_DATA_LEN)) { |
16051 |
++ if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && |
16052 |
++ vsi->netdev->mtu <= ETH_DATA_LEN) |
16053 |
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; |
16054 |
+- vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; |
16055 |
+ #endif |
16056 |
+- } else { |
16057 |
+- vsi->max_frame = I40E_MAX_RXBUFFER; |
16058 |
+- vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 : |
16059 |
+- I40E_RXBUFFER_2048; |
16060 |
+- } |
16061 |
+ |
16062 |
+ /* set up individual rings */ |
16063 |
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++) |
16064 |
+@@ -13147,7 +13159,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, |
16065 |
+ int i; |
16066 |
+ |
16067 |
+ /* Don't allow frames that span over multiple buffers */ |
16068 |
+- if (frame_size > vsi->rx_buf_len) { |
16069 |
++ if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) { |
16070 |
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
16071 |
+ return -EINVAL; |
16072 |
+ } |
16073 |
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
16074 |
+index 70667b46858a8..b246ff8b7c208 100644 |
16075 |
+--- a/drivers/net/ethernet/intel/igb/igb_main.c |
16076 |
++++ b/drivers/net/ethernet/intel/igb/igb_main.c |
16077 |
+@@ -1204,8 +1204,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, |
16078 |
+ if (!q_vector) { |
16079 |
+ q_vector = kzalloc(size, GFP_KERNEL); |
16080 |
+ } else if (size > ksize(q_vector)) { |
16081 |
+- kfree_rcu(q_vector, rcu); |
16082 |
+- q_vector = kzalloc(size, GFP_KERNEL); |
16083 |
++ struct igb_q_vector *new_q_vector; |
16084 |
++ |
16085 |
++ new_q_vector = kzalloc(size, GFP_KERNEL); |
16086 |
++ if (new_q_vector) |
16087 |
++ kfree_rcu(q_vector, rcu); |
16088 |
++ q_vector = new_q_vector; |
16089 |
+ } else { |
16090 |
+ memset(q_vector, 0, size); |
16091 |
+ } |
16092 |
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h |
16093 |
+index 3e386c38d016c..66678cd72a6cd 100644 |
16094 |
+--- a/drivers/net/ethernet/intel/igc/igc.h |
16095 |
++++ b/drivers/net/ethernet/intel/igc/igc.h |
16096 |
+@@ -94,6 +94,8 @@ struct igc_ring { |
16097 |
+ u8 queue_index; /* logical index of the ring*/ |
16098 |
+ u8 reg_idx; /* physical index of the ring */ |
16099 |
+ bool launchtime_enable; /* true if LaunchTime is enabled */ |
16100 |
++ ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ |
16101 |
++ ktime_t last_ff_cycle; /* Last cycle with an active first flag */ |
16102 |
+ |
16103 |
+ u32 start_time; |
16104 |
+ u32 end_time; |
16105 |
+@@ -182,6 +184,7 @@ struct igc_adapter { |
16106 |
+ |
16107 |
+ ktime_t base_time; |
16108 |
+ ktime_t cycle_time; |
16109 |
++ bool qbv_enable; |
16110 |
+ |
16111 |
+ /* OS defined structs */ |
16112 |
+ struct pci_dev *pdev; |
16113 |
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h |
16114 |
+index a4bbee7487984..f171bc99e58ca 100644 |
16115 |
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h |
16116 |
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h |
16117 |
+@@ -324,6 +324,8 @@ |
16118 |
+ #define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ |
16119 |
+ #define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ |
16120 |
+ |
16121 |
++#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 |
16122 |
++ |
16123 |
+ /* Transmit Control */ |
16124 |
+ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ |
16125 |
+ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ |
16126 |
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c |
16127 |
+index 2a84f57ea68b4..3726c8413c741 100644 |
16128 |
+--- a/drivers/net/ethernet/intel/igc/igc_main.c |
16129 |
++++ b/drivers/net/ethernet/intel/igc/igc_main.c |
16130 |
+@@ -999,25 +999,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev) |
16131 |
+ return netdev_mc_count(netdev); |
16132 |
+ } |
16133 |
+ |
16134 |
+-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) |
16135 |
++static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, |
16136 |
++ bool *first_flag, bool *insert_empty) |
16137 |
+ { |
16138 |
++ struct igc_adapter *adapter = netdev_priv(ring->netdev); |
16139 |
+ ktime_t cycle_time = adapter->cycle_time; |
16140 |
+ ktime_t base_time = adapter->base_time; |
16141 |
++ ktime_t now = ktime_get_clocktai(); |
16142 |
++ ktime_t baset_est, end_of_cycle; |
16143 |
+ u32 launchtime; |
16144 |
++ s64 n; |
16145 |
+ |
16146 |
+- /* FIXME: when using ETF together with taprio, we may have a |
16147 |
+- * case where 'delta' is larger than the cycle_time, this may |
16148 |
+- * cause problems if we don't read the current value of |
16149 |
+- * IGC_BASET, as the value writen into the launchtime |
16150 |
+- * descriptor field may be misinterpreted. |
16151 |
++ n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); |
16152 |
++ |
16153 |
++ baset_est = ktime_add_ns(base_time, cycle_time * (n)); |
16154 |
++ end_of_cycle = ktime_add_ns(baset_est, cycle_time); |
16155 |
++ |
16156 |
++ if (ktime_compare(txtime, end_of_cycle) >= 0) { |
16157 |
++ if (baset_est != ring->last_ff_cycle) { |
16158 |
++ *first_flag = true; |
16159 |
++ ring->last_ff_cycle = baset_est; |
16160 |
++ |
16161 |
++ if (ktime_compare(txtime, ring->last_tx_cycle) > 0) |
16162 |
++ *insert_empty = true; |
16163 |
++ } |
16164 |
++ } |
16165 |
++ |
16166 |
++ /* Introducing a window at end of cycle on which packets |
16167 |
++ * potentially not honor launchtime. Window of 5us chosen |
16168 |
++ * considering software update the tail pointer and packets |
16169 |
++ * are dma'ed to packet buffer. |
16170 |
+ */ |
16171 |
+- div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); |
16172 |
++ if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) |
16173 |
++ netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", |
16174 |
++ txtime); |
16175 |
++ |
16176 |
++ ring->last_tx_cycle = end_of_cycle; |
16177 |
++ |
16178 |
++ launchtime = ktime_sub_ns(txtime, baset_est); |
16179 |
++ if (launchtime > 0) |
16180 |
++ div_s64_rem(launchtime, cycle_time, &launchtime); |
16181 |
++ else |
16182 |
++ launchtime = 0; |
16183 |
+ |
16184 |
+ return cpu_to_le32(launchtime); |
16185 |
+ } |
16186 |
+ |
16187 |
++static int igc_init_empty_frame(struct igc_ring *ring, |
16188 |
++ struct igc_tx_buffer *buffer, |
16189 |
++ struct sk_buff *skb) |
16190 |
++{ |
16191 |
++ unsigned int size; |
16192 |
++ dma_addr_t dma; |
16193 |
++ |
16194 |
++ size = skb_headlen(skb); |
16195 |
++ |
16196 |
++ dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); |
16197 |
++ if (dma_mapping_error(ring->dev, dma)) { |
16198 |
++ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); |
16199 |
++ return -ENOMEM; |
16200 |
++ } |
16201 |
++ |
16202 |
++ buffer->skb = skb; |
16203 |
++ buffer->protocol = 0; |
16204 |
++ buffer->bytecount = skb->len; |
16205 |
++ buffer->gso_segs = 1; |
16206 |
++ buffer->time_stamp = jiffies; |
16207 |
++ dma_unmap_len_set(buffer, len, skb->len); |
16208 |
++ dma_unmap_addr_set(buffer, dma, dma); |
16209 |
++ |
16210 |
++ return 0; |
16211 |
++} |
16212 |
++ |
16213 |
++static int igc_init_tx_empty_descriptor(struct igc_ring *ring, |
16214 |
++ struct sk_buff *skb, |
16215 |
++ struct igc_tx_buffer *first) |
16216 |
++{ |
16217 |
++ union igc_adv_tx_desc *desc; |
16218 |
++ u32 cmd_type, olinfo_status; |
16219 |
++ int err; |
16220 |
++ |
16221 |
++ if (!igc_desc_unused(ring)) |
16222 |
++ return -EBUSY; |
16223 |
++ |
16224 |
++ err = igc_init_empty_frame(ring, first, skb); |
16225 |
++ if (err) |
16226 |
++ return err; |
16227 |
++ |
16228 |
++ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | |
16229 |
++ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | |
16230 |
++ first->bytecount; |
16231 |
++ olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; |
16232 |
++ |
16233 |
++ desc = IGC_TX_DESC(ring, ring->next_to_use); |
16234 |
++ desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
16235 |
++ desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
16236 |
++ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); |
16237 |
++ |
16238 |
++ netdev_tx_sent_queue(txring_txq(ring), skb->len); |
16239 |
++ |
16240 |
++ first->next_to_watch = desc; |
16241 |
++ |
16242 |
++ ring->next_to_use++; |
16243 |
++ if (ring->next_to_use == ring->count) |
16244 |
++ ring->next_to_use = 0; |
16245 |
++ |
16246 |
++ return 0; |
16247 |
++} |
16248 |
++ |
16249 |
++#define IGC_EMPTY_FRAME_SIZE 60 |
16250 |
++ |
16251 |
+ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, |
16252 |
+- struct igc_tx_buffer *first, |
16253 |
++ __le32 launch_time, bool first_flag, |
16254 |
+ u32 vlan_macip_lens, u32 type_tucmd, |
16255 |
+ u32 mss_l4len_idx) |
16256 |
+ { |
16257 |
+@@ -1036,26 +1129,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, |
16258 |
+ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) |
16259 |
+ mss_l4len_idx |= tx_ring->reg_idx << 4; |
16260 |
+ |
16261 |
++ if (first_flag) |
16262 |
++ mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; |
16263 |
++ |
16264 |
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); |
16265 |
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); |
16266 |
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); |
16267 |
+- |
16268 |
+- /* We assume there is always a valid Tx time available. Invalid times |
16269 |
+- * should have been handled by the upper layers. |
16270 |
+- */ |
16271 |
+- if (tx_ring->launchtime_enable) { |
16272 |
+- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); |
16273 |
+- ktime_t txtime = first->skb->tstamp; |
16274 |
+- |
16275 |
+- skb_txtime_consumed(first->skb); |
16276 |
+- context_desc->launch_time = igc_tx_launchtime(adapter, |
16277 |
+- txtime); |
16278 |
+- } else { |
16279 |
+- context_desc->launch_time = 0; |
16280 |
+- } |
16281 |
++ context_desc->launch_time = launch_time; |
16282 |
+ } |
16283 |
+ |
16284 |
+-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) |
16285 |
++static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, |
16286 |
++ __le32 launch_time, bool first_flag) |
16287 |
+ { |
16288 |
+ struct sk_buff *skb = first->skb; |
16289 |
+ u32 vlan_macip_lens = 0; |
16290 |
+@@ -1095,7 +1179,8 @@ no_csum: |
16291 |
+ vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; |
16292 |
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; |
16293 |
+ |
16294 |
+- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); |
16295 |
++ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, |
16296 |
++ vlan_macip_lens, type_tucmd, 0); |
16297 |
+ } |
16298 |
+ |
16299 |
+ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) |
16300 |
+@@ -1319,6 +1404,7 @@ dma_error: |
16301 |
+ |
16302 |
+ static int igc_tso(struct igc_ring *tx_ring, |
16303 |
+ struct igc_tx_buffer *first, |
16304 |
++ __le32 launch_time, bool first_flag, |
16305 |
+ u8 *hdr_len) |
16306 |
+ { |
16307 |
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; |
16308 |
+@@ -1405,8 +1491,8 @@ static int igc_tso(struct igc_ring *tx_ring, |
16309 |
+ vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; |
16310 |
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; |
16311 |
+ |
16312 |
+- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, |
16313 |
+- type_tucmd, mss_l4len_idx); |
16314 |
++ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, |
16315 |
++ vlan_macip_lens, type_tucmd, mss_l4len_idx); |
16316 |
+ |
16317 |
+ return 1; |
16318 |
+ } |
16319 |
+@@ -1414,11 +1500,14 @@ static int igc_tso(struct igc_ring *tx_ring, |
16320 |
+ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, |
16321 |
+ struct igc_ring *tx_ring) |
16322 |
+ { |
16323 |
++ bool first_flag = false, insert_empty = false; |
16324 |
+ u16 count = TXD_USE_COUNT(skb_headlen(skb)); |
16325 |
+ __be16 protocol = vlan_get_protocol(skb); |
16326 |
+ struct igc_tx_buffer *first; |
16327 |
++ __le32 launch_time = 0; |
16328 |
+ u32 tx_flags = 0; |
16329 |
+ unsigned short f; |
16330 |
++ ktime_t txtime; |
16331 |
+ u8 hdr_len = 0; |
16332 |
+ int tso = 0; |
16333 |
+ |
16334 |
+@@ -1432,11 +1521,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, |
16335 |
+ count += TXD_USE_COUNT(skb_frag_size( |
16336 |
+ &skb_shinfo(skb)->frags[f])); |
16337 |
+ |
16338 |
+- if (igc_maybe_stop_tx(tx_ring, count + 3)) { |
16339 |
++ if (igc_maybe_stop_tx(tx_ring, count + 5)) { |
16340 |
+ /* this is a hard error */ |
16341 |
+ return NETDEV_TX_BUSY; |
16342 |
+ } |
16343 |
+ |
16344 |
++ if (!tx_ring->launchtime_enable) |
16345 |
++ goto done; |
16346 |
++ |
16347 |
++ txtime = skb->tstamp; |
16348 |
++ skb->tstamp = ktime_set(0, 0); |
16349 |
++ launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); |
16350 |
++ |
16351 |
++ if (insert_empty) { |
16352 |
++ struct igc_tx_buffer *empty_info; |
16353 |
++ struct sk_buff *empty; |
16354 |
++ void *data; |
16355 |
++ |
16356 |
++ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
16357 |
++ empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); |
16358 |
++ if (!empty) |
16359 |
++ goto done; |
16360 |
++ |
16361 |
++ data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); |
16362 |
++ memset(data, 0, IGC_EMPTY_FRAME_SIZE); |
16363 |
++ |
16364 |
++ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); |
16365 |
++ |
16366 |
++ if (igc_init_tx_empty_descriptor(tx_ring, |
16367 |
++ empty, |
16368 |
++ empty_info) < 0) |
16369 |
++ dev_kfree_skb_any(empty); |
16370 |
++ } |
16371 |
++ |
16372 |
++done: |
16373 |
+ /* record the location of the first descriptor for this packet */ |
16374 |
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
16375 |
+ first->type = IGC_TX_BUFFER_TYPE_SKB; |
16376 |
+@@ -1473,11 +1591,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, |
16377 |
+ first->tx_flags = tx_flags; |
16378 |
+ first->protocol = protocol; |
16379 |
+ |
16380 |
+- tso = igc_tso(tx_ring, first, &hdr_len); |
16381 |
++ tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); |
16382 |
+ if (tso < 0) |
16383 |
+ goto out_drop; |
16384 |
+ else if (!tso) |
16385 |
+- igc_tx_csum(tx_ring, first); |
16386 |
++ igc_tx_csum(tx_ring, first, launch_time, first_flag); |
16387 |
+ |
16388 |
+ igc_tx_map(tx_ring, first, hdr_len); |
16389 |
+ |
16390 |
+@@ -5801,9 +5919,10 @@ static bool validate_schedule(struct igc_adapter *adapter, |
16391 |
+ return false; |
16392 |
+ |
16393 |
+ for (n = 0; n < qopt->num_entries; n++) { |
16394 |
+- const struct tc_taprio_sched_entry *e; |
16395 |
++ const struct tc_taprio_sched_entry *e, *prev; |
16396 |
+ int i; |
16397 |
+ |
16398 |
++ prev = n ? &qopt->entries[n - 1] : NULL; |
16399 |
+ e = &qopt->entries[n]; |
16400 |
+ |
16401 |
+ /* i225 only supports "global" frame preemption |
16402 |
+@@ -5816,7 +5935,12 @@ static bool validate_schedule(struct igc_adapter *adapter, |
16403 |
+ if (e->gate_mask & BIT(i)) |
16404 |
+ queue_uses[i]++; |
16405 |
+ |
16406 |
+- if (queue_uses[i] > 1) |
16407 |
++ /* There are limitations: A single queue cannot be |
16408 |
++ * opened and closed multiple times per cycle unless the |
16409 |
++ * gate stays open. Check for it. |
16410 |
++ */ |
16411 |
++ if (queue_uses[i] > 1 && |
16412 |
++ !(prev->gate_mask & BIT(i))) |
16413 |
+ return false; |
16414 |
+ } |
16415 |
+ } |
16416 |
+@@ -5860,12 +5984,19 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter) |
16417 |
+ static int igc_save_qbv_schedule(struct igc_adapter *adapter, |
16418 |
+ struct tc_taprio_qopt_offload *qopt) |
16419 |
+ { |
16420 |
++ bool queue_configured[IGC_MAX_TX_QUEUES] = { }; |
16421 |
+ u32 start_time = 0, end_time = 0; |
16422 |
+ size_t n; |
16423 |
++ int i; |
16424 |
++ |
16425 |
++ adapter->qbv_enable = qopt->enable; |
16426 |
+ |
16427 |
+ if (!qopt->enable) |
16428 |
+ return igc_tsn_clear_schedule(adapter); |
16429 |
+ |
16430 |
++ if (qopt->base_time < 0) |
16431 |
++ return -ERANGE; |
16432 |
++ |
16433 |
+ if (adapter->base_time) |
16434 |
+ return -EALREADY; |
16435 |
+ |
16436 |
+@@ -5875,28 +6006,58 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, |
16437 |
+ adapter->cycle_time = qopt->cycle_time; |
16438 |
+ adapter->base_time = qopt->base_time; |
16439 |
+ |
16440 |
+- /* FIXME: be a little smarter about cases when the gate for a |
16441 |
+- * queue stays open for more than one entry. |
16442 |
+- */ |
16443 |
+ for (n = 0; n < qopt->num_entries; n++) { |
16444 |
+ struct tc_taprio_sched_entry *e = &qopt->entries[n]; |
16445 |
+- int i; |
16446 |
+ |
16447 |
+ end_time += e->interval; |
16448 |
+ |
16449 |
++ /* If any of the conditions below are true, we need to manually |
16450 |
++ * control the end time of the cycle. |
16451 |
++ * 1. Qbv users can specify a cycle time that is not equal |
16452 |
++ * to the total GCL intervals. Hence, recalculation is |
16453 |
++ * necessary here to exclude the time interval that |
16454 |
++ * exceeds the cycle time. |
16455 |
++ * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, |
16456 |
++ * once the end of the list is reached, it will switch |
16457 |
++ * to the END_OF_CYCLE state and leave the gates in the |
16458 |
++ * same state until the next cycle is started. |
16459 |
++ */ |
16460 |
++ if (end_time > adapter->cycle_time || |
16461 |
++ n + 1 == qopt->num_entries) |
16462 |
++ end_time = adapter->cycle_time; |
16463 |
++ |
16464 |
+ for (i = 0; i < adapter->num_tx_queues; i++) { |
16465 |
+ struct igc_ring *ring = adapter->tx_ring[i]; |
16466 |
+ |
16467 |
+ if (!(e->gate_mask & BIT(i))) |
16468 |
+ continue; |
16469 |
+ |
16470 |
+- ring->start_time = start_time; |
16471 |
++ /* Check whether a queue stays open for more than one |
16472 |
++ * entry. If so, keep the start and advance the end |
16473 |
++ * time. |
16474 |
++ */ |
16475 |
++ if (!queue_configured[i]) |
16476 |
++ ring->start_time = start_time; |
16477 |
+ ring->end_time = end_time; |
16478 |
++ |
16479 |
++ queue_configured[i] = true; |
16480 |
+ } |
16481 |
+ |
16482 |
+ start_time += e->interval; |
16483 |
+ } |
16484 |
+ |
16485 |
++ /* Check whether a queue gets configured. |
16486 |
++ * If not, set the start and end time to be end time. |
16487 |
++ */ |
16488 |
++ for (i = 0; i < adapter->num_tx_queues; i++) { |
16489 |
++ if (!queue_configured[i]) { |
16490 |
++ struct igc_ring *ring = adapter->tx_ring[i]; |
16491 |
++ |
16492 |
++ ring->start_time = end_time; |
16493 |
++ ring->end_time = end_time; |
16494 |
++ } |
16495 |
++ } |
16496 |
++ |
16497 |
+ return 0; |
16498 |
+ } |
16499 |
+ |
16500 |
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c |
16501 |
+index 0fce22de2ab85..356c7455c5cee 100644 |
16502 |
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c |
16503 |
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c |
16504 |
+@@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) |
16505 |
+ { |
16506 |
+ unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; |
16507 |
+ |
16508 |
+- if (adapter->base_time) |
16509 |
++ if (adapter->qbv_enable) |
16510 |
+ new_flags |= IGC_FLAG_TSN_QBV_ENABLED; |
16511 |
+ |
16512 |
+ if (is_any_launchtime(adapter)) |
16513 |
+@@ -110,15 +110,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) |
16514 |
+ wr32(IGC_STQT(i), ring->start_time); |
16515 |
+ wr32(IGC_ENDQT(i), ring->end_time); |
16516 |
+ |
16517 |
+- if (adapter->base_time) { |
16518 |
+- /* If we have a base_time we are in "taprio" |
16519 |
+- * mode and we need to be strict about the |
16520 |
+- * cycles: only transmit a packet if it can be |
16521 |
+- * completed during that cycle. |
16522 |
+- */ |
16523 |
+- txqctl |= IGC_TXQCTL_STRICT_CYCLE | |
16524 |
+- IGC_TXQCTL_STRICT_END; |
16525 |
+- } |
16526 |
++ txqctl |= IGC_TXQCTL_STRICT_CYCLE | |
16527 |
++ IGC_TXQCTL_STRICT_END; |
16528 |
+ |
16529 |
+ if (ring->launchtime_enable) |
16530 |
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; |
16531 |
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
16532 |
+index 052696ce50963..97c2604df019a 100644 |
16533 |
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
16534 |
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
16535 |
+@@ -3923,6 +3923,7 @@ abort_with_slices: |
16536 |
+ myri10ge_free_slices(mgp); |
16537 |
+ |
16538 |
+ abort_with_firmware: |
16539 |
++ kfree(mgp->msix_vectors); |
16540 |
+ myri10ge_dummy_rdma(mgp, 0); |
16541 |
+ |
16542 |
+ abort_with_ioremap: |
16543 |
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c |
16544 |
+index f4703f53bcdca..65ccdbe665e5c 100644 |
16545 |
+--- a/drivers/net/ethernet/neterion/s2io.c |
16546 |
++++ b/drivers/net/ethernet/neterion/s2io.c |
16547 |
+@@ -2386,7 +2386,7 @@ static void free_tx_buffers(struct s2io_nic *nic) |
16548 |
+ skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); |
16549 |
+ if (skb) { |
16550 |
+ swstats->mem_freed += skb->truesize; |
16551 |
+- dev_kfree_skb(skb); |
16552 |
++ dev_kfree_skb_irq(skb); |
16553 |
+ cnt++; |
16554 |
+ } |
16555 |
+ } |
16556 |
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c |
16557 |
+index 6ab3e60d4928c..4b4077cf2d266 100644 |
16558 |
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c |
16559 |
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c |
16560 |
+@@ -1796,9 +1796,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, |
16561 |
+ u8 split_id) |
16562 |
+ { |
16563 |
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; |
16564 |
+- u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; |
16565 |
++ u8 port_id = 0, pf_id = 0, vf_id = 0; |
16566 |
+ bool read_using_dmae = false; |
16567 |
+ u32 thresh; |
16568 |
++ u16 fid; |
16569 |
+ |
16570 |
+ if (!dump) |
16571 |
+ return len; |
16572 |
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c |
16573 |
+index 42a44c97572ae..df9b84f6600fe 100644 |
16574 |
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c |
16575 |
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c |
16576 |
+@@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) |
16577 |
+ return 0; |
16578 |
+ |
16579 |
+ qlcnic_destroy_async_wq: |
16580 |
++ while (i--) |
16581 |
++ kfree(sriov->vf_info[i].vp); |
16582 |
+ destroy_workqueue(bc->bc_async_wq); |
16583 |
+ |
16584 |
+ qlcnic_destroy_trans_wq: |
16585 |
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c |
16586 |
+index 01ef5efd7bc2a..5a8a6977ec9a7 100644 |
16587 |
+--- a/drivers/net/ethernet/rdc/r6040.c |
16588 |
++++ b/drivers/net/ethernet/rdc/r6040.c |
16589 |
+@@ -1159,10 +1159,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
16590 |
+ err = register_netdev(dev); |
16591 |
+ if (err) { |
16592 |
+ dev_err(&pdev->dev, "Failed to register net device\n"); |
16593 |
+- goto err_out_mdio_unregister; |
16594 |
++ goto err_out_phy_disconnect; |
16595 |
+ } |
16596 |
+ return 0; |
16597 |
+ |
16598 |
++err_out_phy_disconnect: |
16599 |
++ phy_disconnect(dev->phydev); |
16600 |
+ err_out_mdio_unregister: |
16601 |
+ mdiobus_unregister(lp->mii_bus); |
16602 |
+ err_out_mdio: |
16603 |
+@@ -1186,6 +1188,7 @@ static void r6040_remove_one(struct pci_dev *pdev) |
16604 |
+ struct r6040_private *lp = netdev_priv(dev); |
16605 |
+ |
16606 |
+ unregister_netdev(dev); |
16607 |
++ phy_disconnect(dev->phydev); |
16608 |
+ mdiobus_unregister(lp->mii_bus); |
16609 |
+ mdiobus_free(lp->mii_bus); |
16610 |
+ netif_napi_del(&lp->napi); |
16611 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
16612 |
+index d68ef72dcdde0..4538e4fd81898 100644 |
16613 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
16614 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c |
16615 |
+@@ -47,7 +47,8 @@ static void config_sub_second_increment(void __iomem *ioaddr, |
16616 |
+ if (!(value & PTP_TCR_TSCTRLSSR)) |
16617 |
+ data = (data * 1000) / 465; |
16618 |
+ |
16619 |
+- data &= PTP_SSIR_SSINC_MASK; |
16620 |
++ if (data > PTP_SSIR_SSINC_MAX) |
16621 |
++ data = PTP_SSIR_SSINC_MAX; |
16622 |
+ |
16623 |
+ reg_value = data; |
16624 |
+ if (gmac4) |
16625 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
16626 |
+index eba97adaf1fb8..15b0daf416f37 100644 |
16627 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
16628 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
16629 |
+@@ -7046,7 +7046,8 @@ int stmmac_dvr_probe(struct device *device, |
16630 |
+ priv->wq = create_singlethread_workqueue("stmmac_wq"); |
16631 |
+ if (!priv->wq) { |
16632 |
+ dev_err(priv->device, "failed to create workqueue\n"); |
16633 |
+- return -ENOMEM; |
16634 |
++ ret = -ENOMEM; |
16635 |
++ goto error_wq_init; |
16636 |
+ } |
16637 |
+ |
16638 |
+ INIT_WORK(&priv->service_task, stmmac_service_task); |
16639 |
+@@ -7273,6 +7274,7 @@ error_mdio_register: |
16640 |
+ stmmac_napi_del(ndev); |
16641 |
+ error_hw_init: |
16642 |
+ destroy_workqueue(priv->wq); |
16643 |
++error_wq_init: |
16644 |
+ bitmap_free(priv->af_xdp_zc_qps); |
16645 |
+ |
16646 |
+ return ret; |
16647 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
16648 |
+index 53172a4398101..bf619295d079f 100644 |
16649 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
16650 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
16651 |
+@@ -64,7 +64,7 @@ |
16652 |
+ #define PTP_TCR_TSENMACADDR BIT(18) |
16653 |
+ |
16654 |
+ /* SSIR defines */ |
16655 |
+-#define PTP_SSIR_SSINC_MASK 0xff |
16656 |
++#define PTP_SSIR_SSINC_MAX 0xff |
16657 |
+ #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 |
16658 |
+ |
16659 |
+ /* Auxiliary Control defines */ |
16660 |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c |
16661 |
+index dd5c4ef92ef3c..ea7200b7b6477 100644 |
16662 |
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c |
16663 |
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c |
16664 |
+@@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) |
16665 |
+ } |
16666 |
+ |
16667 |
+ ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); |
16668 |
+- if (ret) |
16669 |
++ if (ret) { |
16670 |
++ kfree_skb(skb); |
16671 |
+ goto cleanup; |
16672 |
++ } |
16673 |
+ |
16674 |
+ ret = dev_set_promiscuity(priv->dev, 1); |
16675 |
+- if (ret) |
16676 |
++ if (ret) { |
16677 |
++ kfree_skb(skb); |
16678 |
+ goto cleanup; |
16679 |
++ } |
16680 |
+ |
16681 |
+ ret = dev_direct_xmit(skb, 0); |
16682 |
+ if (ret) |
16683 |
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c |
16684 |
+index eda2961c0fe2a..07bdeece1723d 100644 |
16685 |
+--- a/drivers/net/ethernet/ti/netcp_core.c |
16686 |
++++ b/drivers/net/ethernet/ti/netcp_core.c |
16687 |
+@@ -1262,7 +1262,7 @@ out: |
16688 |
+ } |
16689 |
+ |
16690 |
+ /* Submit the packet */ |
16691 |
+-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
16692 |
++static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
16693 |
+ { |
16694 |
+ struct netcp_intf *netcp = netdev_priv(ndev); |
16695 |
+ struct netcp_stats *tx_stats = &netcp->stats; |
16696 |
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c |
16697 |
+index 97c1d1ecba34c..b1971c4d5313e 100644 |
16698 |
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c |
16699 |
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c |
16700 |
+@@ -543,7 +543,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue) |
16701 |
+ xemaclite_enable_interrupts(lp); |
16702 |
+ |
16703 |
+ if (lp->deferred_skb) { |
16704 |
+- dev_kfree_skb(lp->deferred_skb); |
16705 |
++ dev_kfree_skb_irq(lp->deferred_skb); |
16706 |
+ lp->deferred_skb = NULL; |
16707 |
+ dev->stats.tx_errors++; |
16708 |
+ } |
16709 |
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c |
16710 |
+index 6d1e3f49a3d3d..ebf502290e5f3 100644 |
16711 |
+--- a/drivers/net/fddi/defxx.c |
16712 |
++++ b/drivers/net/fddi/defxx.c |
16713 |
+@@ -3831,10 +3831,24 @@ static int dfx_init(void) |
16714 |
+ int status; |
16715 |
+ |
16716 |
+ status = pci_register_driver(&dfx_pci_driver); |
16717 |
+- if (!status) |
16718 |
+- status = eisa_driver_register(&dfx_eisa_driver); |
16719 |
+- if (!status) |
16720 |
+- status = tc_register_driver(&dfx_tc_driver); |
16721 |
++ if (status) |
16722 |
++ goto err_pci_register; |
16723 |
++ |
16724 |
++ status = eisa_driver_register(&dfx_eisa_driver); |
16725 |
++ if (status) |
16726 |
++ goto err_eisa_register; |
16727 |
++ |
16728 |
++ status = tc_register_driver(&dfx_tc_driver); |
16729 |
++ if (status) |
16730 |
++ goto err_tc_register; |
16731 |
++ |
16732 |
++ return 0; |
16733 |
++ |
16734 |
++err_tc_register: |
16735 |
++ eisa_driver_unregister(&dfx_eisa_driver); |
16736 |
++err_eisa_register: |
16737 |
++ pci_unregister_driver(&dfx_pci_driver); |
16738 |
++err_pci_register: |
16739 |
+ return status; |
16740 |
+ } |
16741 |
+ |
16742 |
+diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c |
16743 |
+index 6b6f28d5b8d5d..f9d03f7b9101e 100644 |
16744 |
+--- a/drivers/net/hamradio/baycom_epp.c |
16745 |
++++ b/drivers/net/hamradio/baycom_epp.c |
16746 |
+@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work) |
16747 |
+ * ===================== network driver interface ========================= |
16748 |
+ */ |
16749 |
+ |
16750 |
+-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) |
16751 |
++static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev) |
16752 |
+ { |
16753 |
+ struct baycom_state *bc = netdev_priv(dev); |
16754 |
+ |
16755 |
+diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c |
16756 |
+index e0bb131a33d76..39db3cae4dd1a 100644 |
16757 |
+--- a/drivers/net/hamradio/scc.c |
16758 |
++++ b/drivers/net/hamradio/scc.c |
16759 |
+@@ -301,12 +301,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc) |
16760 |
+ spin_lock_irqsave(&scc->lock, flags); |
16761 |
+ if (scc->tx_buff != NULL) |
16762 |
+ { |
16763 |
+- dev_kfree_skb(scc->tx_buff); |
16764 |
++ dev_kfree_skb_irq(scc->tx_buff); |
16765 |
+ scc->tx_buff = NULL; |
16766 |
+ } |
16767 |
+ |
16768 |
+ while (!skb_queue_empty(&scc->tx_queue)) |
16769 |
+- dev_kfree_skb(skb_dequeue(&scc->tx_queue)); |
16770 |
++ dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue)); |
16771 |
+ |
16772 |
+ spin_unlock_irqrestore(&scc->lock, flags); |
16773 |
+ } |
16774 |
+@@ -1668,7 +1668,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) |
16775 |
+ if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) { |
16776 |
+ struct sk_buff *skb_del; |
16777 |
+ skb_del = skb_dequeue(&scc->tx_queue); |
16778 |
+- dev_kfree_skb(skb_del); |
16779 |
++ dev_kfree_skb_irq(skb_del); |
16780 |
+ } |
16781 |
+ skb_queue_tail(&scc->tx_queue, skb); |
16782 |
+ netif_trans_update(dev); |
16783 |
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c |
16784 |
+index 88e44eb392851..10b3f4fb2612c 100644 |
16785 |
+--- a/drivers/net/macsec.c |
16786 |
++++ b/drivers/net/macsec.c |
16787 |
+@@ -2580,7 +2580,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) |
16788 |
+ const struct macsec_ops *ops; |
16789 |
+ struct macsec_context ctx; |
16790 |
+ struct macsec_dev *macsec; |
16791 |
+- int ret; |
16792 |
++ int ret = 0; |
16793 |
+ |
16794 |
+ if (!attrs[MACSEC_ATTR_IFINDEX]) |
16795 |
+ return -EINVAL; |
16796 |
+@@ -2593,28 +2593,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) |
16797 |
+ macsec_genl_offload_policy, NULL)) |
16798 |
+ return -EINVAL; |
16799 |
+ |
16800 |
++ rtnl_lock(); |
16801 |
++ |
16802 |
+ dev = get_dev_from_nl(genl_info_net(info), attrs); |
16803 |
+- if (IS_ERR(dev)) |
16804 |
+- return PTR_ERR(dev); |
16805 |
++ if (IS_ERR(dev)) { |
16806 |
++ ret = PTR_ERR(dev); |
16807 |
++ goto out; |
16808 |
++ } |
16809 |
+ macsec = macsec_priv(dev); |
16810 |
+ |
16811 |
+- if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) |
16812 |
+- return -EINVAL; |
16813 |
++ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { |
16814 |
++ ret = -EINVAL; |
16815 |
++ goto out; |
16816 |
++ } |
16817 |
+ |
16818 |
+ offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); |
16819 |
+ if (macsec->offload == offload) |
16820 |
+- return 0; |
16821 |
++ goto out; |
16822 |
+ |
16823 |
+ /* Check if the offloading mode is supported by the underlying layers */ |
16824 |
+ if (offload != MACSEC_OFFLOAD_OFF && |
16825 |
+- !macsec_check_offload(offload, macsec)) |
16826 |
+- return -EOPNOTSUPP; |
16827 |
++ !macsec_check_offload(offload, macsec)) { |
16828 |
++ ret = -EOPNOTSUPP; |
16829 |
++ goto out; |
16830 |
++ } |
16831 |
+ |
16832 |
+ /* Check if the net device is busy. */ |
16833 |
+- if (netif_running(dev)) |
16834 |
+- return -EBUSY; |
16835 |
+- |
16836 |
+- rtnl_lock(); |
16837 |
++ if (netif_running(dev)) { |
16838 |
++ ret = -EBUSY; |
16839 |
++ goto out; |
16840 |
++ } |
16841 |
+ |
16842 |
+ prev_offload = macsec->offload; |
16843 |
+ macsec->offload = offload; |
16844 |
+@@ -2649,7 +2657,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) |
16845 |
+ |
16846 |
+ rollback: |
16847 |
+ macsec->offload = prev_offload; |
16848 |
+- |
16849 |
++out: |
16850 |
+ rtnl_unlock(); |
16851 |
+ return ret; |
16852 |
+ } |
16853 |
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c |
16854 |
+index 1b7d588ff3c5c..b701ee83e64a8 100644 |
16855 |
+--- a/drivers/net/ntb_netdev.c |
16856 |
++++ b/drivers/net/ntb_netdev.c |
16857 |
+@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, |
16858 |
+ enqueue_again: |
16859 |
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); |
16860 |
+ if (rc) { |
16861 |
+- dev_kfree_skb(skb); |
16862 |
++ dev_kfree_skb_any(skb); |
16863 |
+ ndev->stats.rx_errors++; |
16864 |
+ ndev->stats.rx_fifo_errors++; |
16865 |
+ } |
16866 |
+@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, |
16867 |
+ ndev->stats.tx_aborted_errors++; |
16868 |
+ } |
16869 |
+ |
16870 |
+- dev_kfree_skb(skb); |
16871 |
++ dev_kfree_skb_any(skb); |
16872 |
+ |
16873 |
+ if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { |
16874 |
+ /* Make sure anybody stopping the queue after this sees the new |
16875 |
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
16876 |
+index 829d6ada1704c..c1f11d1df4cd6 100644 |
16877 |
+--- a/drivers/net/ppp/ppp_generic.c |
16878 |
++++ b/drivers/net/ppp/ppp_generic.c |
16879 |
+@@ -1742,6 +1742,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) |
16880 |
+ int len; |
16881 |
+ unsigned char *cp; |
16882 |
+ |
16883 |
++ skb->dev = ppp->dev; |
16884 |
++ |
16885 |
+ if (proto < 0x8000) { |
16886 |
+ #ifdef CONFIG_PPP_FILTER |
16887 |
+ /* check if we should pass this packet */ |
16888 |
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c |
16889 |
+index 6a212c085435b..5b01642ca44e0 100644 |
16890 |
+--- a/drivers/net/wan/farsync.c |
16891 |
++++ b/drivers/net/wan/farsync.c |
16892 |
+@@ -2545,6 +2545,7 @@ fst_remove_one(struct pci_dev *pdev) |
16893 |
+ struct net_device *dev = port_to_dev(&card->ports[i]); |
16894 |
+ |
16895 |
+ unregister_hdlc_device(dev); |
16896 |
++ free_netdev(dev); |
16897 |
+ } |
16898 |
+ |
16899 |
+ fst_disable_intr(card); |
16900 |
+@@ -2564,6 +2565,7 @@ fst_remove_one(struct pci_dev *pdev) |
16901 |
+ card->tx_dma_handle_card); |
16902 |
+ } |
16903 |
+ fst_card_array[card->card_no] = NULL; |
16904 |
++ kfree(card); |
16905 |
+ } |
16906 |
+ |
16907 |
+ static struct pci_driver fst_driver = { |
16908 |
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c |
16909 |
+index 1baec4b412c8d..efe38b2c1df73 100644 |
16910 |
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c |
16911 |
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c |
16912 |
+@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb) |
16913 |
+ } |
16914 |
+ } |
16915 |
+ |
16916 |
++static void ar5523_cancel_tx_cmd(struct ar5523 *ar) |
16917 |
++{ |
16918 |
++ usb_kill_urb(ar->tx_cmd.urb_tx); |
16919 |
++} |
16920 |
++ |
16921 |
+ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, |
16922 |
+ int ilen, void *odata, int olen, int flags) |
16923 |
+ { |
16924 |
+@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, |
16925 |
+ } |
16926 |
+ |
16927 |
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) { |
16928 |
++ ar5523_cancel_tx_cmd(ar); |
16929 |
+ cmd->odata = NULL; |
16930 |
+ ar5523_err(ar, "timeout waiting for command %02x reply\n", |
16931 |
+ code); |
16932 |
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c |
16933 |
+index 4d4e2f91e15cf..85a30c301dad7 100644 |
16934 |
+--- a/drivers/net/wireless/ath/ath10k/pci.c |
16935 |
++++ b/drivers/net/wireless/ath/ath10k/pci.c |
16936 |
+@@ -3793,18 +3793,22 @@ static struct pci_driver ath10k_pci_driver = { |
16937 |
+ |
16938 |
+ static int __init ath10k_pci_init(void) |
16939 |
+ { |
16940 |
+- int ret; |
16941 |
++ int ret1, ret2; |
16942 |
+ |
16943 |
+- ret = pci_register_driver(&ath10k_pci_driver); |
16944 |
+- if (ret) |
16945 |
++ ret1 = pci_register_driver(&ath10k_pci_driver); |
16946 |
++ if (ret1) |
16947 |
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n", |
16948 |
+- ret); |
16949 |
++ ret1); |
16950 |
+ |
16951 |
+- ret = ath10k_ahb_init(); |
16952 |
+- if (ret) |
16953 |
+- printk(KERN_ERR "ahb init failed: %d\n", ret); |
16954 |
++ ret2 = ath10k_ahb_init(); |
16955 |
++ if (ret2) |
16956 |
++ printk(KERN_ERR "ahb init failed: %d\n", ret2); |
16957 |
+ |
16958 |
+- return ret; |
16959 |
++ if (ret1 && ret2) |
16960 |
++ return ret1; |
16961 |
++ |
16962 |
++ /* registered to at least one bus */ |
16963 |
++ return 0; |
16964 |
+ } |
16965 |
+ module_init(ath10k_pci_init); |
16966 |
+ |
16967 |
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c |
16968 |
+index f06eec99de688..f938ac1a4abd4 100644 |
16969 |
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c |
16970 |
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c |
16971 |
+@@ -709,14 +709,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) |
16972 |
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; |
16973 |
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev; |
16974 |
+ struct sk_buff *skb = rx_buf->skb; |
16975 |
+- struct sk_buff *nskb; |
16976 |
+ int ret; |
16977 |
+ |
16978 |
+ if (!skb) |
16979 |
+ return; |
16980 |
+ |
16981 |
+ if (!hif_dev) |
16982 |
+- goto free; |
16983 |
++ goto free_skb; |
16984 |
+ |
16985 |
+ switch (urb->status) { |
16986 |
+ case 0: |
16987 |
+@@ -725,7 +724,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) |
16988 |
+ case -ECONNRESET: |
16989 |
+ case -ENODEV: |
16990 |
+ case -ESHUTDOWN: |
16991 |
+- goto free; |
16992 |
++ goto free_skb; |
16993 |
+ default: |
16994 |
+ skb_reset_tail_pointer(skb); |
16995 |
+ skb_trim(skb, 0); |
16996 |
+@@ -736,25 +735,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) |
16997 |
+ if (likely(urb->actual_length != 0)) { |
16998 |
+ skb_put(skb, urb->actual_length); |
16999 |
+ |
17000 |
+- /* Process the command first */ |
17001 |
++ /* |
17002 |
++ * Process the command first. |
17003 |
++ * skb is either freed here or passed to be |
17004 |
++ * managed to another callback function. |
17005 |
++ */ |
17006 |
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb, |
17007 |
+ skb->len, USB_REG_IN_PIPE); |
17008 |
+ |
17009 |
+- |
17010 |
+- nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); |
17011 |
+- if (!nskb) { |
17012 |
++ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); |
17013 |
++ if (!skb) { |
17014 |
+ dev_err(&hif_dev->udev->dev, |
17015 |
+ "ath9k_htc: REG_IN memory allocation failure\n"); |
17016 |
+- urb->context = NULL; |
17017 |
+- return; |
17018 |
++ goto free_rx_buf; |
17019 |
+ } |
17020 |
+ |
17021 |
+- rx_buf->skb = nskb; |
17022 |
++ rx_buf->skb = skb; |
17023 |
+ |
17024 |
+ usb_fill_int_urb(urb, hif_dev->udev, |
17025 |
+ usb_rcvintpipe(hif_dev->udev, |
17026 |
+ USB_REG_IN_PIPE), |
17027 |
+- nskb->data, MAX_REG_IN_BUF_SIZE, |
17028 |
++ skb->data, MAX_REG_IN_BUF_SIZE, |
17029 |
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1); |
17030 |
+ } |
17031 |
+ |
17032 |
+@@ -763,12 +764,13 @@ resubmit: |
17033 |
+ ret = usb_submit_urb(urb, GFP_ATOMIC); |
17034 |
+ if (ret) { |
17035 |
+ usb_unanchor_urb(urb); |
17036 |
+- goto free; |
17037 |
++ goto free_skb; |
17038 |
+ } |
17039 |
+ |
17040 |
+ return; |
17041 |
+-free: |
17042 |
++free_skb: |
17043 |
+ kfree_skb(skb); |
17044 |
++free_rx_buf: |
17045 |
+ kfree(rx_buf); |
17046 |
+ urb->context = NULL; |
17047 |
+ } |
17048 |
+@@ -781,14 +783,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) |
17049 |
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); |
17050 |
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp, |
17051 |
+ &hif_dev->tx.tx_buf, list) { |
17052 |
+- usb_get_urb(tx_buf->urb); |
17053 |
+- spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); |
17054 |
+- usb_kill_urb(tx_buf->urb); |
17055 |
+ list_del(&tx_buf->list); |
17056 |
+ usb_free_urb(tx_buf->urb); |
17057 |
+ kfree(tx_buf->buf); |
17058 |
+ kfree(tx_buf); |
17059 |
+- spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); |
17060 |
+ } |
17061 |
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); |
17062 |
+ |
17063 |
+@@ -1330,10 +1328,24 @@ static int send_eject_command(struct usb_interface *interface) |
17064 |
+ static int ath9k_hif_usb_probe(struct usb_interface *interface, |
17065 |
+ const struct usb_device_id *id) |
17066 |
+ { |
17067 |
++ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out; |
17068 |
+ struct usb_device *udev = interface_to_usbdev(interface); |
17069 |
++ struct usb_host_interface *alt; |
17070 |
+ struct hif_device_usb *hif_dev; |
17071 |
+ int ret = 0; |
17072 |
+ |
17073 |
++ /* Verify the expected endpoints are present */ |
17074 |
++ alt = interface->cur_altsetting; |
17075 |
++ if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 || |
17076 |
++ usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE || |
17077 |
++ usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE || |
17078 |
++ usb_endpoint_num(int_in) != USB_REG_IN_PIPE || |
17079 |
++ usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) { |
17080 |
++ dev_err(&udev->dev, |
17081 |
++ "ath9k_htc: Device endpoint numbers are not the expected ones\n"); |
17082 |
++ return -ENODEV; |
17083 |
++ } |
17084 |
++ |
17085 |
+ if (id->driver_info == STORAGE_DEVICE) |
17086 |
+ return send_eject_command(interface); |
17087 |
+ |
17088 |
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c |
17089 |
+index dcbe55b56e437..c54d8722e755c 100644 |
17090 |
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c |
17091 |
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c |
17092 |
+@@ -746,6 +746,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, |
17093 |
+ u32 i, j; |
17094 |
+ char end = '\0'; |
17095 |
+ |
17096 |
++ if (chiprev >= BITS_PER_TYPE(u32)) { |
17097 |
++ brcmf_err("Invalid chip revision %u\n", chiprev); |
17098 |
++ return NULL; |
17099 |
++ } |
17100 |
++ |
17101 |
+ for (i = 0; i < table_size; i++) { |
17102 |
+ if (mapping_table[i].chipid == chip && |
17103 |
+ mapping_table[i].revmask & BIT(chiprev)) |
17104 |
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c |
17105 |
+index 3ff4997e1c97a..f5f5a002fdcf9 100644 |
17106 |
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c |
17107 |
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c |
17108 |
+@@ -626,7 +626,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, |
17109 |
+ } |
17110 |
+ |
17111 |
+ if (!brcmf_chip_set_active(devinfo->ci, resetintr)) |
17112 |
+- return -EINVAL; |
17113 |
++ return -EIO; |
17114 |
+ return 0; |
17115 |
+ } |
17116 |
+ |
17117 |
+@@ -1118,6 +1118,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) |
17118 |
+ BRCMF_NROF_H2D_COMMON_MSGRINGS; |
17119 |
+ max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; |
17120 |
+ } |
17121 |
++ if (max_flowrings > 256) { |
17122 |
++ brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); |
17123 |
++ return -EIO; |
17124 |
++ } |
17125 |
+ |
17126 |
+ if (devinfo->dma_idx_sz != 0) { |
17127 |
+ bufsz = (max_submissionrings + max_completionrings) * |
17128 |
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c |
17129 |
+index f7961b22e0518..5006aa8317513 100644 |
17130 |
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c |
17131 |
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c |
17132 |
+@@ -3411,6 +3411,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, |
17133 |
+ /* Take arm out of reset */ |
17134 |
+ if (!brcmf_chip_set_active(bus->ci, rstvec)) { |
17135 |
+ brcmf_err("error getting out of ARM core reset\n"); |
17136 |
++ bcmerror = -EIO; |
17137 |
+ goto err; |
17138 |
+ } |
17139 |
+ |
17140 |
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
17141 |
+index b5368cb57ca8c..e354918c2480f 100644 |
17142 |
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
17143 |
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
17144 |
+@@ -1150,6 +1150,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, |
17145 |
+ struct sk_buff_head mpdus_skbs; |
17146 |
+ unsigned int payload_len; |
17147 |
+ int ret; |
17148 |
++ struct sk_buff *orig_skb = skb; |
17149 |
+ |
17150 |
+ if (WARN_ON_ONCE(!mvmsta)) |
17151 |
+ return -1; |
17152 |
+@@ -1182,8 +1183,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, |
17153 |
+ |
17154 |
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); |
17155 |
+ if (ret) { |
17156 |
++ /* Free skbs created as part of TSO logic that have not yet been dequeued */ |
17157 |
+ __skb_queue_purge(&mpdus_skbs); |
17158 |
+- return ret; |
17159 |
++ /* skb here is not necessarily same as skb that entered this method, |
17160 |
++ * so free it explicitly. |
17161 |
++ */ |
17162 |
++ if (skb == orig_skb) |
17163 |
++ ieee80211_free_txskb(mvm->hw, skb); |
17164 |
++ else |
17165 |
++ kfree_skb(skb); |
17166 |
++ /* there was error, but we consumed skb one way or another, so return 0 */ |
17167 |
++ return 0; |
17168 |
+ } |
17169 |
+ } |
17170 |
+ |
17171 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h |
17172 |
+index d1f00706d41ec..4e4af6e17b503 100644 |
17173 |
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h |
17174 |
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h |
17175 |
+@@ -1021,8 +1021,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid) |
17176 |
+ static inline u8 mt76_tx_power_nss_delta(u8 nss) |
17177 |
+ { |
17178 |
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 }; |
17179 |
++ u8 idx = nss - 1; |
17180 |
+ |
17181 |
+- return nss_delta[nss - 1]; |
17182 |
++ return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0; |
17183 |
+ } |
17184 |
+ |
17185 |
+ static inline bool mt76_testmode_enabled(struct mt76_phy *phy) |
17186 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c |
17187 |
+index 8f1338dae2114..96667b7d722d5 100644 |
17188 |
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c |
17189 |
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c |
17190 |
+@@ -290,7 +290,8 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy) |
17191 |
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) |
17192 |
+ return; |
17193 |
+ |
17194 |
+- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) |
17195 |
++ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) && |
17196 |
++ !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR)) |
17197 |
+ return; |
17198 |
+ |
17199 |
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && |
17200 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c |
17201 |
+index 6aca470e24013..7a4f277a16223 100644 |
17202 |
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c |
17203 |
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c |
17204 |
+@@ -302,7 +302,8 @@ static void mt7915_init_dfs_state(struct mt7915_phy *phy) |
17205 |
+ if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) |
17206 |
+ return; |
17207 |
+ |
17208 |
+- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) |
17209 |
++ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) && |
17210 |
++ !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR)) |
17211 |
+ return; |
17212 |
+ |
17213 |
+ if (mphy->chandef.chan->center_freq == chandef->chan->center_freq && |
17214 |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c |
17215 |
+index 426e7a32bdc86..6cf0c9b1b8b98 100644 |
17216 |
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c |
17217 |
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c |
17218 |
+@@ -1476,7 +1476,7 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy) |
17219 |
+ mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), |
17220 |
+ MT_MIB_RTS_FAIL_COUNT_MASK); |
17221 |
+ |
17222 |
+- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { |
17223 |
++ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { |
17224 |
+ u32 val, val2; |
17225 |
+ |
17226 |
+ val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); |
17227 |
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c |
17228 |
+index 1e9f60bb811ad..b47343e321b81 100644 |
17229 |
+--- a/drivers/net/wireless/mediatek/mt76/usb.c |
17230 |
++++ b/drivers/net/wireless/mediatek/mt76/usb.c |
17231 |
+@@ -814,6 +814,9 @@ static void mt76u_status_worker(struct mt76_worker *w) |
17232 |
+ struct mt76_queue *q; |
17233 |
+ int i; |
17234 |
+ |
17235 |
++ if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state)) |
17236 |
++ return; |
17237 |
++ |
17238 |
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
17239 |
+ q = dev->phy.q_tx[i]; |
17240 |
+ if (!q) |
17241 |
+@@ -833,11 +836,11 @@ static void mt76u_status_worker(struct mt76_worker *w) |
17242 |
+ wake_up(&dev->tx_wait); |
17243 |
+ |
17244 |
+ mt76_worker_schedule(&dev->tx_worker); |
17245 |
+- |
17246 |
+- if (dev->drv->tx_status_data && |
17247 |
+- !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) |
17248 |
+- queue_work(dev->wq, &dev->usb.stat_work); |
17249 |
+ } |
17250 |
++ |
17251 |
++ if (dev->drv->tx_status_data && |
17252 |
++ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) |
17253 |
++ queue_work(dev->wq, &dev->usb.stat_work); |
17254 |
+ } |
17255 |
+ |
17256 |
+ static void mt76u_tx_status_data(struct work_struct *work) |
17257 |
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h |
17258 |
+index 7ddce3c3f0c48..cd3ff9847ced3 100644 |
17259 |
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h |
17260 |
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h |
17261 |
+@@ -1190,7 +1190,7 @@ struct rtl8723bu_c2h { |
17262 |
+ u8 bw; |
17263 |
+ } __packed ra_report; |
17264 |
+ }; |
17265 |
+-}; |
17266 |
++} __packed; |
17267 |
+ |
17268 |
+ struct rtl8xxxu_fileops; |
17269 |
+ |
17270 |
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c |
17271 |
+index e74c885a04e50..7370d92a3bdad 100644 |
17272 |
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c |
17273 |
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c |
17274 |
+@@ -1607,18 +1607,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) |
17275 |
+ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) |
17276 |
+ { |
17277 |
+ struct device *dev = &priv->udev->dev; |
17278 |
+- u32 val32, bonding; |
17279 |
++ u32 val32, bonding, sys_cfg; |
17280 |
+ u16 val16; |
17281 |
+ |
17282 |
+- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); |
17283 |
+- priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >> |
17284 |
++ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); |
17285 |
++ priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >> |
17286 |
+ SYS_CFG_CHIP_VERSION_SHIFT; |
17287 |
+- if (val32 & SYS_CFG_TRP_VAUX_EN) { |
17288 |
++ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { |
17289 |
+ dev_info(dev, "Unsupported test chip\n"); |
17290 |
+ return -ENOTSUPP; |
17291 |
+ } |
17292 |
+ |
17293 |
+- if (val32 & SYS_CFG_BT_FUNC) { |
17294 |
++ if (sys_cfg & SYS_CFG_BT_FUNC) { |
17295 |
+ if (priv->chip_cut >= 3) { |
17296 |
+ sprintf(priv->chip_name, "8723BU"); |
17297 |
+ priv->rtl_chip = RTL8723B; |
17298 |
+@@ -1640,7 +1640,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) |
17299 |
+ if (val32 & MULTI_GPS_FUNC_EN) |
17300 |
+ priv->has_gps = 1; |
17301 |
+ priv->is_multi_func = 1; |
17302 |
+- } else if (val32 & SYS_CFG_TYPE_ID) { |
17303 |
++ } else if (sys_cfg & SYS_CFG_TYPE_ID) { |
17304 |
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); |
17305 |
+ bonding &= HPON_FSM_BONDING_MASK; |
17306 |
+ if (priv->fops->tx_desc_size == |
17307 |
+@@ -1688,7 +1688,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) |
17308 |
+ case RTL8188E: |
17309 |
+ case RTL8192E: |
17310 |
+ case RTL8723B: |
17311 |
+- switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { |
17312 |
++ switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) { |
17313 |
+ case SYS_CFG_VENDOR_ID_TSMC: |
17314 |
+ sprintf(priv->chip_vendor, "TSMC"); |
17315 |
+ break; |
17316 |
+@@ -1705,7 +1705,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) |
17317 |
+ } |
17318 |
+ break; |
17319 |
+ default: |
17320 |
+- if (val32 & SYS_CFG_VENDOR_ID) { |
17321 |
++ if (sys_cfg & SYS_CFG_VENDOR_ID) { |
17322 |
+ sprintf(priv->chip_vendor, "UMC"); |
17323 |
+ priv->vendor_umc = 1; |
17324 |
+ } else { |
17325 |
+@@ -5517,7 +5517,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) |
17326 |
+ rarpt->txrate.flags = 0; |
17327 |
+ rate = c2h->ra_report.rate; |
17328 |
+ sgi = c2h->ra_report.sgi; |
17329 |
+- bw = c2h->ra_report.bw; |
17330 |
+ |
17331 |
+ if (rate < DESC_RATE_MCS0) { |
17332 |
+ rarpt->txrate.legacy = |
17333 |
+@@ -5534,8 +5533,13 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) |
17334 |
+ RATE_INFO_FLAGS_SHORT_GI; |
17335 |
+ } |
17336 |
+ |
17337 |
+- if (bw == RATE_INFO_BW_20) |
17338 |
+- rarpt->txrate.bw |= RATE_INFO_BW_20; |
17339 |
++ if (skb->len >= offsetofend(typeof(*c2h), ra_report.bw)) { |
17340 |
++ if (c2h->ra_report.bw == RTL8XXXU_CHANNEL_WIDTH_40) |
17341 |
++ bw = RATE_INFO_BW_40; |
17342 |
++ else |
17343 |
++ bw = RATE_INFO_BW_20; |
17344 |
++ rarpt->txrate.bw = bw; |
17345 |
++ } |
17346 |
+ } |
17347 |
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate); |
17348 |
+ rarpt->bit_rate = bit_rate; |
17349 |
+diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c |
17350 |
+index 6bfaab48b507d..6b64a103f39f0 100644 |
17351 |
+--- a/drivers/net/wireless/rsi/rsi_91x_core.c |
17352 |
++++ b/drivers/net/wireless/rsi/rsi_91x_core.c |
17353 |
+@@ -465,7 +465,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) |
17354 |
+ tid, 0); |
17355 |
+ } |
17356 |
+ } |
17357 |
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { |
17358 |
++ |
17359 |
++ if (IEEE80211_SKB_CB(skb)->control.flags & |
17360 |
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { |
17361 |
+ q_num = MGMT_SOFT_Q; |
17362 |
+ skb->priority = q_num; |
17363 |
+ } |
17364 |
+diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c |
17365 |
+index dca81a4bbdd7f..30d2eccbcadd5 100644 |
17366 |
+--- a/drivers/net/wireless/rsi/rsi_91x_hal.c |
17367 |
++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c |
17368 |
+@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) |
17369 |
+ u8 header_size; |
17370 |
+ u8 vap_id = 0; |
17371 |
+ u8 dword_align_bytes; |
17372 |
++ bool tx_eapol; |
17373 |
+ u16 seq_num; |
17374 |
+ |
17375 |
+ info = IEEE80211_SKB_CB(skb); |
17376 |
+ vif = info->control.vif; |
17377 |
+ tx_params = (struct skb_info *)info->driver_data; |
17378 |
+ |
17379 |
++ tx_eapol = IEEE80211_SKB_CB(skb)->control.flags & |
17380 |
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO; |
17381 |
++ |
17382 |
+ header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); |
17383 |
+ if (header_size > skb_headroom(skb)) { |
17384 |
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); |
17385 |
+@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) |
17386 |
+ } |
17387 |
+ } |
17388 |
+ |
17389 |
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { |
17390 |
++ if (tx_eapol) { |
17391 |
+ rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); |
17392 |
+ |
17393 |
+ data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); |
17394 |
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c |
17395 |
+index 6dc0af63440f4..939d27652a4c9 100644 |
17396 |
+--- a/drivers/nfc/pn533/pn533.c |
17397 |
++++ b/drivers/nfc/pn533/pn533.c |
17398 |
+@@ -1297,6 +1297,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, |
17399 |
+ if (IS_ERR(resp)) |
17400 |
+ return PTR_ERR(resp); |
17401 |
+ |
17402 |
++ memset(&nfc_target, 0, sizeof(struct nfc_target)); |
17403 |
++ |
17404 |
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data; |
17405 |
+ |
17406 |
+ rc = rsp->status & PN533_CMD_RET_MASK; |
17407 |
+@@ -1928,6 +1930,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, |
17408 |
+ |
17409 |
+ dev_dbg(dev->dev, "Creating new target\n"); |
17410 |
+ |
17411 |
++ memset(&nfc_target, 0, sizeof(struct nfc_target)); |
17412 |
++ |
17413 |
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; |
17414 |
+ nfc_target.nfcid1_len = 10; |
17415 |
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); |
17416 |
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
17417 |
+index 692ee0f4a1ec3..2d5b5e0fb66a3 100644 |
17418 |
+--- a/drivers/nvme/host/core.c |
17419 |
++++ b/drivers/nvme/host/core.c |
17420 |
+@@ -2874,7 +2874,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) |
17421 |
+ |
17422 |
+ id = kzalloc(sizeof(*id), GFP_KERNEL); |
17423 |
+ if (!id) |
17424 |
+- return 0; |
17425 |
++ return -ENOMEM; |
17426 |
+ |
17427 |
+ c.identify.opcode = nvme_admin_identify; |
17428 |
+ c.identify.cns = NVME_ID_CNS_CS_CTRL; |
17429 |
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c |
17430 |
+index 87a347248c38f..cfd0385511564 100644 |
17431 |
+--- a/drivers/nvme/target/core.c |
17432 |
++++ b/drivers/nvme/target/core.c |
17433 |
+@@ -15,6 +15,7 @@ |
17434 |
+ |
17435 |
+ #include "nvmet.h" |
17436 |
+ |
17437 |
++struct kmem_cache *nvmet_bvec_cache; |
17438 |
+ struct workqueue_struct *buffered_io_wq; |
17439 |
+ struct workqueue_struct *zbd_wq; |
17440 |
+ static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; |
17441 |
+@@ -1607,26 +1608,28 @@ void nvmet_subsys_put(struct nvmet_subsys *subsys) |
17442 |
+ |
17443 |
+ static int __init nvmet_init(void) |
17444 |
+ { |
17445 |
+- int error; |
17446 |
++ int error = -ENOMEM; |
17447 |
+ |
17448 |
+ nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1; |
17449 |
+ |
17450 |
++ nvmet_bvec_cache = kmem_cache_create("nvmet-bvec", |
17451 |
++ NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0, |
17452 |
++ SLAB_HWCACHE_ALIGN, NULL); |
17453 |
++ if (!nvmet_bvec_cache) |
17454 |
++ return -ENOMEM; |
17455 |
++ |
17456 |
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); |
17457 |
+ if (!zbd_wq) |
17458 |
+- return -ENOMEM; |
17459 |
++ goto out_destroy_bvec_cache; |
17460 |
+ |
17461 |
+ buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", |
17462 |
+ WQ_MEM_RECLAIM, 0); |
17463 |
+- if (!buffered_io_wq) { |
17464 |
+- error = -ENOMEM; |
17465 |
++ if (!buffered_io_wq) |
17466 |
+ goto out_free_zbd_work_queue; |
17467 |
+- } |
17468 |
+ |
17469 |
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); |
17470 |
+- if (!nvmet_wq) { |
17471 |
+- error = -ENOMEM; |
17472 |
++ if (!nvmet_wq) |
17473 |
+ goto out_free_buffered_work_queue; |
17474 |
+- } |
17475 |
+ |
17476 |
+ error = nvmet_init_discovery(); |
17477 |
+ if (error) |
17478 |
+@@ -1645,6 +1648,8 @@ out_free_buffered_work_queue: |
17479 |
+ destroy_workqueue(buffered_io_wq); |
17480 |
+ out_free_zbd_work_queue: |
17481 |
+ destroy_workqueue(zbd_wq); |
17482 |
++out_destroy_bvec_cache: |
17483 |
++ kmem_cache_destroy(nvmet_bvec_cache); |
17484 |
+ return error; |
17485 |
+ } |
17486 |
+ |
17487 |
+@@ -1656,6 +1661,7 @@ static void __exit nvmet_exit(void) |
17488 |
+ destroy_workqueue(nvmet_wq); |
17489 |
+ destroy_workqueue(buffered_io_wq); |
17490 |
+ destroy_workqueue(zbd_wq); |
17491 |
++ kmem_cache_destroy(nvmet_bvec_cache); |
17492 |
+ |
17493 |
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); |
17494 |
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); |
17495 |
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c |
17496 |
+index 228871d48106b..eadba13b276de 100644 |
17497 |
+--- a/drivers/nvme/target/io-cmd-file.c |
17498 |
++++ b/drivers/nvme/target/io-cmd-file.c |
17499 |
+@@ -11,7 +11,6 @@ |
17500 |
+ #include <linux/fs.h> |
17501 |
+ #include "nvmet.h" |
17502 |
+ |
17503 |
+-#define NVMET_MAX_MPOOL_BVEC 16 |
17504 |
+ #define NVMET_MIN_MPOOL_OBJ 16 |
17505 |
+ |
17506 |
+ int nvmet_file_ns_revalidate(struct nvmet_ns *ns) |
17507 |
+@@ -33,8 +32,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns) |
17508 |
+ flush_workqueue(buffered_io_wq); |
17509 |
+ mempool_destroy(ns->bvec_pool); |
17510 |
+ ns->bvec_pool = NULL; |
17511 |
+- kmem_cache_destroy(ns->bvec_cache); |
17512 |
+- ns->bvec_cache = NULL; |
17513 |
+ fput(ns->file); |
17514 |
+ ns->file = NULL; |
17515 |
+ } |
17516 |
+@@ -68,16 +65,8 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns) |
17517 |
+ ns->blksize_shift = min_t(u8, |
17518 |
+ file_inode(ns->file)->i_blkbits, 12); |
17519 |
+ |
17520 |
+- ns->bvec_cache = kmem_cache_create("nvmet-bvec", |
17521 |
+- NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), |
17522 |
+- 0, SLAB_HWCACHE_ALIGN, NULL); |
17523 |
+- if (!ns->bvec_cache) { |
17524 |
+- ret = -ENOMEM; |
17525 |
+- goto err; |
17526 |
+- } |
17527 |
+- |
17528 |
+ ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab, |
17529 |
+- mempool_free_slab, ns->bvec_cache); |
17530 |
++ mempool_free_slab, nvmet_bvec_cache); |
17531 |
+ |
17532 |
+ if (!ns->bvec_pool) { |
17533 |
+ ret = -ENOMEM; |
17534 |
+@@ -86,9 +75,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns) |
17535 |
+ |
17536 |
+ return ret; |
17537 |
+ err: |
17538 |
++ fput(ns->file); |
17539 |
++ ns->file = NULL; |
17540 |
+ ns->size = 0; |
17541 |
+ ns->blksize_shift = 0; |
17542 |
+- nvmet_file_ns_disable(ns); |
17543 |
+ return ret; |
17544 |
+ } |
17545 |
+ |
17546 |
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h |
17547 |
+index dbeb0b8c11947..fdb06a9d430d2 100644 |
17548 |
+--- a/drivers/nvme/target/nvmet.h |
17549 |
++++ b/drivers/nvme/target/nvmet.h |
17550 |
+@@ -77,7 +77,6 @@ struct nvmet_ns { |
17551 |
+ |
17552 |
+ struct completion disable_done; |
17553 |
+ mempool_t *bvec_pool; |
17554 |
+- struct kmem_cache *bvec_cache; |
17555 |
+ |
17556 |
+ int use_p2pmem; |
17557 |
+ struct pci_dev *p2p_dev; |
17558 |
+@@ -363,6 +362,8 @@ struct nvmet_req { |
17559 |
+ u64 error_slba; |
17560 |
+ }; |
17561 |
+ |
17562 |
++#define NVMET_MAX_MPOOL_BVEC 16 |
17563 |
++extern struct kmem_cache *nvmet_bvec_cache; |
17564 |
+ extern struct workqueue_struct *buffered_io_wq; |
17565 |
+ extern struct workqueue_struct *zbd_wq; |
17566 |
+ extern struct workqueue_struct *nvmet_wq; |
17567 |
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c |
17568 |
+index d1187123c4fc4..424682372417d 100644 |
17569 |
+--- a/drivers/of/overlay.c |
17570 |
++++ b/drivers/of/overlay.c |
17571 |
+@@ -547,7 +547,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs, |
17572 |
+ |
17573 |
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); |
17574 |
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); |
17575 |
+- node_path_match = !strcmp(fn_1, fn_2); |
17576 |
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); |
17577 |
+ kfree(fn_1); |
17578 |
+ kfree(fn_2); |
17579 |
+ if (node_path_match) { |
17580 |
+@@ -582,7 +582,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs, |
17581 |
+ |
17582 |
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); |
17583 |
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); |
17584 |
+- node_path_match = !strcmp(fn_1, fn_2); |
17585 |
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); |
17586 |
+ kfree(fn_1); |
17587 |
+ kfree(fn_2); |
17588 |
+ if (node_path_match && |
17589 |
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c |
17590 |
+index e408ebf5bd738..00972a7bc9768 100644 |
17591 |
+--- a/drivers/pci/controller/dwc/pcie-designware.c |
17592 |
++++ b/drivers/pci/controller/dwc/pcie-designware.c |
17593 |
+@@ -730,7 +730,7 @@ void dw_pcie_setup(struct dw_pcie *pci) |
17594 |
+ if (pci->n_fts[1]) { |
17595 |
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); |
17596 |
+ val &= ~PORT_LOGIC_N_FTS_MASK; |
17597 |
+- val |= pci->n_fts[pci->link_gen - 1]; |
17598 |
++ val |= pci->n_fts[1]; |
17599 |
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); |
17600 |
+ } |
17601 |
+ |
17602 |
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c |
17603 |
+index a5987e52700e3..8dce71142e10f 100644 |
17604 |
+--- a/drivers/pci/controller/vmd.c |
17605 |
++++ b/drivers/pci/controller/vmd.c |
17606 |
+@@ -900,6 +900,11 @@ static int vmd_resume(struct device *dev) |
17607 |
+ struct vmd_dev *vmd = pci_get_drvdata(pdev); |
17608 |
+ int err, i; |
17609 |
+ |
17610 |
++ if (vmd->irq_domain) |
17611 |
++ vmd_set_msi_remapping(vmd, true); |
17612 |
++ else |
17613 |
++ vmd_set_msi_remapping(vmd, false); |
17614 |
++ |
17615 |
+ for (i = 0; i < vmd->msix_count; i++) { |
17616 |
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i), |
17617 |
+ vmd_irq, IRQF_NO_THREAD, |
17618 |
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c |
17619 |
+index a5ed779b0a512..45535d4ae6445 100644 |
17620 |
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c |
17621 |
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c |
17622 |
+@@ -883,7 +883,7 @@ static int pci_epf_test_bind(struct pci_epf *epf) |
17623 |
+ if (ret) |
17624 |
+ epf_test->dma_supported = false; |
17625 |
+ |
17626 |
+- if (linkup_notifier) { |
17627 |
++ if (linkup_notifier || core_init_notifier) { |
17628 |
+ epf->nb.notifier_call = pci_epf_test_notifier; |
17629 |
+ pci_epc_register_notifier(epc, &epf->nb); |
17630 |
+ } else { |
17631 |
+diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c |
17632 |
+index 12ecd0aaa28d6..0050e8f6814ed 100644 |
17633 |
+--- a/drivers/pci/irq.c |
17634 |
++++ b/drivers/pci/irq.c |
17635 |
+@@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, |
17636 |
+ va_start(ap, fmt); |
17637 |
+ devname = kvasprintf(GFP_KERNEL, fmt, ap); |
17638 |
+ va_end(ap); |
17639 |
++ if (!devname) |
17640 |
++ return -ENOMEM; |
17641 |
+ |
17642 |
+ ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn, |
17643 |
+ irqflags, devname, dev_id); |
17644 |
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c |
17645 |
+index 280a6ae3e27cf..54aa4658fb36e 100644 |
17646 |
+--- a/drivers/perf/arm_dmc620_pmu.c |
17647 |
++++ b/drivers/perf/arm_dmc620_pmu.c |
17648 |
+@@ -725,6 +725,8 @@ static struct platform_driver dmc620_pmu_driver = { |
17649 |
+ |
17650 |
+ static int __init dmc620_pmu_init(void) |
17651 |
+ { |
17652 |
++ int ret; |
17653 |
++ |
17654 |
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, |
17655 |
+ DMC620_DRVNAME, |
17656 |
+ NULL, |
17657 |
+@@ -732,7 +734,11 @@ static int __init dmc620_pmu_init(void) |
17658 |
+ if (cpuhp_state_num < 0) |
17659 |
+ return cpuhp_state_num; |
17660 |
+ |
17661 |
+- return platform_driver_register(&dmc620_pmu_driver); |
17662 |
++ ret = platform_driver_register(&dmc620_pmu_driver); |
17663 |
++ if (ret) |
17664 |
++ cpuhp_remove_multi_state(cpuhp_state_num); |
17665 |
++ |
17666 |
++ return ret; |
17667 |
+ } |
17668 |
+ |
17669 |
+ static void __exit dmc620_pmu_exit(void) |
17670 |
+diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c |
17671 |
+index a36698a90d2f2..54b8ba032c787 100644 |
17672 |
+--- a/drivers/perf/arm_dsu_pmu.c |
17673 |
++++ b/drivers/perf/arm_dsu_pmu.c |
17674 |
+@@ -858,7 +858,11 @@ static int __init dsu_pmu_init(void) |
17675 |
+ if (ret < 0) |
17676 |
+ return ret; |
17677 |
+ dsu_pmu_cpuhp_state = ret; |
17678 |
+- return platform_driver_register(&dsu_pmu_driver); |
17679 |
++ ret = platform_driver_register(&dsu_pmu_driver); |
17680 |
++ if (ret) |
17681 |
++ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); |
17682 |
++ |
17683 |
++ return ret; |
17684 |
+ } |
17685 |
+ |
17686 |
+ static void __exit dsu_pmu_exit(void) |
17687 |
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c |
17688 |
+index 226348822ab39..5933ad151f869 100644 |
17689 |
+--- a/drivers/perf/arm_smmuv3_pmu.c |
17690 |
++++ b/drivers/perf/arm_smmuv3_pmu.c |
17691 |
+@@ -896,6 +896,8 @@ static struct platform_driver smmu_pmu_driver = { |
17692 |
+ |
17693 |
+ static int __init arm_smmu_pmu_init(void) |
17694 |
+ { |
17695 |
++ int ret; |
17696 |
++ |
17697 |
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, |
17698 |
+ "perf/arm/pmcg:online", |
17699 |
+ NULL, |
17700 |
+@@ -903,7 +905,11 @@ static int __init arm_smmu_pmu_init(void) |
17701 |
+ if (cpuhp_state_num < 0) |
17702 |
+ return cpuhp_state_num; |
17703 |
+ |
17704 |
+- return platform_driver_register(&smmu_pmu_driver); |
17705 |
++ ret = platform_driver_register(&smmu_pmu_driver); |
17706 |
++ if (ret) |
17707 |
++ cpuhp_remove_multi_state(cpuhp_state_num); |
17708 |
++ |
17709 |
++ return ret; |
17710 |
+ } |
17711 |
+ module_init(arm_smmu_pmu_init); |
17712 |
+ |
17713 |
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c |
17714 |
+index 2cb3779fcdf82..c0c3ab9b2a153 100644 |
17715 |
+--- a/drivers/phy/broadcom/phy-brcm-usb.c |
17716 |
++++ b/drivers/phy/broadcom/phy-brcm-usb.c |
17717 |
+@@ -102,9 +102,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier, |
17718 |
+ |
17719 |
+ static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) |
17720 |
+ { |
17721 |
+- struct phy *gphy = dev_id; |
17722 |
++ struct device *dev = dev_id; |
17723 |
+ |
17724 |
+- pm_wakeup_event(&gphy->dev, 0); |
17725 |
++ pm_wakeup_event(dev, 0); |
17726 |
+ |
17727 |
+ return IRQ_HANDLED; |
17728 |
+ } |
17729 |
+@@ -451,7 +451,7 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev, |
17730 |
+ if (priv->wake_irq >= 0) { |
17731 |
+ err = devm_request_irq(dev, priv->wake_irq, |
17732 |
+ brcm_usb_phy_wake_isr, 0, |
17733 |
+- dev_name(dev), gphy); |
17734 |
++ dev_name(dev), dev); |
17735 |
+ if (err < 0) |
17736 |
+ return err; |
17737 |
+ device_set_wakeup_capable(dev, 1); |
17738 |
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c |
17739 |
+index b1db28007986e..e6fe1330eab9f 100644 |
17740 |
+--- a/drivers/pinctrl/pinconf-generic.c |
17741 |
++++ b/drivers/pinctrl/pinconf-generic.c |
17742 |
+@@ -393,8 +393,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, |
17743 |
+ for_each_available_child_of_node(np_config, np) { |
17744 |
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, |
17745 |
+ &reserved_maps, num_maps, type); |
17746 |
+- if (ret < 0) |
17747 |
++ if (ret < 0) { |
17748 |
++ of_node_put(np); |
17749 |
+ goto exit; |
17750 |
++ } |
17751 |
+ } |
17752 |
+ return 0; |
17753 |
+ |
17754 |
+diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c |
17755 |
+index ecab6bf63dc6d..ad4db99094a79 100644 |
17756 |
+--- a/drivers/pinctrl/pinctrl-k210.c |
17757 |
++++ b/drivers/pinctrl/pinctrl-k210.c |
17758 |
+@@ -862,8 +862,10 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, |
17759 |
+ for_each_available_child_of_node(np_config, np) { |
17760 |
+ ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map, |
17761 |
+ &reserved_maps, num_maps); |
17762 |
+- if (ret < 0) |
17763 |
++ if (ret < 0) { |
17764 |
++ of_node_put(np); |
17765 |
+ goto err; |
17766 |
++ } |
17767 |
+ } |
17768 |
+ return 0; |
17769 |
+ |
17770 |
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c |
17771 |
+index aadb8d237aefc..b94abb8f7706a 100644 |
17772 |
+--- a/drivers/platform/chrome/cros_ec_typec.c |
17773 |
++++ b/drivers/platform/chrome/cros_ec_typec.c |
17774 |
+@@ -156,12 +156,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port, |
17775 |
+ return 0; |
17776 |
+ |
17777 |
+ role_sw_err: |
17778 |
+- usb_role_switch_put(port->role_sw); |
17779 |
+-ori_sw_err: |
17780 |
+ typec_switch_put(port->ori_sw); |
17781 |
+-mux_err: |
17782 |
++ port->ori_sw = NULL; |
17783 |
++ori_sw_err: |
17784 |
+ typec_mux_put(port->mux); |
17785 |
+- |
17786 |
++ port->mux = NULL; |
17787 |
++mux_err: |
17788 |
+ return -ENODEV; |
17789 |
+ } |
17790 |
+ |
17791 |
+diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c |
17792 |
+index 48a6617aa12f3..de76de6f50900 100644 |
17793 |
+--- a/drivers/platform/chrome/cros_usbpd_notify.c |
17794 |
++++ b/drivers/platform/chrome/cros_usbpd_notify.c |
17795 |
+@@ -285,7 +285,11 @@ static int __init cros_usbpd_notify_init(void) |
17796 |
+ return ret; |
17797 |
+ |
17798 |
+ #ifdef CONFIG_ACPI |
17799 |
+- platform_driver_register(&cros_usbpd_notify_acpi_driver); |
17800 |
++ ret = platform_driver_register(&cros_usbpd_notify_acpi_driver); |
17801 |
++ if (ret) { |
17802 |
++ platform_driver_unregister(&cros_usbpd_notify_plat_driver); |
17803 |
++ return ret; |
17804 |
++ } |
17805 |
+ #endif |
17806 |
+ return 0; |
17807 |
+ } |
17808 |
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c |
17809 |
+index 65b4a819f1bdf..c2c9b0d3244cb 100644 |
17810 |
+--- a/drivers/platform/mellanox/mlxbf-pmc.c |
17811 |
++++ b/drivers/platform/mellanox/mlxbf-pmc.c |
17812 |
+@@ -358,7 +358,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = { |
17813 |
+ { 0x32, "DDN_DIAG_W_INGRESS" }, |
17814 |
+ { 0x33, "DDN_DIAG_C_INGRESS" }, |
17815 |
+ { 0x34, "DDN_DIAG_CORE_SENT" }, |
17816 |
+- { 0x35, "NDN_DIAG_S_OUT_OF_CRED" }, |
17817 |
++ { 0x35, "NDN_DIAG_N_OUT_OF_CRED" }, |
17818 |
+ { 0x36, "NDN_DIAG_S_OUT_OF_CRED" }, |
17819 |
+ { 0x37, "NDN_DIAG_E_OUT_OF_CRED" }, |
17820 |
+ { 0x38, "NDN_DIAG_W_OUT_OF_CRED" }, |
17821 |
+diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c |
17822 |
+index eac3e6b4ea113..935562c870c3d 100644 |
17823 |
+--- a/drivers/platform/x86/huawei-wmi.c |
17824 |
++++ b/drivers/platform/x86/huawei-wmi.c |
17825 |
+@@ -760,6 +760,9 @@ static int huawei_wmi_input_setup(struct device *dev, |
17826 |
+ const char *guid, |
17827 |
+ struct input_dev **idev) |
17828 |
+ { |
17829 |
++ acpi_status status; |
17830 |
++ int err; |
17831 |
++ |
17832 |
+ *idev = devm_input_allocate_device(dev); |
17833 |
+ if (!*idev) |
17834 |
+ return -ENOMEM; |
17835 |
+@@ -769,10 +772,19 @@ static int huawei_wmi_input_setup(struct device *dev, |
17836 |
+ (*idev)->id.bustype = BUS_HOST; |
17837 |
+ (*idev)->dev.parent = dev; |
17838 |
+ |
17839 |
+- return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) || |
17840 |
+- input_register_device(*idev) || |
17841 |
+- wmi_install_notify_handler(guid, huawei_wmi_input_notify, |
17842 |
+- *idev); |
17843 |
++ err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL); |
17844 |
++ if (err) |
17845 |
++ return err; |
17846 |
++ |
17847 |
++ err = input_register_device(*idev); |
17848 |
++ if (err) |
17849 |
++ return err; |
17850 |
++ |
17851 |
++ status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev); |
17852 |
++ if (ACPI_FAILURE(status)) |
17853 |
++ return -EIO; |
17854 |
++ |
17855 |
++ return 0; |
17856 |
+ } |
17857 |
+ |
17858 |
+ static void huawei_wmi_input_exit(struct device *dev, const char *guid) |
17859 |
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c |
17860 |
+index 7cc9089d1e14f..e7a3e34028178 100644 |
17861 |
+--- a/drivers/platform/x86/intel_scu_ipc.c |
17862 |
++++ b/drivers/platform/x86/intel_scu_ipc.c |
17863 |
+@@ -583,7 +583,6 @@ __intel_scu_ipc_register(struct device *parent, |
17864 |
+ scu->dev.parent = parent; |
17865 |
+ scu->dev.class = &intel_scu_ipc_class; |
17866 |
+ scu->dev.release = intel_scu_ipc_release; |
17867 |
+- dev_set_name(&scu->dev, "intel_scu_ipc"); |
17868 |
+ |
17869 |
+ if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem), |
17870 |
+ "intel_scu_ipc")) { |
17871 |
+@@ -612,6 +611,7 @@ __intel_scu_ipc_register(struct device *parent, |
17872 |
+ * After this point intel_scu_ipc_release() takes care of |
17873 |
+ * releasing the SCU IPC resources once refcount drops to zero. |
17874 |
+ */ |
17875 |
++ dev_set_name(&scu->dev, "intel_scu_ipc"); |
17876 |
+ err = device_register(&scu->dev); |
17877 |
+ if (err) { |
17878 |
+ put_device(&scu->dev); |
17879 |
+diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c |
17880 |
+index 9a19fbd2f7341..9a457956025a5 100644 |
17881 |
+--- a/drivers/platform/x86/mxm-wmi.c |
17882 |
++++ b/drivers/platform/x86/mxm-wmi.c |
17883 |
+@@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter) |
17884 |
+ .xarg = 1, |
17885 |
+ }; |
17886 |
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; |
17887 |
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
17888 |
+ acpi_status status; |
17889 |
+ |
17890 |
+ printk("calling mux switch %d\n", adapter); |
17891 |
+ |
17892 |
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, |
17893 |
+- &output); |
17894 |
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); |
17895 |
+ |
17896 |
+ if (ACPI_FAILURE(status)) |
17897 |
+ return status; |
17898 |
+@@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter) |
17899 |
+ .xarg = 1, |
17900 |
+ }; |
17901 |
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; |
17902 |
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
17903 |
+ acpi_status status; |
17904 |
+ |
17905 |
+ printk("calling mux switch %d\n", adapter); |
17906 |
+ |
17907 |
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, |
17908 |
+- &output); |
17909 |
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL); |
17910 |
+ |
17911 |
+ if (ACPI_FAILURE(status)) |
17912 |
+ return status; |
17913 |
+diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c |
17914 |
+index 4df5aa6a309c3..6a60c5d83383b 100644 |
17915 |
+--- a/drivers/pnp/core.c |
17916 |
++++ b/drivers/pnp/core.c |
17917 |
+@@ -148,14 +148,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, |
17918 |
+ dev->dev.coherent_dma_mask = dev->dma_mask; |
17919 |
+ dev->dev.release = &pnp_release_device; |
17920 |
+ |
17921 |
+- dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); |
17922 |
+- |
17923 |
+ dev_id = pnp_add_id(dev, pnpid); |
17924 |
+ if (!dev_id) { |
17925 |
+ kfree(dev); |
17926 |
+ return NULL; |
17927 |
+ } |
17928 |
+ |
17929 |
++ dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number); |
17930 |
++ |
17931 |
+ return dev; |
17932 |
+ } |
17933 |
+ |
17934 |
+diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c |
17935 |
+index 15eadaf46f144..a4f766fc7c9d7 100644 |
17936 |
+--- a/drivers/power/supply/ab8500_charger.c |
17937 |
++++ b/drivers/power/supply/ab8500_charger.c |
17938 |
+@@ -3726,7 +3726,14 @@ static int __init ab8500_charger_init(void) |
17939 |
+ if (ret) |
17940 |
+ return ret; |
17941 |
+ |
17942 |
+- return platform_driver_register(&ab8500_charger_driver); |
17943 |
++ ret = platform_driver_register(&ab8500_charger_driver); |
17944 |
++ if (ret) { |
17945 |
++ platform_unregister_drivers(ab8500_charger_component_drivers, |
17946 |
++ ARRAY_SIZE(ab8500_charger_component_drivers)); |
17947 |
++ return ret; |
17948 |
++ } |
17949 |
++ |
17950 |
++ return 0; |
17951 |
+ } |
17952 |
+ |
17953 |
+ static void __exit ab8500_charger_exit(void) |
17954 |
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c |
17955 |
+index a6e9afa5a1cff..3f9c60c5b250b 100644 |
17956 |
+--- a/drivers/power/supply/power_supply_core.c |
17957 |
++++ b/drivers/power/supply/power_supply_core.c |
17958 |
+@@ -696,6 +696,11 @@ int power_supply_get_battery_info(struct power_supply *psy, |
17959 |
+ int i, tab_len, size; |
17960 |
+ |
17961 |
+ propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index); |
17962 |
++ if (!propname) { |
17963 |
++ power_supply_put_battery_info(psy, info); |
17964 |
++ err = -ENOMEM; |
17965 |
++ goto out_put_node; |
17966 |
++ } |
17967 |
+ list = of_get_property(battery_np, propname, &size); |
17968 |
+ if (!list || !size) { |
17969 |
+ dev_err(&psy->dev, "failed to get %s\n", propname); |
17970 |
+@@ -1220,8 +1225,8 @@ create_triggers_failed: |
17971 |
+ register_cooler_failed: |
17972 |
+ psy_unregister_thermal(psy); |
17973 |
+ register_thermal_failed: |
17974 |
+- device_del(dev); |
17975 |
+ wakeup_init_failed: |
17976 |
++ device_del(dev); |
17977 |
+ device_add_failed: |
17978 |
+ check_supplies_failed: |
17979 |
+ dev_set_name_failed: |
17980 |
+diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c |
17981 |
+index 7ed4e4bb26eca..fd33cdf9cf12c 100644 |
17982 |
+--- a/drivers/power/supply/z2_battery.c |
17983 |
++++ b/drivers/power/supply/z2_battery.c |
17984 |
+@@ -206,10 +206,12 @@ static int z2_batt_probe(struct i2c_client *client, |
17985 |
+ |
17986 |
+ charger->charge_gpiod = devm_gpiod_get_optional(&client->dev, |
17987 |
+ NULL, GPIOD_IN); |
17988 |
+- if (IS_ERR(charger->charge_gpiod)) |
17989 |
+- return dev_err_probe(&client->dev, |
17990 |
++ if (IS_ERR(charger->charge_gpiod)) { |
17991 |
++ ret = dev_err_probe(&client->dev, |
17992 |
+ PTR_ERR(charger->charge_gpiod), |
17993 |
+ "failed to get charge GPIO\n"); |
17994 |
++ goto err; |
17995 |
++ } |
17996 |
+ |
17997 |
+ if (charger->charge_gpiod) { |
17998 |
+ gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG"); |
17999 |
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c |
18000 |
+index 0d4dd80e9f07f..f8f9a74891293 100644 |
18001 |
+--- a/drivers/pwm/pwm-mediatek.c |
18002 |
++++ b/drivers/pwm/pwm-mediatek.c |
18003 |
+@@ -275,7 +275,7 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = { |
18004 |
+ static const struct pwm_mediatek_of_data mt7622_pwm_data = { |
18005 |
+ .num_pwms = 6, |
18006 |
+ .pwm45_fixup = false, |
18007 |
+- .has_ck_26m_sel = false, |
18008 |
++ .has_ck_26m_sel = true, |
18009 |
+ }; |
18010 |
+ |
18011 |
+ static const struct pwm_mediatek_of_data mt7623_pwm_data = { |
18012 |
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c |
18013 |
+index c605013e4114c..3fbb4bae93a4e 100644 |
18014 |
+--- a/drivers/pwm/pwm-mtk-disp.c |
18015 |
++++ b/drivers/pwm/pwm-mtk-disp.c |
18016 |
+@@ -178,7 +178,7 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip, |
18017 |
+ { |
18018 |
+ struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip); |
18019 |
+ u64 rate, period, high_width; |
18020 |
+- u32 clk_div, con0, con1; |
18021 |
++ u32 clk_div, pwm_en, con0, con1; |
18022 |
+ int err; |
18023 |
+ |
18024 |
+ err = clk_prepare_enable(mdp->clk_main); |
18025 |
+@@ -197,7 +197,8 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip, |
18026 |
+ rate = clk_get_rate(mdp->clk_main); |
18027 |
+ con0 = readl(mdp->base + mdp->data->con0); |
18028 |
+ con1 = readl(mdp->base + mdp->data->con1); |
18029 |
+- state->enabled = !!(con0 & BIT(0)); |
18030 |
++ pwm_en = readl(mdp->base + DISP_PWM_EN); |
18031 |
++ state->enabled = !!(pwm_en & mdp->data->enable_mask); |
18032 |
+ clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0); |
18033 |
+ period = FIELD_GET(PWM_PERIOD_MASK, con1); |
18034 |
+ /* |
18035 |
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c |
18036 |
+index 58347fcd48125..07e9fc58354f4 100644 |
18037 |
+--- a/drivers/pwm/pwm-sifive.c |
18038 |
++++ b/drivers/pwm/pwm-sifive.c |
18039 |
+@@ -217,8 +217,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb, |
18040 |
+ struct pwm_sifive_ddata *ddata = |
18041 |
+ container_of(nb, struct pwm_sifive_ddata, notifier); |
18042 |
+ |
18043 |
+- if (event == POST_RATE_CHANGE) |
18044 |
++ if (event == POST_RATE_CHANGE) { |
18045 |
++ mutex_lock(&ddata->lock); |
18046 |
+ pwm_sifive_update_clock(ddata, ndata->new_rate); |
18047 |
++ mutex_unlock(&ddata->lock); |
18048 |
++ } |
18049 |
+ |
18050 |
+ return NOTIFY_OK; |
18051 |
+ } |
18052 |
+diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c |
18053 |
+index 11a10b575ace9..6a1ff9d42f795 100644 |
18054 |
+--- a/drivers/pwm/pwm-tegra.c |
18055 |
++++ b/drivers/pwm/pwm-tegra.c |
18056 |
+@@ -142,8 +142,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, |
18057 |
+ * source clock rate as required_clk_rate, PWM controller will |
18058 |
+ * be able to configure the requested period. |
18059 |
+ */ |
18060 |
+- required_clk_rate = |
18061 |
+- (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; |
18062 |
++ required_clk_rate = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC << PWM_DUTY_WIDTH, |
18063 |
++ period_ns); |
18064 |
+ |
18065 |
+ err = clk_set_rate(pc->clk, required_clk_rate); |
18066 |
+ if (err < 0) |
18067 |
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c |
18068 |
+index 94331d999d273..5ac2dc1e2abd8 100644 |
18069 |
+--- a/drivers/rapidio/devices/rio_mport_cdev.c |
18070 |
++++ b/drivers/rapidio/devices/rio_mport_cdev.c |
18071 |
+@@ -1803,8 +1803,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, |
18072 |
+ rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], |
18073 |
+ 0, 0xffff); |
18074 |
+ err = rio_add_device(rdev); |
18075 |
+- if (err) |
18076 |
+- goto cleanup; |
18077 |
++ if (err) { |
18078 |
++ put_device(&rdev->dev); |
18079 |
++ return err; |
18080 |
++ } |
18081 |
++ |
18082 |
+ rio_dev_get(rdev); |
18083 |
+ |
18084 |
+ return 0; |
18085 |
+@@ -1900,10 +1903,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) |
18086 |
+ |
18087 |
+ priv->md = chdev; |
18088 |
+ |
18089 |
+- mutex_lock(&chdev->file_mutex); |
18090 |
+- list_add_tail(&priv->list, &chdev->file_list); |
18091 |
+- mutex_unlock(&chdev->file_mutex); |
18092 |
+- |
18093 |
+ INIT_LIST_HEAD(&priv->db_filters); |
18094 |
+ INIT_LIST_HEAD(&priv->pw_filters); |
18095 |
+ spin_lock_init(&priv->fifo_lock); |
18096 |
+@@ -1912,6 +1911,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) |
18097 |
+ sizeof(struct rio_event) * MPORT_EVENT_DEPTH, |
18098 |
+ GFP_KERNEL); |
18099 |
+ if (ret < 0) { |
18100 |
++ put_device(&chdev->dev); |
18101 |
+ dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); |
18102 |
+ ret = -ENOMEM; |
18103 |
+ goto err_fifo; |
18104 |
+@@ -1922,6 +1922,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp) |
18105 |
+ spin_lock_init(&priv->req_lock); |
18106 |
+ mutex_init(&priv->dma_lock); |
18107 |
+ #endif |
18108 |
++ mutex_lock(&chdev->file_mutex); |
18109 |
++ list_add_tail(&priv->list, &chdev->file_list); |
18110 |
++ mutex_unlock(&chdev->file_mutex); |
18111 |
+ |
18112 |
+ filp->private_data = priv; |
18113 |
+ goto out; |
18114 |
+diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c |
18115 |
+index 19b0c33f4a62a..fdcf742b2adbc 100644 |
18116 |
+--- a/drivers/rapidio/rio-scan.c |
18117 |
++++ b/drivers/rapidio/rio-scan.c |
18118 |
+@@ -454,8 +454,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, |
18119 |
+ 0, 0xffff); |
18120 |
+ |
18121 |
+ ret = rio_add_device(rdev); |
18122 |
+- if (ret) |
18123 |
+- goto cleanup; |
18124 |
++ if (ret) { |
18125 |
++ if (rswitch) |
18126 |
++ kfree(rswitch->route_table); |
18127 |
++ put_device(&rdev->dev); |
18128 |
++ return NULL; |
18129 |
++ } |
18130 |
+ |
18131 |
+ rio_dev_get(rdev); |
18132 |
+ |
18133 |
+diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c |
18134 |
+index e74cf09eeff07..9544b8ee0c963 100644 |
18135 |
+--- a/drivers/rapidio/rio.c |
18136 |
++++ b/drivers/rapidio/rio.c |
18137 |
+@@ -2186,11 +2186,16 @@ int rio_register_mport(struct rio_mport *port) |
18138 |
+ atomic_set(&port->state, RIO_DEVICE_RUNNING); |
18139 |
+ |
18140 |
+ res = device_register(&port->dev); |
18141 |
+- if (res) |
18142 |
++ if (res) { |
18143 |
+ dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", |
18144 |
+ port->id, res); |
18145 |
+- else |
18146 |
++ mutex_lock(&rio_mport_list_lock); |
18147 |
++ list_del(&port->node); |
18148 |
++ mutex_unlock(&rio_mport_list_lock); |
18149 |
++ put_device(&port->dev); |
18150 |
++ } else { |
18151 |
+ dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id); |
18152 |
++ } |
18153 |
+ |
18154 |
+ return res; |
18155 |
+ } |
18156 |
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c |
18157 |
+index 221ae807b379c..3eae3aa5ad1d2 100644 |
18158 |
+--- a/drivers/regulator/core.c |
18159 |
++++ b/drivers/regulator/core.c |
18160 |
+@@ -962,7 +962,7 @@ static int drms_uA_update(struct regulator_dev *rdev) |
18161 |
+ /* get input voltage */ |
18162 |
+ input_uV = 0; |
18163 |
+ if (rdev->supply) |
18164 |
+- input_uV = regulator_get_voltage(rdev->supply); |
18165 |
++ input_uV = regulator_get_voltage_rdev(rdev->supply->rdev); |
18166 |
+ if (input_uV <= 0) |
18167 |
+ input_uV = rdev->constraints->input_uV; |
18168 |
+ if (input_uV <= 0) { |
18169 |
+@@ -1531,7 +1531,13 @@ static int set_machine_constraints(struct regulator_dev *rdev) |
18170 |
+ if (rdev->supply_name && !rdev->supply) |
18171 |
+ return -EPROBE_DEFER; |
18172 |
+ |
18173 |
+- if (rdev->supply) { |
18174 |
++ /* If supplying regulator has already been enabled, |
18175 |
++ * it's not intended to have use_count increment |
18176 |
++ * when rdev is only boot-on. |
18177 |
++ */ |
18178 |
++ if (rdev->supply && |
18179 |
++ (rdev->constraints->always_on || |
18180 |
++ !regulator_is_enabled(rdev->supply))) { |
18181 |
+ ret = regulator_enable(rdev->supply); |
18182 |
+ if (ret < 0) { |
18183 |
+ _regulator_put(rdev->supply); |
18184 |
+@@ -1577,6 +1583,7 @@ static int set_supply(struct regulator_dev *rdev, |
18185 |
+ |
18186 |
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); |
18187 |
+ if (rdev->supply == NULL) { |
18188 |
++ module_put(supply_rdev->owner); |
18189 |
+ err = -ENOMEM; |
18190 |
+ return err; |
18191 |
+ } |
18192 |
+@@ -1750,7 +1757,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, |
18193 |
+ |
18194 |
+ regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); |
18195 |
+ if (regulator == NULL) { |
18196 |
+- kfree(supply_name); |
18197 |
++ kfree_const(supply_name); |
18198 |
+ return NULL; |
18199 |
+ } |
18200 |
+ |
18201 |
+@@ -1880,6 +1887,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev, |
18202 |
+ node = of_get_regulator(dev, supply); |
18203 |
+ if (node) { |
18204 |
+ r = of_find_regulator_by_node(node); |
18205 |
++ of_node_put(node); |
18206 |
+ if (r) |
18207 |
+ return r; |
18208 |
+ |
18209 |
+@@ -5546,6 +5554,7 @@ unset_supplies: |
18210 |
+ regulator_remove_coupling(rdev); |
18211 |
+ mutex_unlock(®ulator_list_mutex); |
18212 |
+ wash: |
18213 |
++ regulator_put(rdev->supply); |
18214 |
+ kfree(rdev->coupling_desc.coupled_rdevs); |
18215 |
+ mutex_lock(®ulator_list_mutex); |
18216 |
+ regulator_ena_gpio_free(rdev); |
18217 |
+diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c |
18218 |
+index 639b71eb41ffe..bcf7140f3bc98 100644 |
18219 |
+--- a/drivers/regulator/qcom-labibb-regulator.c |
18220 |
++++ b/drivers/regulator/qcom-labibb-regulator.c |
18221 |
+@@ -822,6 +822,7 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev) |
18222 |
+ if (irq == 0) |
18223 |
+ irq = -EINVAL; |
18224 |
+ |
18225 |
++ of_node_put(reg_node); |
18226 |
+ return dev_err_probe(vreg->dev, irq, |
18227 |
+ "Short-circuit irq not found.\n"); |
18228 |
+ } |
18229 |
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c |
18230 |
+index 7f458d510483f..27efdbbd90d9a 100644 |
18231 |
+--- a/drivers/regulator/qcom-rpmh-regulator.c |
18232 |
++++ b/drivers/regulator/qcom-rpmh-regulator.c |
18233 |
+@@ -1108,7 +1108,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = { |
18234 |
+ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = { |
18235 |
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps520, "vdd-s1"), |
18236 |
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"), |
18237 |
+- RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps510, "vdd-s3"), |
18238 |
++ RPMH_VREG("smps3", "smp%s3", &pmic5_hfsmps515, "vdd-s3"), |
18239 |
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l2"), |
18240 |
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l1-l2"), |
18241 |
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), |
18242 |
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c |
18243 |
+index 78d90d856e405..fbcbc00f2e645 100644 |
18244 |
+--- a/drivers/remoteproc/qcom_q6v5_pas.c |
18245 |
++++ b/drivers/remoteproc/qcom_q6v5_pas.c |
18246 |
+@@ -386,6 +386,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) |
18247 |
+ } |
18248 |
+ |
18249 |
+ ret = of_address_to_resource(node, 0, &r); |
18250 |
++ of_node_put(node); |
18251 |
+ if (ret) |
18252 |
+ return ret; |
18253 |
+ |
18254 |
+@@ -498,6 +499,7 @@ detach_proxy_pds: |
18255 |
+ detach_active_pds: |
18256 |
+ adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count); |
18257 |
+ free_rproc: |
18258 |
++ device_init_wakeup(adsp->dev, false); |
18259 |
+ rproc_free(rproc); |
18260 |
+ |
18261 |
+ return ret; |
18262 |
+@@ -513,6 +515,8 @@ static int adsp_remove(struct platform_device *pdev) |
18263 |
+ qcom_remove_sysmon_subdev(adsp->sysmon); |
18264 |
+ qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); |
18265 |
+ qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); |
18266 |
++ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); |
18267 |
++ device_init_wakeup(adsp->dev, false); |
18268 |
+ rproc_free(adsp->rproc); |
18269 |
+ |
18270 |
+ return 0; |
18271 |
+diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c |
18272 |
+index 20d50ec7eff1b..cfd34ffcbb121 100644 |
18273 |
+--- a/drivers/remoteproc/qcom_q6v5_wcss.c |
18274 |
++++ b/drivers/remoteproc/qcom_q6v5_wcss.c |
18275 |
+@@ -351,7 +351,7 @@ static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss) |
18276 |
+ if (ret) { |
18277 |
+ dev_err(wcss->dev, |
18278 |
+ "xo cbcr enabling timed out (rc:%d)\n", ret); |
18279 |
+- return ret; |
18280 |
++ goto disable_xo_cbcr_clk; |
18281 |
+ } |
18282 |
+ |
18283 |
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE); |
18284 |
+@@ -417,6 +417,7 @@ disable_sleep_cbcr_clk: |
18285 |
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); |
18286 |
+ val &= ~Q6SS_CLK_ENABLE; |
18287 |
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); |
18288 |
++disable_xo_cbcr_clk: |
18289 |
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR); |
18290 |
+ val &= ~Q6SS_CLK_ENABLE; |
18291 |
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR); |
18292 |
+@@ -827,6 +828,9 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss, |
18293 |
+ int ret; |
18294 |
+ |
18295 |
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); |
18296 |
++ if (!res) |
18297 |
++ return -EINVAL; |
18298 |
++ |
18299 |
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start, |
18300 |
+ resource_size(res)); |
18301 |
+ if (!wcss->reg_base) |
18302 |
+diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c |
18303 |
+index a9f04dd83ab68..fbfaf2637a91a 100644 |
18304 |
+--- a/drivers/remoteproc/qcom_sysmon.c |
18305 |
++++ b/drivers/remoteproc/qcom_sysmon.c |
18306 |
+@@ -650,7 +650,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, |
18307 |
+ if (sysmon->shutdown_irq != -ENODATA) { |
18308 |
+ dev_err(sysmon->dev, |
18309 |
+ "failed to retrieve shutdown-ack IRQ\n"); |
18310 |
+- return ERR_PTR(sysmon->shutdown_irq); |
18311 |
++ ret = sysmon->shutdown_irq; |
18312 |
++ kfree(sysmon); |
18313 |
++ return ERR_PTR(ret); |
18314 |
+ } |
18315 |
+ } else { |
18316 |
+ ret = devm_request_threaded_irq(sysmon->dev, |
18317 |
+@@ -661,6 +663,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, |
18318 |
+ if (ret) { |
18319 |
+ dev_err(sysmon->dev, |
18320 |
+ "failed to acquire shutdown-ack IRQ\n"); |
18321 |
++ kfree(sysmon); |
18322 |
+ return ERR_PTR(ret); |
18323 |
+ } |
18324 |
+ } |
18325 |
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c |
18326 |
+index 7c006c2b125f8..00e2ca7374ecf 100644 |
18327 |
+--- a/drivers/rtc/rtc-cmos.c |
18328 |
++++ b/drivers/rtc/rtc-cmos.c |
18329 |
+@@ -744,6 +744,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p) |
18330 |
+ return IRQ_NONE; |
18331 |
+ } |
18332 |
+ |
18333 |
++#ifdef CONFIG_ACPI |
18334 |
++ |
18335 |
++#include <linux/acpi.h> |
18336 |
++ |
18337 |
++static u32 rtc_handler(void *context) |
18338 |
++{ |
18339 |
++ struct device *dev = context; |
18340 |
++ struct cmos_rtc *cmos = dev_get_drvdata(dev); |
18341 |
++ unsigned char rtc_control = 0; |
18342 |
++ unsigned char rtc_intr; |
18343 |
++ unsigned long flags; |
18344 |
++ |
18345 |
++ |
18346 |
++ /* |
18347 |
++ * Always update rtc irq when ACPI is used as RTC Alarm. |
18348 |
++ * Or else, ACPI SCI is enabled during suspend/resume only, |
18349 |
++ * update rtc irq in that case. |
18350 |
++ */ |
18351 |
++ if (cmos_use_acpi_alarm()) |
18352 |
++ cmos_interrupt(0, (void *)cmos->rtc); |
18353 |
++ else { |
18354 |
++ /* Fix me: can we use cmos_interrupt() here as well? */ |
18355 |
++ spin_lock_irqsave(&rtc_lock, flags); |
18356 |
++ if (cmos_rtc.suspend_ctrl) |
18357 |
++ rtc_control = CMOS_READ(RTC_CONTROL); |
18358 |
++ if (rtc_control & RTC_AIE) { |
18359 |
++ cmos_rtc.suspend_ctrl &= ~RTC_AIE; |
18360 |
++ CMOS_WRITE(rtc_control, RTC_CONTROL); |
18361 |
++ rtc_intr = CMOS_READ(RTC_INTR_FLAGS); |
18362 |
++ rtc_update_irq(cmos->rtc, 1, rtc_intr); |
18363 |
++ } |
18364 |
++ spin_unlock_irqrestore(&rtc_lock, flags); |
18365 |
++ } |
18366 |
++ |
18367 |
++ pm_wakeup_hard_event(dev); |
18368 |
++ acpi_clear_event(ACPI_EVENT_RTC); |
18369 |
++ acpi_disable_event(ACPI_EVENT_RTC, 0); |
18370 |
++ return ACPI_INTERRUPT_HANDLED; |
18371 |
++} |
18372 |
++ |
18373 |
++static void acpi_rtc_event_setup(struct device *dev) |
18374 |
++{ |
18375 |
++ if (acpi_disabled) |
18376 |
++ return; |
18377 |
++ |
18378 |
++ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); |
18379 |
++ /* |
18380 |
++ * After the RTC handler is installed, the Fixed_RTC event should |
18381 |
++ * be disabled. Only when the RTC alarm is set will it be enabled. |
18382 |
++ */ |
18383 |
++ acpi_clear_event(ACPI_EVENT_RTC); |
18384 |
++ acpi_disable_event(ACPI_EVENT_RTC, 0); |
18385 |
++} |
18386 |
++ |
18387 |
++static void acpi_rtc_event_cleanup(void) |
18388 |
++{ |
18389 |
++ if (acpi_disabled) |
18390 |
++ return; |
18391 |
++ |
18392 |
++ acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler); |
18393 |
++} |
18394 |
++ |
18395 |
++static void rtc_wake_on(struct device *dev) |
18396 |
++{ |
18397 |
++ acpi_clear_event(ACPI_EVENT_RTC); |
18398 |
++ acpi_enable_event(ACPI_EVENT_RTC, 0); |
18399 |
++} |
18400 |
++ |
18401 |
++static void rtc_wake_off(struct device *dev) |
18402 |
++{ |
18403 |
++ acpi_disable_event(ACPI_EVENT_RTC, 0); |
18404 |
++} |
18405 |
++ |
18406 |
++#ifdef CONFIG_X86 |
18407 |
++/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ |
18408 |
++static void use_acpi_alarm_quirks(void) |
18409 |
++{ |
18410 |
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
18411 |
++ return; |
18412 |
++ |
18413 |
++ if (!is_hpet_enabled()) |
18414 |
++ return; |
18415 |
++ |
18416 |
++ if (dmi_get_bios_year() < 2015) |
18417 |
++ return; |
18418 |
++ |
18419 |
++ use_acpi_alarm = true; |
18420 |
++} |
18421 |
++#else |
18422 |
++static inline void use_acpi_alarm_quirks(void) { } |
18423 |
++#endif |
18424 |
++ |
18425 |
++static void acpi_cmos_wake_setup(struct device *dev) |
18426 |
++{ |
18427 |
++ if (acpi_disabled) |
18428 |
++ return; |
18429 |
++ |
18430 |
++ use_acpi_alarm_quirks(); |
18431 |
++ |
18432 |
++ cmos_rtc.wake_on = rtc_wake_on; |
18433 |
++ cmos_rtc.wake_off = rtc_wake_off; |
18434 |
++ |
18435 |
++ /* ACPI tables bug workaround. */ |
18436 |
++ if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { |
18437 |
++ dev_dbg(dev, "bogus FADT month_alarm (%d)\n", |
18438 |
++ acpi_gbl_FADT.month_alarm); |
18439 |
++ acpi_gbl_FADT.month_alarm = 0; |
18440 |
++ } |
18441 |
++ |
18442 |
++ cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm; |
18443 |
++ cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm; |
18444 |
++ cmos_rtc.century = acpi_gbl_FADT.century; |
18445 |
++ |
18446 |
++ if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) |
18447 |
++ dev_info(dev, "RTC can wake from S4\n"); |
18448 |
++ |
18449 |
++ /* RTC always wakes from S1/S2/S3, and often S4/STD */ |
18450 |
++ device_init_wakeup(dev, 1); |
18451 |
++} |
18452 |
++ |
18453 |
++static void cmos_check_acpi_rtc_status(struct device *dev, |
18454 |
++ unsigned char *rtc_control) |
18455 |
++{ |
18456 |
++ struct cmos_rtc *cmos = dev_get_drvdata(dev); |
18457 |
++ acpi_event_status rtc_status; |
18458 |
++ acpi_status status; |
18459 |
++ |
18460 |
++ if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) |
18461 |
++ return; |
18462 |
++ |
18463 |
++ status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); |
18464 |
++ if (ACPI_FAILURE(status)) { |
18465 |
++ dev_err(dev, "Could not get RTC status\n"); |
18466 |
++ } else if (rtc_status & ACPI_EVENT_FLAG_SET) { |
18467 |
++ unsigned char mask; |
18468 |
++ *rtc_control &= ~RTC_AIE; |
18469 |
++ CMOS_WRITE(*rtc_control, RTC_CONTROL); |
18470 |
++ mask = CMOS_READ(RTC_INTR_FLAGS); |
18471 |
++ rtc_update_irq(cmos->rtc, 1, mask); |
18472 |
++ } |
18473 |
++} |
18474 |
++ |
18475 |
++#else /* !CONFIG_ACPI */ |
18476 |
++ |
18477 |
++static inline void acpi_rtc_event_setup(struct device *dev) |
18478 |
++{ |
18479 |
++} |
18480 |
++ |
18481 |
++static inline void acpi_rtc_event_cleanup(void) |
18482 |
++{ |
18483 |
++} |
18484 |
++ |
18485 |
++static inline void acpi_cmos_wake_setup(struct device *dev) |
18486 |
++{ |
18487 |
++} |
18488 |
++ |
18489 |
++static inline void cmos_check_acpi_rtc_status(struct device *dev, |
18490 |
++ unsigned char *rtc_control) |
18491 |
++{ |
18492 |
++} |
18493 |
++#endif /* CONFIG_ACPI */ |
18494 |
++ |
18495 |
+ #ifdef CONFIG_PNP |
18496 |
+ #define INITSECTION |
18497 |
+ |
18498 |
+@@ -827,19 +989,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) |
18499 |
+ if (info->address_space) |
18500 |
+ address_space = info->address_space; |
18501 |
+ |
18502 |
+- if (info->rtc_day_alarm && info->rtc_day_alarm < 128) |
18503 |
+- cmos_rtc.day_alrm = info->rtc_day_alarm; |
18504 |
+- if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128) |
18505 |
+- cmos_rtc.mon_alrm = info->rtc_mon_alarm; |
18506 |
+- if (info->rtc_century && info->rtc_century < 128) |
18507 |
+- cmos_rtc.century = info->rtc_century; |
18508 |
++ cmos_rtc.day_alrm = info->rtc_day_alarm; |
18509 |
++ cmos_rtc.mon_alrm = info->rtc_mon_alarm; |
18510 |
++ cmos_rtc.century = info->rtc_century; |
18511 |
+ |
18512 |
+ if (info->wake_on && info->wake_off) { |
18513 |
+ cmos_rtc.wake_on = info->wake_on; |
18514 |
+ cmos_rtc.wake_off = info->wake_off; |
18515 |
+ } |
18516 |
++ } else { |
18517 |
++ acpi_cmos_wake_setup(dev); |
18518 |
+ } |
18519 |
+ |
18520 |
++ if (cmos_rtc.day_alrm >= 128) |
18521 |
++ cmos_rtc.day_alrm = 0; |
18522 |
++ |
18523 |
++ if (cmos_rtc.mon_alrm >= 128) |
18524 |
++ cmos_rtc.mon_alrm = 0; |
18525 |
++ |
18526 |
++ if (cmos_rtc.century >= 128) |
18527 |
++ cmos_rtc.century = 0; |
18528 |
++ |
18529 |
+ cmos_rtc.dev = dev; |
18530 |
+ dev_set_drvdata(dev, &cmos_rtc); |
18531 |
+ |
18532 |
+@@ -928,6 +1098,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) |
18533 |
+ nvmem_cfg.size = address_space - NVRAM_OFFSET; |
18534 |
+ devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg); |
18535 |
+ |
18536 |
++ /* |
18537 |
++ * Everything has gone well so far, so by default register a handler for |
18538 |
++ * the ACPI RTC fixed event. |
18539 |
++ */ |
18540 |
++ if (!info) |
18541 |
++ acpi_rtc_event_setup(dev); |
18542 |
++ |
18543 |
+ dev_info(dev, "%s%s, %d bytes nvram%s\n", |
18544 |
+ !is_valid_irq(rtc_irq) ? "no alarms" : |
18545 |
+ cmos_rtc.mon_alrm ? "alarms up to one year" : |
18546 |
+@@ -973,6 +1150,9 @@ static void cmos_do_remove(struct device *dev) |
18547 |
+ hpet_unregister_irq_handler(cmos_interrupt); |
18548 |
+ } |
18549 |
+ |
18550 |
++ if (!dev_get_platdata(dev)) |
18551 |
++ acpi_rtc_event_cleanup(); |
18552 |
++ |
18553 |
+ cmos->rtc = NULL; |
18554 |
+ |
18555 |
+ ports = cmos->iomem; |
18556 |
+@@ -1122,9 +1302,6 @@ static void cmos_check_wkalrm(struct device *dev) |
18557 |
+ } |
18558 |
+ } |
18559 |
+ |
18560 |
+-static void cmos_check_acpi_rtc_status(struct device *dev, |
18561 |
+- unsigned char *rtc_control); |
18562 |
+- |
18563 |
+ static int __maybe_unused cmos_resume(struct device *dev) |
18564 |
+ { |
18565 |
+ struct cmos_rtc *cmos = dev_get_drvdata(dev); |
18566 |
+@@ -1191,174 +1368,16 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); |
18567 |
+ * predate even PNPBIOS should set up platform_bus devices. |
18568 |
+ */ |
18569 |
+ |
18570 |
+-#ifdef CONFIG_ACPI |
18571 |
+- |
18572 |
+-#include <linux/acpi.h> |
18573 |
+- |
18574 |
+-static u32 rtc_handler(void *context) |
18575 |
+-{ |
18576 |
+- struct device *dev = context; |
18577 |
+- struct cmos_rtc *cmos = dev_get_drvdata(dev); |
18578 |
+- unsigned char rtc_control = 0; |
18579 |
+- unsigned char rtc_intr; |
18580 |
+- unsigned long flags; |
18581 |
+- |
18582 |
+- |
18583 |
+- /* |
18584 |
+- * Always update rtc irq when ACPI is used as RTC Alarm. |
18585 |
+- * Or else, ACPI SCI is enabled during suspend/resume only, |
18586 |
+- * update rtc irq in that case. |
18587 |
+- */ |
18588 |
+- if (cmos_use_acpi_alarm()) |
18589 |
+- cmos_interrupt(0, (void *)cmos->rtc); |
18590 |
+- else { |
18591 |
+- /* Fix me: can we use cmos_interrupt() here as well? */ |
18592 |
+- spin_lock_irqsave(&rtc_lock, flags); |
18593 |
+- if (cmos_rtc.suspend_ctrl) |
18594 |
+- rtc_control = CMOS_READ(RTC_CONTROL); |
18595 |
+- if (rtc_control & RTC_AIE) { |
18596 |
+- cmos_rtc.suspend_ctrl &= ~RTC_AIE; |
18597 |
+- CMOS_WRITE(rtc_control, RTC_CONTROL); |
18598 |
+- rtc_intr = CMOS_READ(RTC_INTR_FLAGS); |
18599 |
+- rtc_update_irq(cmos->rtc, 1, rtc_intr); |
18600 |
+- } |
18601 |
+- spin_unlock_irqrestore(&rtc_lock, flags); |
18602 |
+- } |
18603 |
+- |
18604 |
+- pm_wakeup_hard_event(dev); |
18605 |
+- acpi_clear_event(ACPI_EVENT_RTC); |
18606 |
+- acpi_disable_event(ACPI_EVENT_RTC, 0); |
18607 |
+- return ACPI_INTERRUPT_HANDLED; |
18608 |
+-} |
18609 |
+- |
18610 |
+-static inline void rtc_wake_setup(struct device *dev) |
18611 |
+-{ |
18612 |
+- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); |
18613 |
+- /* |
18614 |
+- * After the RTC handler is installed, the Fixed_RTC event should |
18615 |
+- * be disabled. Only when the RTC alarm is set will it be enabled. |
18616 |
+- */ |
18617 |
+- acpi_clear_event(ACPI_EVENT_RTC); |
18618 |
+- acpi_disable_event(ACPI_EVENT_RTC, 0); |
18619 |
+-} |
18620 |
+- |
18621 |
+-static void rtc_wake_on(struct device *dev) |
18622 |
+-{ |
18623 |
+- acpi_clear_event(ACPI_EVENT_RTC); |
18624 |
+- acpi_enable_event(ACPI_EVENT_RTC, 0); |
18625 |
+-} |
18626 |
+- |
18627 |
+-static void rtc_wake_off(struct device *dev) |
18628 |
+-{ |
18629 |
+- acpi_disable_event(ACPI_EVENT_RTC, 0); |
18630 |
+-} |
18631 |
+- |
18632 |
+-#ifdef CONFIG_X86 |
18633 |
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */ |
18634 |
+-static void use_acpi_alarm_quirks(void) |
18635 |
+-{ |
18636 |
+- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
18637 |
+- return; |
18638 |
+- |
18639 |
+- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) |
18640 |
+- return; |
18641 |
+- |
18642 |
+- if (!is_hpet_enabled()) |
18643 |
+- return; |
18644 |
+- |
18645 |
+- if (dmi_get_bios_year() < 2015) |
18646 |
+- return; |
18647 |
+- |
18648 |
+- use_acpi_alarm = true; |
18649 |
+-} |
18650 |
+-#else |
18651 |
+-static inline void use_acpi_alarm_quirks(void) { } |
18652 |
+-#endif |
18653 |
+- |
18654 |
+-/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find |
18655 |
+- * its device node and pass extra config data. This helps its driver use |
18656 |
+- * capabilities that the now-obsolete mc146818 didn't have, and informs it |
18657 |
+- * that this board's RTC is wakeup-capable (per ACPI spec). |
18658 |
+- */ |
18659 |
+-static struct cmos_rtc_board_info acpi_rtc_info; |
18660 |
+- |
18661 |
+-static void cmos_wake_setup(struct device *dev) |
18662 |
+-{ |
18663 |
+- if (acpi_disabled) |
18664 |
+- return; |
18665 |
+- |
18666 |
+- use_acpi_alarm_quirks(); |
18667 |
+- |
18668 |
+- rtc_wake_setup(dev); |
18669 |
+- acpi_rtc_info.wake_on = rtc_wake_on; |
18670 |
+- acpi_rtc_info.wake_off = rtc_wake_off; |
18671 |
+- |
18672 |
+- /* workaround bug in some ACPI tables */ |
18673 |
+- if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) { |
18674 |
+- dev_dbg(dev, "bogus FADT month_alarm (%d)\n", |
18675 |
+- acpi_gbl_FADT.month_alarm); |
18676 |
+- acpi_gbl_FADT.month_alarm = 0; |
18677 |
+- } |
18678 |
+- |
18679 |
+- acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm; |
18680 |
+- acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm; |
18681 |
+- acpi_rtc_info.rtc_century = acpi_gbl_FADT.century; |
18682 |
+- |
18683 |
+- /* NOTE: S4_RTC_WAKE is NOT currently useful to Linux */ |
18684 |
+- if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE) |
18685 |
+- dev_info(dev, "RTC can wake from S4\n"); |
18686 |
+- |
18687 |
+- dev->platform_data = &acpi_rtc_info; |
18688 |
+- |
18689 |
+- /* RTC always wakes from S1/S2/S3, and often S4/STD */ |
18690 |
+- device_init_wakeup(dev, 1); |
18691 |
+-} |
18692 |
+- |
18693 |
+-static void cmos_check_acpi_rtc_status(struct device *dev, |
18694 |
+- unsigned char *rtc_control) |
18695 |
+-{ |
18696 |
+- struct cmos_rtc *cmos = dev_get_drvdata(dev); |
18697 |
+- acpi_event_status rtc_status; |
18698 |
+- acpi_status status; |
18699 |
+- |
18700 |
+- if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC) |
18701 |
+- return; |
18702 |
+- |
18703 |
+- status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status); |
18704 |
+- if (ACPI_FAILURE(status)) { |
18705 |
+- dev_err(dev, "Could not get RTC status\n"); |
18706 |
+- } else if (rtc_status & ACPI_EVENT_FLAG_SET) { |
18707 |
+- unsigned char mask; |
18708 |
+- *rtc_control &= ~RTC_AIE; |
18709 |
+- CMOS_WRITE(*rtc_control, RTC_CONTROL); |
18710 |
+- mask = CMOS_READ(RTC_INTR_FLAGS); |
18711 |
+- rtc_update_irq(cmos->rtc, 1, mask); |
18712 |
+- } |
18713 |
+-} |
18714 |
+- |
18715 |
+-#else |
18716 |
+- |
18717 |
+-static void cmos_wake_setup(struct device *dev) |
18718 |
+-{ |
18719 |
+-} |
18720 |
+- |
18721 |
+-static void cmos_check_acpi_rtc_status(struct device *dev, |
18722 |
+- unsigned char *rtc_control) |
18723 |
+-{ |
18724 |
+-} |
18725 |
+- |
18726 |
+-#endif |
18727 |
+- |
18728 |
+ #ifdef CONFIG_PNP |
18729 |
+ |
18730 |
+ #include <linux/pnp.h> |
18731 |
+ |
18732 |
+ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
18733 |
+ { |
18734 |
+- cmos_wake_setup(&pnp->dev); |
18735 |
++ int irq; |
18736 |
+ |
18737 |
+ if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { |
18738 |
+- unsigned int irq = 0; |
18739 |
++ irq = 0; |
18740 |
+ #ifdef CONFIG_X86 |
18741 |
+ /* Some machines contain a PNP entry for the RTC, but |
18742 |
+ * don't define the IRQ. It should always be safe to |
18743 |
+@@ -1367,13 +1386,11 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
18744 |
+ if (nr_legacy_irqs()) |
18745 |
+ irq = RTC_IRQ; |
18746 |
+ #endif |
18747 |
+- return cmos_do_probe(&pnp->dev, |
18748 |
+- pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); |
18749 |
+ } else { |
18750 |
+- return cmos_do_probe(&pnp->dev, |
18751 |
+- pnp_get_resource(pnp, IORESOURCE_IO, 0), |
18752 |
+- pnp_irq(pnp, 0)); |
18753 |
++ irq = pnp_irq(pnp, 0); |
18754 |
+ } |
18755 |
++ |
18756 |
++ return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); |
18757 |
+ } |
18758 |
+ |
18759 |
+ static void cmos_pnp_remove(struct pnp_dev *pnp) |
18760 |
+@@ -1460,7 +1477,6 @@ static int __init cmos_platform_probe(struct platform_device *pdev) |
18761 |
+ int irq; |
18762 |
+ |
18763 |
+ cmos_of_init(pdev); |
18764 |
+- cmos_wake_setup(&pdev->dev); |
18765 |
+ |
18766 |
+ if (RTC_IOMAPPED) |
18767 |
+ resource = platform_get_resource(pdev, IORESOURCE_IO, 0); |
18768 |
+diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c |
18769 |
+index 5e03834016294..f6d2ad91ff7a9 100644 |
18770 |
+--- a/drivers/rtc/rtc-mxc_v2.c |
18771 |
++++ b/drivers/rtc/rtc-mxc_v2.c |
18772 |
+@@ -336,8 +336,10 @@ static int mxc_rtc_probe(struct platform_device *pdev) |
18773 |
+ } |
18774 |
+ |
18775 |
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev); |
18776 |
+- if (IS_ERR(pdata->rtc)) |
18777 |
++ if (IS_ERR(pdata->rtc)) { |
18778 |
++ clk_disable_unprepare(pdata->clk); |
18779 |
+ return PTR_ERR(pdata->rtc); |
18780 |
++ } |
18781 |
+ |
18782 |
+ pdata->rtc->ops = &mxc_rtc_ops; |
18783 |
+ pdata->rtc->range_max = U32_MAX; |
18784 |
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c |
18785 |
+index 14da4ab301044..bf2e370907b73 100644 |
18786 |
+--- a/drivers/rtc/rtc-pcf85063.c |
18787 |
++++ b/drivers/rtc/rtc-pcf85063.c |
18788 |
+@@ -167,10 +167,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) |
18789 |
+ if (ret) |
18790 |
+ return ret; |
18791 |
+ |
18792 |
+- alrm->time.tm_sec = bcd2bin(buf[0]); |
18793 |
+- alrm->time.tm_min = bcd2bin(buf[1]); |
18794 |
+- alrm->time.tm_hour = bcd2bin(buf[2]); |
18795 |
+- alrm->time.tm_mday = bcd2bin(buf[3]); |
18796 |
++ alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f); |
18797 |
++ alrm->time.tm_min = bcd2bin(buf[1] & 0x7f); |
18798 |
++ alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f); |
18799 |
++ alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f); |
18800 |
+ |
18801 |
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val); |
18802 |
+ if (ret) |
18803 |
+@@ -422,7 +422,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable) |
18804 |
+ unsigned int buf; |
18805 |
+ int ret; |
18806 |
+ |
18807 |
+- ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf); |
18808 |
++ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); |
18809 |
+ if (ret < 0) |
18810 |
+ return ret; |
18811 |
+ buf &= PCF85063_REG_CLKO_F_MASK; |
18812 |
+diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c |
18813 |
+index 7fb9145c43bd5..fa351ac201587 100644 |
18814 |
+--- a/drivers/rtc/rtc-pic32.c |
18815 |
++++ b/drivers/rtc/rtc-pic32.c |
18816 |
+@@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev) |
18817 |
+ |
18818 |
+ spin_lock_init(&pdata->alarm_lock); |
18819 |
+ |
18820 |
++ pdata->rtc = devm_rtc_allocate_device(&pdev->dev); |
18821 |
++ if (IS_ERR(pdata->rtc)) |
18822 |
++ return PTR_ERR(pdata->rtc); |
18823 |
++ |
18824 |
+ clk_prepare_enable(pdata->clk); |
18825 |
+ |
18826 |
+ pic32_rtc_enable(pdata, 1); |
18827 |
+ |
18828 |
+ device_init_wakeup(&pdev->dev, 1); |
18829 |
+ |
18830 |
+- pdata->rtc = devm_rtc_allocate_device(&pdev->dev); |
18831 |
+- if (IS_ERR(pdata->rtc)) |
18832 |
+- return PTR_ERR(pdata->rtc); |
18833 |
+- |
18834 |
+ pdata->rtc->ops = &pic32_rtcops; |
18835 |
+ pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; |
18836 |
+ pdata->rtc->range_max = RTC_TIMESTAMP_END_2099; |
18837 |
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c |
18838 |
+index bd929b0e7d7de..d82acf1af1fae 100644 |
18839 |
+--- a/drivers/rtc/rtc-snvs.c |
18840 |
++++ b/drivers/rtc/rtc-snvs.c |
18841 |
+@@ -32,6 +32,14 @@ |
18842 |
+ #define SNVS_LPPGDR_INIT 0x41736166 |
18843 |
+ #define CNTR_TO_SECS_SH 15 |
18844 |
+ |
18845 |
++/* The maximum RTC clock cycles that are allowed to pass between two |
18846 |
++ * consecutive clock counter register reads. If the values are corrupted a |
18847 |
++ * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles |
18848 |
++ * we end at 10ms which should be enough for most cases. If it once takes |
18849 |
++ * longer than expected we do a retry. |
18850 |
++ */ |
18851 |
++#define MAX_RTC_READ_DIFF_CYCLES 320 |
18852 |
++ |
18853 |
+ struct snvs_rtc_data { |
18854 |
+ struct rtc_device *rtc; |
18855 |
+ struct regmap *regmap; |
18856 |
+@@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data) |
18857 |
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) |
18858 |
+ { |
18859 |
+ u64 read1, read2; |
18860 |
++ s64 diff; |
18861 |
+ unsigned int timeout = 100; |
18862 |
+ |
18863 |
+ /* As expected, the registers might update between the read of the LSB |
18864 |
+@@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) |
18865 |
+ do { |
18866 |
+ read2 = read1; |
18867 |
+ read1 = rtc_read_lpsrt(data); |
18868 |
+- } while (read1 != read2 && --timeout); |
18869 |
++ diff = read1 - read2; |
18870 |
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); |
18871 |
+ if (!timeout) |
18872 |
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); |
18873 |
+ |
18874 |
+@@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data) |
18875 |
+ static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb) |
18876 |
+ { |
18877 |
+ u32 count1, count2; |
18878 |
++ s32 diff; |
18879 |
+ unsigned int timeout = 100; |
18880 |
+ |
18881 |
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); |
18882 |
+ do { |
18883 |
+ count2 = count1; |
18884 |
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1); |
18885 |
+- } while (count1 != count2 && --timeout); |
18886 |
++ diff = count1 - count2; |
18887 |
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout); |
18888 |
+ if (!timeout) { |
18889 |
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n"); |
18890 |
+ return -ETIMEDOUT; |
18891 |
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c |
18892 |
+index bdb20f63254e2..0f8e4231098ef 100644 |
18893 |
+--- a/drivers/rtc/rtc-st-lpc.c |
18894 |
++++ b/drivers/rtc/rtc-st-lpc.c |
18895 |
+@@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev) |
18896 |
+ |
18897 |
+ rtc->clkrate = clk_get_rate(rtc->clk); |
18898 |
+ if (!rtc->clkrate) { |
18899 |
++ clk_disable_unprepare(rtc->clk); |
18900 |
+ dev_err(&pdev->dev, "Unable to fetch clock rate\n"); |
18901 |
+ return -EINVAL; |
18902 |
+ } |
18903 |
+diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c |
18904 |
+index fd705429708e8..f91b6cfd7ed01 100644 |
18905 |
+--- a/drivers/s390/net/ctcm_main.c |
18906 |
++++ b/drivers/s390/net/ctcm_main.c |
18907 |
+@@ -825,16 +825,9 @@ done: |
18908 |
+ /** |
18909 |
+ * Start transmission of a packet. |
18910 |
+ * Called from generic network device layer. |
18911 |
+- * |
18912 |
+- * skb Pointer to buffer containing the packet. |
18913 |
+- * dev Pointer to interface struct. |
18914 |
+- * |
18915 |
+- * returns 0 if packet consumed, !0 if packet rejected. |
18916 |
+- * Note: If we return !0, then the packet is free'd by |
18917 |
+- * the generic network layer. |
18918 |
+ */ |
18919 |
+ /* first merge version - leaving both functions separated */ |
18920 |
+-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) |
18921 |
++static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev) |
18922 |
+ { |
18923 |
+ struct ctcm_priv *priv = dev->ml_priv; |
18924 |
+ |
18925 |
+@@ -877,7 +870,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) |
18926 |
+ } |
18927 |
+ |
18928 |
+ /* unmerged MPC variant of ctcm_tx */ |
18929 |
+-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) |
18930 |
++static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) |
18931 |
+ { |
18932 |
+ int len = 0; |
18933 |
+ struct ctcm_priv *priv = dev->ml_priv; |
18934 |
+diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c |
18935 |
+index 06a322bdced6d..7e743f4717a91 100644 |
18936 |
+--- a/drivers/s390/net/lcs.c |
18937 |
++++ b/drivers/s390/net/lcs.c |
18938 |
+@@ -1518,9 +1518,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) |
18939 |
+ /** |
18940 |
+ * Packet transmit function called by network stack |
18941 |
+ */ |
18942 |
+-static int |
18943 |
+-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, |
18944 |
+- struct net_device *dev) |
18945 |
++static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, |
18946 |
++ struct net_device *dev) |
18947 |
+ { |
18948 |
+ struct lcs_header *header; |
18949 |
+ int rc = NETDEV_TX_OK; |
18950 |
+@@ -1581,8 +1580,7 @@ out: |
18951 |
+ return rc; |
18952 |
+ } |
18953 |
+ |
18954 |
+-static int |
18955 |
+-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) |
18956 |
++static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) |
18957 |
+ { |
18958 |
+ struct lcs_card *card; |
18959 |
+ int rc; |
18960 |
+diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c |
18961 |
+index 5a0c2f07a3a25..ce5f0ffd6cc8d 100644 |
18962 |
+--- a/drivers/s390/net/netiucv.c |
18963 |
++++ b/drivers/s390/net/netiucv.c |
18964 |
+@@ -1252,15 +1252,8 @@ static int netiucv_close(struct net_device *dev) |
18965 |
+ /** |
18966 |
+ * Start transmission of a packet. |
18967 |
+ * Called from generic network device layer. |
18968 |
+- * |
18969 |
+- * @param skb Pointer to buffer containing the packet. |
18970 |
+- * @param dev Pointer to interface struct. |
18971 |
+- * |
18972 |
+- * @return 0 if packet consumed, !0 if packet rejected. |
18973 |
+- * Note: If we return !0, then the packet is free'd by |
18974 |
+- * the generic network layer. |
18975 |
+ */ |
18976 |
+-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) |
18977 |
++static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev) |
18978 |
+ { |
18979 |
+ struct netiucv_priv *privptr = netdev_priv(dev); |
18980 |
+ int rc; |
18981 |
+diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c |
18982 |
+index eab68fd9337ac..37e1ab96ee5be 100644 |
18983 |
+--- a/drivers/scsi/elx/efct/efct_driver.c |
18984 |
++++ b/drivers/scsi/elx/efct/efct_driver.c |
18985 |
+@@ -42,6 +42,7 @@ efct_device_init(void) |
18986 |
+ |
18987 |
+ rc = efct_scsi_reg_fc_transport(); |
18988 |
+ if (rc) { |
18989 |
++ efct_scsi_tgt_driver_exit(); |
18990 |
+ pr_err("failed to register to FC host\n"); |
18991 |
+ return rc; |
18992 |
+ } |
18993 |
+diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h |
18994 |
+index ee291cabf7e05..b14e516be7d53 100644 |
18995 |
+--- a/drivers/scsi/elx/libefc/efclib.h |
18996 |
++++ b/drivers/scsi/elx/libefc/efclib.h |
18997 |
+@@ -58,10 +58,12 @@ enum efc_node_send_ls_acc { |
18998 |
+ #define EFC_LINK_STATUS_UP 0 |
18999 |
+ #define EFC_LINK_STATUS_DOWN 1 |
19000 |
+ |
19001 |
++enum efc_sm_event; |
19002 |
++ |
19003 |
+ /* State machine context header */ |
19004 |
+ struct efc_sm_ctx { |
19005 |
+ void (*current_state)(struct efc_sm_ctx *ctx, |
19006 |
+- u32 evt, void *arg); |
19007 |
++ enum efc_sm_event evt, void *arg); |
19008 |
+ |
19009 |
+ const char *description; |
19010 |
+ void *app; |
19011 |
+@@ -364,7 +366,7 @@ struct efc_node { |
19012 |
+ int prev_evt; |
19013 |
+ |
19014 |
+ void (*nodedb_state)(struct efc_sm_ctx *ctx, |
19015 |
+- u32 evt, void *arg); |
19016 |
++ enum efc_sm_event evt, void *arg); |
19017 |
+ struct timer_list gidpt_delay_timer; |
19018 |
+ u64 time_last_gidpt_msec; |
19019 |
+ |
19020 |
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c |
19021 |
+index 5ae6c207d3ac3..76dbdae0e9874 100644 |
19022 |
+--- a/drivers/scsi/fcoe/fcoe.c |
19023 |
++++ b/drivers/scsi/fcoe/fcoe.c |
19024 |
+@@ -2501,6 +2501,7 @@ static int __init fcoe_init(void) |
19025 |
+ |
19026 |
+ out_free: |
19027 |
+ mutex_unlock(&fcoe_config_mutex); |
19028 |
++ fcoe_transport_detach(&fcoe_sw_transport); |
19029 |
+ out_destroy: |
19030 |
+ destroy_workqueue(fcoe_wq); |
19031 |
+ return rc; |
19032 |
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c |
19033 |
+index af658aa38fedf..6260aa5ea6af8 100644 |
19034 |
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c |
19035 |
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c |
19036 |
+@@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, |
19037 |
+ |
19038 |
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); |
19039 |
+ error = device_register(&ctlr->dev); |
19040 |
+- if (error) |
19041 |
+- goto out_del_q2; |
19042 |
++ if (error) { |
19043 |
++ destroy_workqueue(ctlr->devloss_work_q); |
19044 |
++ destroy_workqueue(ctlr->work_q); |
19045 |
++ put_device(&ctlr->dev); |
19046 |
++ return NULL; |
19047 |
++ } |
19048 |
+ |
19049 |
+ return ctlr; |
19050 |
+ |
19051 |
+-out_del_q2: |
19052 |
+- destroy_workqueue(ctlr->devloss_work_q); |
19053 |
+- ctlr->devloss_work_q = NULL; |
19054 |
+ out_del_q: |
19055 |
+ destroy_workqueue(ctlr->work_q); |
19056 |
+ ctlr->work_q = NULL; |
19057 |
+@@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, |
19058 |
+ fcf->selected = new_fcf->selected; |
19059 |
+ |
19060 |
+ error = device_register(&fcf->dev); |
19061 |
+- if (error) |
19062 |
+- goto out_del; |
19063 |
++ if (error) { |
19064 |
++ put_device(&fcf->dev); |
19065 |
++ goto out; |
19066 |
++ } |
19067 |
+ |
19068 |
+ fcf->state = FCOE_FCF_STATE_CONNECTED; |
19069 |
+ list_add_tail(&fcf->peers, &ctlr->fcfs); |
19070 |
+ |
19071 |
+ return fcf; |
19072 |
+ |
19073 |
+-out_del: |
19074 |
+- kfree(fcf); |
19075 |
+ out: |
19076 |
+ return NULL; |
19077 |
+ } |
19078 |
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c |
19079 |
+index 3faa87fa296a2..cf7988de7b90b 100644 |
19080 |
+--- a/drivers/scsi/hpsa.c |
19081 |
++++ b/drivers/scsi/hpsa.c |
19082 |
+@@ -8927,7 +8927,7 @@ clean1: /* wq/aer/h */ |
19083 |
+ destroy_workqueue(h->monitor_ctlr_wq); |
19084 |
+ h->monitor_ctlr_wq = NULL; |
19085 |
+ } |
19086 |
+- kfree(h); |
19087 |
++ hpda_free_ctlr_info(h); |
19088 |
+ return rc; |
19089 |
+ } |
19090 |
+ |
19091 |
+@@ -9788,7 +9788,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) |
19092 |
+ return 0; |
19093 |
+ |
19094 |
+ free_sas_phy: |
19095 |
+- hpsa_free_sas_phy(hpsa_sas_phy); |
19096 |
++ sas_phy_free(hpsa_sas_phy->phy); |
19097 |
++ kfree(hpsa_sas_phy); |
19098 |
+ free_sas_port: |
19099 |
+ hpsa_free_sas_port(hpsa_sas_port); |
19100 |
+ free_sas_node: |
19101 |
+@@ -9824,10 +9825,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, |
19102 |
+ |
19103 |
+ rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); |
19104 |
+ if (rc) |
19105 |
+- goto free_sas_port; |
19106 |
++ goto free_sas_rphy; |
19107 |
+ |
19108 |
+ return 0; |
19109 |
+ |
19110 |
++free_sas_rphy: |
19111 |
++ sas_rphy_free(rphy); |
19112 |
+ free_sas_port: |
19113 |
+ hpsa_free_sas_port(hpsa_sas_port); |
19114 |
+ device->sas_port = NULL; |
19115 |
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c |
19116 |
+index 56b8a2d6ffe49..04fb7fc012264 100644 |
19117 |
+--- a/drivers/scsi/ipr.c |
19118 |
++++ b/drivers/scsi/ipr.c |
19119 |
+@@ -10869,11 +10869,19 @@ static struct notifier_block ipr_notifier = { |
19120 |
+ **/ |
19121 |
+ static int __init ipr_init(void) |
19122 |
+ { |
19123 |
++ int rc; |
19124 |
++ |
19125 |
+ ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", |
19126 |
+ IPR_DRIVER_VERSION, IPR_DRIVER_DATE); |
19127 |
+ |
19128 |
+ register_reboot_notifier(&ipr_notifier); |
19129 |
+- return pci_register_driver(&ipr_driver); |
19130 |
++ rc = pci_register_driver(&ipr_driver); |
19131 |
++ if (rc) { |
19132 |
++ unregister_reboot_notifier(&ipr_notifier); |
19133 |
++ return rc; |
19134 |
++ } |
19135 |
++ |
19136 |
++ return 0; |
19137 |
+ } |
19138 |
+ |
19139 |
+ /** |
19140 |
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
19141 |
+index d6e761adf1f1d..df3b190fccd16 100644 |
19142 |
+--- a/drivers/scsi/lpfc/lpfc_sli.c |
19143 |
++++ b/drivers/scsi/lpfc/lpfc_sli.c |
19144 |
+@@ -7992,10 +7992,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, |
19145 |
+ "IO_cnt", "Info", "BWutil(ms)"); |
19146 |
+ } |
19147 |
+ |
19148 |
+- /* Needs to be _bh because record is called from timer interrupt |
19149 |
++ /* Needs to be _irq because record is called from timer interrupt |
19150 |
+ * context |
19151 |
+ */ |
19152 |
+- spin_lock_bh(ring_lock); |
19153 |
++ spin_lock_irq(ring_lock); |
19154 |
+ while (*head_idx != *tail_idx) { |
19155 |
+ entry = &ring[*head_idx]; |
19156 |
+ |
19157 |
+@@ -8039,7 +8039,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, |
19158 |
+ if (cnt >= max_read_entries) |
19159 |
+ break; |
19160 |
+ } |
19161 |
+- spin_unlock_bh(ring_lock); |
19162 |
++ spin_unlock_irq(ring_lock); |
19163 |
+ |
19164 |
+ return cnt; |
19165 |
+ } |
19166 |
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c |
19167 |
+index 0681daee6c149..e5ecd6ada6cdd 100644 |
19168 |
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c |
19169 |
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c |
19170 |
+@@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
19171 |
+ if ((sas_rphy_add(rphy))) { |
19172 |
+ ioc_err(ioc, "failure at %s:%d/%s()!\n", |
19173 |
+ __FILE__, __LINE__, __func__); |
19174 |
++ sas_rphy_free(rphy); |
19175 |
++ rphy = NULL; |
19176 |
+ } |
19177 |
+ |
19178 |
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { |
19179 |
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h |
19180 |
+index 51c7ce5f97923..307ffdfe048be 100644 |
19181 |
+--- a/drivers/scsi/qla2xxx/qla_def.h |
19182 |
++++ b/drivers/scsi/qla2xxx/qla_def.h |
19183 |
+@@ -5117,17 +5117,17 @@ struct secure_flash_update_block_pk { |
19184 |
+ (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ |
19185 |
+ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) |
19186 |
+ |
19187 |
+-#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ |
19188 |
+- atomic_inc(&__vha->vref_count); \ |
19189 |
+- mb(); \ |
19190 |
+- if (__vha->flags.delete_progress) { \ |
19191 |
+- atomic_dec(&__vha->vref_count); \ |
19192 |
+- wake_up(&__vha->vref_waitq); \ |
19193 |
+- __bail = 1; \ |
19194 |
+- } else { \ |
19195 |
+- __bail = 0; \ |
19196 |
+- } \ |
19197 |
+-} while (0) |
19198 |
++static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha) |
19199 |
++{ |
19200 |
++ atomic_inc(&vha->vref_count); |
19201 |
++ mb(); |
19202 |
++ if (vha->flags.delete_progress) { |
19203 |
++ atomic_dec(&vha->vref_count); |
19204 |
++ wake_up(&vha->vref_waitq); |
19205 |
++ return true; |
19206 |
++ } |
19207 |
++ return false; |
19208 |
++} |
19209 |
+ |
19210 |
+ #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ |
19211 |
+ atomic_dec(&__vha->vref_count); \ |
19212 |
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c |
19213 |
+index b81797a3ab617..30798ab84db91 100644 |
19214 |
+--- a/drivers/scsi/qla2xxx/qla_init.c |
19215 |
++++ b/drivers/scsi/qla2xxx/qla_init.c |
19216 |
+@@ -110,6 +110,7 @@ static void qla24xx_abort_iocb_timeout(void *data) |
19217 |
+ struct qla_qpair *qpair = sp->qpair; |
19218 |
+ u32 handle; |
19219 |
+ unsigned long flags; |
19220 |
++ int sp_found = 0, cmdsp_found = 0; |
19221 |
+ |
19222 |
+ if (sp->cmd_sp) |
19223 |
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c, |
19224 |
+@@ -124,18 +125,21 @@ static void qla24xx_abort_iocb_timeout(void *data) |
19225 |
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags); |
19226 |
+ for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { |
19227 |
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == |
19228 |
+- sp->cmd_sp)) |
19229 |
++ sp->cmd_sp)) { |
19230 |
+ qpair->req->outstanding_cmds[handle] = NULL; |
19231 |
++ cmdsp_found = 1; |
19232 |
++ } |
19233 |
+ |
19234 |
+ /* removing the abort */ |
19235 |
+ if (qpair->req->outstanding_cmds[handle] == sp) { |
19236 |
+ qpair->req->outstanding_cmds[handle] = NULL; |
19237 |
++ sp_found = 1; |
19238 |
+ break; |
19239 |
+ } |
19240 |
+ } |
19241 |
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); |
19242 |
+ |
19243 |
+- if (sp->cmd_sp) { |
19244 |
++ if (cmdsp_found && sp->cmd_sp) { |
19245 |
+ /* |
19246 |
+ * This done function should take care of |
19247 |
+ * original command ref: INIT |
19248 |
+@@ -143,8 +147,10 @@ static void qla24xx_abort_iocb_timeout(void *data) |
19249 |
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); |
19250 |
+ } |
19251 |
+ |
19252 |
+- abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); |
19253 |
+- sp->done(sp, QLA_OS_TIMER_EXPIRED); |
19254 |
++ if (sp_found) { |
19255 |
++ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); |
19256 |
++ sp->done(sp, QLA_OS_TIMER_EXPIRED); |
19257 |
++ } |
19258 |
+ } |
19259 |
+ |
19260 |
+ static void qla24xx_abort_sp_done(srb_t *sp, int res) |
19261 |
+@@ -168,7 +174,6 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) |
19262 |
+ struct srb_iocb *abt_iocb; |
19263 |
+ srb_t *sp; |
19264 |
+ int rval = QLA_FUNCTION_FAILED; |
19265 |
+- uint8_t bail; |
19266 |
+ |
19267 |
+ /* ref: INIT for ABTS command */ |
19268 |
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, |
19269 |
+@@ -176,7 +181,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) |
19270 |
+ if (!sp) |
19271 |
+ return QLA_MEMORY_ALLOC_FAILED; |
19272 |
+ |
19273 |
+- QLA_VHA_MARK_BUSY(vha, bail); |
19274 |
++ qla_vha_mark_busy(vha); |
19275 |
+ abt_iocb = &sp->u.iocb_cmd; |
19276 |
+ sp->type = SRB_ABT_CMD; |
19277 |
+ sp->name = "abort"; |
19278 |
+@@ -2022,14 +2027,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, |
19279 |
+ struct srb_iocb *tm_iocb; |
19280 |
+ srb_t *sp; |
19281 |
+ int rval = QLA_FUNCTION_FAILED; |
19282 |
+- uint8_t bail; |
19283 |
+ |
19284 |
+ /* ref: INIT */ |
19285 |
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
19286 |
+ if (!sp) |
19287 |
+ goto done; |
19288 |
+ |
19289 |
+- QLA_VHA_MARK_BUSY(vha, bail); |
19290 |
++ qla_vha_mark_busy(vha); |
19291 |
+ sp->type = SRB_TM_CMD; |
19292 |
+ sp->name = "tmf"; |
19293 |
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), |
19294 |
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h |
19295 |
+index db17f7f410cdd..5185dc5daf80d 100644 |
19296 |
+--- a/drivers/scsi/qla2xxx/qla_inline.h |
19297 |
++++ b/drivers/scsi/qla2xxx/qla_inline.h |
19298 |
+@@ -225,11 +225,9 @@ static inline srb_t * |
19299 |
+ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) |
19300 |
+ { |
19301 |
+ srb_t *sp = NULL; |
19302 |
+- uint8_t bail; |
19303 |
+ struct qla_qpair *qpair; |
19304 |
+ |
19305 |
+- QLA_VHA_MARK_BUSY(vha, bail); |
19306 |
+- if (unlikely(bail)) |
19307 |
++ if (unlikely(qla_vha_mark_busy(vha))) |
19308 |
+ return NULL; |
19309 |
+ |
19310 |
+ qpair = vha->hw->base_qpair; |
19311 |
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c |
19312 |
+index 00e97f0a07ebe..05d827227d0b3 100644 |
19313 |
+--- a/drivers/scsi/qla2xxx/qla_os.c |
19314 |
++++ b/drivers/scsi/qla2xxx/qla_os.c |
19315 |
+@@ -5043,13 +5043,11 @@ struct qla_work_evt * |
19316 |
+ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) |
19317 |
+ { |
19318 |
+ struct qla_work_evt *e; |
19319 |
+- uint8_t bail; |
19320 |
+ |
19321 |
+ if (test_bit(UNLOADING, &vha->dpc_flags)) |
19322 |
+ return NULL; |
19323 |
+ |
19324 |
+- QLA_VHA_MARK_BUSY(vha, bail); |
19325 |
+- if (bail) |
19326 |
++ if (qla_vha_mark_busy(vha)) |
19327 |
+ return NULL; |
19328 |
+ |
19329 |
+ e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); |
19330 |
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c |
19331 |
+index 0b16061d8da8b..591df0a91057e 100644 |
19332 |
+--- a/drivers/scsi/scsi_debug.c |
19333 |
++++ b/drivers/scsi/scsi_debug.c |
19334 |
+@@ -3640,7 +3640,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, |
19335 |
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); |
19336 |
+ return illegal_condition_result; |
19337 |
+ } |
19338 |
+- lrdp = kzalloc(lbdof_blen, GFP_ATOMIC); |
19339 |
++ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); |
19340 |
+ if (lrdp == NULL) |
19341 |
+ return SCSI_MLQUEUE_HOST_BUSY; |
19342 |
+ if (sdebug_verbose) |
19343 |
+@@ -4296,7 +4296,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
19344 |
+ if (ret) |
19345 |
+ return ret; |
19346 |
+ |
19347 |
+- arr = kcalloc(lb_size, vnum, GFP_ATOMIC); |
19348 |
++ arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN); |
19349 |
+ if (!arr) { |
19350 |
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
19351 |
+ INSUFF_RES_ASCQ); |
19352 |
+@@ -4367,7 +4367,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, |
19353 |
+ rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD), |
19354 |
+ max_zones); |
19355 |
+ |
19356 |
+- arr = kzalloc(alloc_len, GFP_ATOMIC); |
19357 |
++ arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); |
19358 |
+ if (!arr) { |
19359 |
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
19360 |
+ INSUFF_RES_ASCQ); |
19361 |
+@@ -7156,7 +7156,10 @@ clean: |
19362 |
+ kfree(sdbg_devinfo->zstate); |
19363 |
+ kfree(sdbg_devinfo); |
19364 |
+ } |
19365 |
+- kfree(sdbg_host); |
19366 |
++ if (sdbg_host->dev.release) |
19367 |
++ put_device(&sdbg_host->dev); |
19368 |
++ else |
19369 |
++ kfree(sdbg_host); |
19370 |
+ pr_warn("%s: failed, errno=%d\n", __func__, -error); |
19371 |
+ return error; |
19372 |
+ } |
19373 |
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c |
19374 |
+index bb5a6e0fa49ab..dd9f5778f687d 100644 |
19375 |
+--- a/drivers/scsi/scsi_error.c |
19376 |
++++ b/drivers/scsi/scsi_error.c |
19377 |
+@@ -343,19 +343,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) |
19378 |
+ |
19379 |
+ if (rtn == BLK_EH_DONE) { |
19380 |
+ /* |
19381 |
+- * Set the command to complete first in order to prevent a real |
19382 |
+- * completion from releasing the command while error handling |
19383 |
+- * is using it. If the command was already completed, then the |
19384 |
+- * lower level driver beat the timeout handler, and it is safe |
19385 |
+- * to return without escalating error recovery. |
19386 |
+- * |
19387 |
+- * If timeout handling lost the race to a real completion, the |
19388 |
+- * block layer may ignore that due to a fake timeout injection, |
19389 |
+- * so return RESET_TIMER to allow error handling another shot |
19390 |
+- * at this command. |
19391 |
++ * If scsi_done() has already set SCMD_STATE_COMPLETE, do not |
19392 |
++ * modify *scmd. |
19393 |
+ */ |
19394 |
+ if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) |
19395 |
+- return BLK_EH_RESET_TIMER; |
19396 |
++ return BLK_EH_DONE; |
19397 |
+ if (scsi_abort_command(scmd) != SUCCESS) { |
19398 |
+ set_host_byte(scmd, DID_TIME_OUT); |
19399 |
+ scsi_eh_scmd_add(scmd); |
19400 |
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c |
19401 |
+index e9ccfb97773f1..7cf871323b2c4 100644 |
19402 |
+--- a/drivers/scsi/snic/snic_disc.c |
19403 |
++++ b/drivers/scsi/snic/snic_disc.c |
19404 |
+@@ -318,6 +318,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) |
19405 |
+ ret); |
19406 |
+ |
19407 |
+ put_device(&snic->shost->shost_gendev); |
19408 |
++ spin_lock_irqsave(snic->shost->host_lock, flags); |
19409 |
++ list_del(&tgt->list); |
19410 |
++ spin_unlock_irqrestore(snic->shost->host_lock, flags); |
19411 |
+ kfree(tgt); |
19412 |
+ tgt = NULL; |
19413 |
+ |
19414 |
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c |
19415 |
+index dae1a85f1512c..a428b8145dcc2 100644 |
19416 |
+--- a/drivers/scsi/ufs/ufshcd.c |
19417 |
++++ b/drivers/scsi/ufs/ufshcd.c |
19418 |
+@@ -8476,8 +8476,6 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, |
19419 |
+ struct scsi_device *sdp; |
19420 |
+ unsigned long flags; |
19421 |
+ int ret, retries; |
19422 |
+- unsigned long deadline; |
19423 |
+- int32_t remaining; |
19424 |
+ |
19425 |
+ spin_lock_irqsave(hba->host->host_lock, flags); |
19426 |
+ sdp = hba->sdev_ufs_device; |
19427 |
+@@ -8510,14 +8508,9 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, |
19428 |
+ * callbacks hence set the RQF_PM flag so that it doesn't resume the |
19429 |
+ * already suspended childs. |
19430 |
+ */ |
19431 |
+- deadline = jiffies + 10 * HZ; |
19432 |
+ for (retries = 3; retries > 0; --retries) { |
19433 |
+- ret = -ETIMEDOUT; |
19434 |
+- remaining = deadline - jiffies; |
19435 |
+- if (remaining <= 0) |
19436 |
+- break; |
19437 |
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, |
19438 |
+- remaining / HZ, 0, 0, RQF_PM, NULL); |
19439 |
++ HZ, 0, 0, RQF_PM, NULL); |
19440 |
+ if (!scsi_status_is_check_condition(ret) || |
19441 |
+ !scsi_sense_valid(&sshdr) || |
19442 |
+ sshdr.sense_key != UNIT_ATTENTION) |
19443 |
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c |
19444 |
+index afd2fd74802d2..52ecde8e446cf 100644 |
19445 |
+--- a/drivers/soc/mediatek/mtk-pm-domains.c |
19446 |
++++ b/drivers/soc/mediatek/mtk-pm-domains.c |
19447 |
+@@ -272,9 +272,9 @@ static int scpsys_power_off(struct generic_pm_domain *genpd) |
19448 |
+ clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks); |
19449 |
+ |
19450 |
+ /* subsys power off */ |
19451 |
+- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); |
19452 |
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT); |
19453 |
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT); |
19454 |
++ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT); |
19455 |
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT); |
19456 |
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT); |
19457 |
+ |
19458 |
+diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c |
19459 |
+index 2e455d9e3d94a..173427bbf916e 100644 |
19460 |
+--- a/drivers/soc/qcom/apr.c |
19461 |
++++ b/drivers/soc/qcom/apr.c |
19462 |
+@@ -15,13 +15,18 @@ |
19463 |
+ #include <linux/rpmsg.h> |
19464 |
+ #include <linux/of.h> |
19465 |
+ |
19466 |
+-struct apr { |
19467 |
++enum { |
19468 |
++ PR_TYPE_APR = 0, |
19469 |
++}; |
19470 |
++ |
19471 |
++struct packet_router { |
19472 |
+ struct rpmsg_endpoint *ch; |
19473 |
+ struct device *dev; |
19474 |
+ spinlock_t svcs_lock; |
19475 |
+ spinlock_t rx_lock; |
19476 |
+ struct idr svcs_idr; |
19477 |
+ int dest_domain_id; |
19478 |
++ int type; |
19479 |
+ struct pdr_handle *pdr; |
19480 |
+ struct workqueue_struct *rxwq; |
19481 |
+ struct work_struct rx_work; |
19482 |
+@@ -44,21 +49,21 @@ struct apr_rx_buf { |
19483 |
+ */ |
19484 |
+ int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt) |
19485 |
+ { |
19486 |
+- struct apr *apr = dev_get_drvdata(adev->dev.parent); |
19487 |
++ struct packet_router *apr = dev_get_drvdata(adev->dev.parent); |
19488 |
+ struct apr_hdr *hdr; |
19489 |
+ unsigned long flags; |
19490 |
+ int ret; |
19491 |
+ |
19492 |
+- spin_lock_irqsave(&adev->lock, flags); |
19493 |
++ spin_lock_irqsave(&adev->svc.lock, flags); |
19494 |
+ |
19495 |
+ hdr = &pkt->hdr; |
19496 |
+ hdr->src_domain = APR_DOMAIN_APPS; |
19497 |
+- hdr->src_svc = adev->svc_id; |
19498 |
++ hdr->src_svc = adev->svc.id; |
19499 |
+ hdr->dest_domain = adev->domain_id; |
19500 |
+- hdr->dest_svc = adev->svc_id; |
19501 |
++ hdr->dest_svc = adev->svc.id; |
19502 |
+ |
19503 |
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size); |
19504 |
+- spin_unlock_irqrestore(&adev->lock, flags); |
19505 |
++ spin_unlock_irqrestore(&adev->svc.lock, flags); |
19506 |
+ |
19507 |
+ return ret ? ret : hdr->pkt_size; |
19508 |
+ } |
19509 |
+@@ -74,7 +79,7 @@ static void apr_dev_release(struct device *dev) |
19510 |
+ static int apr_callback(struct rpmsg_device *rpdev, void *buf, |
19511 |
+ int len, void *priv, u32 addr) |
19512 |
+ { |
19513 |
+- struct apr *apr = dev_get_drvdata(&rpdev->dev); |
19514 |
++ struct packet_router *apr = dev_get_drvdata(&rpdev->dev); |
19515 |
+ struct apr_rx_buf *abuf; |
19516 |
+ unsigned long flags; |
19517 |
+ |
19518 |
+@@ -100,11 +105,11 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf, |
19519 |
+ return 0; |
19520 |
+ } |
19521 |
+ |
19522 |
+- |
19523 |
+-static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) |
19524 |
++static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf) |
19525 |
+ { |
19526 |
+ uint16_t hdr_size, msg_type, ver, svc_id; |
19527 |
+- struct apr_device *svc = NULL; |
19528 |
++ struct pkt_router_svc *svc; |
19529 |
++ struct apr_device *adev; |
19530 |
+ struct apr_driver *adrv = NULL; |
19531 |
+ struct apr_resp_pkt resp; |
19532 |
+ struct apr_hdr *hdr; |
19533 |
+@@ -145,12 +150,15 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) |
19534 |
+ svc_id = hdr->dest_svc; |
19535 |
+ spin_lock_irqsave(&apr->svcs_lock, flags); |
19536 |
+ svc = idr_find(&apr->svcs_idr, svc_id); |
19537 |
+- if (svc && svc->dev.driver) |
19538 |
+- adrv = to_apr_driver(svc->dev.driver); |
19539 |
++ if (svc && svc->dev->driver) { |
19540 |
++ adev = svc_to_apr_device(svc); |
19541 |
++ adrv = to_apr_driver(adev->dev.driver); |
19542 |
++ } |
19543 |
+ spin_unlock_irqrestore(&apr->svcs_lock, flags); |
19544 |
+ |
19545 |
+- if (!adrv) { |
19546 |
+- dev_err(apr->dev, "APR: service is not registered\n"); |
19547 |
++ if (!adrv || !adev) { |
19548 |
++ dev_err(apr->dev, "APR: service is not registered (%d)\n", |
19549 |
++ svc_id); |
19550 |
+ return -EINVAL; |
19551 |
+ } |
19552 |
+ |
19553 |
+@@ -164,20 +172,26 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf) |
19554 |
+ if (resp.payload_size > 0) |
19555 |
+ resp.payload = buf + hdr_size; |
19556 |
+ |
19557 |
+- adrv->callback(svc, &resp); |
19558 |
++ adrv->callback(adev, &resp); |
19559 |
+ |
19560 |
+ return 0; |
19561 |
+ } |
19562 |
+ |
19563 |
+ static void apr_rxwq(struct work_struct *work) |
19564 |
+ { |
19565 |
+- struct apr *apr = container_of(work, struct apr, rx_work); |
19566 |
++ struct packet_router *apr = container_of(work, struct packet_router, rx_work); |
19567 |
+ struct apr_rx_buf *abuf, *b; |
19568 |
+ unsigned long flags; |
19569 |
+ |
19570 |
+ if (!list_empty(&apr->rx_list)) { |
19571 |
+ list_for_each_entry_safe(abuf, b, &apr->rx_list, node) { |
19572 |
+- apr_do_rx_callback(apr, abuf); |
19573 |
++ switch (apr->type) { |
19574 |
++ case PR_TYPE_APR: |
19575 |
++ apr_do_rx_callback(apr, abuf); |
19576 |
++ break; |
19577 |
++ default: |
19578 |
++ break; |
19579 |
++ } |
19580 |
+ spin_lock_irqsave(&apr->rx_lock, flags); |
19581 |
+ list_del(&abuf->node); |
19582 |
+ spin_unlock_irqrestore(&apr->rx_lock, flags); |
19583 |
+@@ -201,7 +215,7 @@ static int apr_device_match(struct device *dev, struct device_driver *drv) |
19584 |
+ |
19585 |
+ while (id->domain_id != 0 || id->svc_id != 0) { |
19586 |
+ if (id->domain_id == adev->domain_id && |
19587 |
+- id->svc_id == adev->svc_id) |
19588 |
++ id->svc_id == adev->svc.id) |
19589 |
+ return 1; |
19590 |
+ id++; |
19591 |
+ } |
19592 |
+@@ -221,14 +235,14 @@ static void apr_device_remove(struct device *dev) |
19593 |
+ { |
19594 |
+ struct apr_device *adev = to_apr_device(dev); |
19595 |
+ struct apr_driver *adrv; |
19596 |
+- struct apr *apr = dev_get_drvdata(adev->dev.parent); |
19597 |
++ struct packet_router *apr = dev_get_drvdata(adev->dev.parent); |
19598 |
+ |
19599 |
+ if (dev->driver) { |
19600 |
+ adrv = to_apr_driver(dev->driver); |
19601 |
+ if (adrv->remove) |
19602 |
+ adrv->remove(adev); |
19603 |
+ spin_lock(&apr->svcs_lock); |
19604 |
+- idr_remove(&apr->svcs_idr, adev->svc_id); |
19605 |
++ idr_remove(&apr->svcs_idr, adev->svc.id); |
19606 |
+ spin_unlock(&apr->svcs_lock); |
19607 |
+ } |
19608 |
+ } |
19609 |
+@@ -255,28 +269,39 @@ struct bus_type aprbus = { |
19610 |
+ EXPORT_SYMBOL_GPL(aprbus); |
19611 |
+ |
19612 |
+ static int apr_add_device(struct device *dev, struct device_node *np, |
19613 |
+- const struct apr_device_id *id) |
19614 |
++ u32 svc_id, u32 domain_id) |
19615 |
+ { |
19616 |
+- struct apr *apr = dev_get_drvdata(dev); |
19617 |
++ struct packet_router *apr = dev_get_drvdata(dev); |
19618 |
+ struct apr_device *adev = NULL; |
19619 |
++ struct pkt_router_svc *svc; |
19620 |
+ int ret; |
19621 |
+ |
19622 |
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL); |
19623 |
+ if (!adev) |
19624 |
+ return -ENOMEM; |
19625 |
+ |
19626 |
+- spin_lock_init(&adev->lock); |
19627 |
++ adev->svc_id = svc_id; |
19628 |
++ svc = &adev->svc; |
19629 |
++ |
19630 |
++ svc->id = svc_id; |
19631 |
++ svc->pr = apr; |
19632 |
++ svc->priv = adev; |
19633 |
++ svc->dev = dev; |
19634 |
++ spin_lock_init(&svc->lock); |
19635 |
++ |
19636 |
++ adev->domain_id = domain_id; |
19637 |
+ |
19638 |
+- adev->svc_id = id->svc_id; |
19639 |
+- adev->domain_id = id->domain_id; |
19640 |
+- adev->version = id->svc_version; |
19641 |
+ if (np) |
19642 |
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np); |
19643 |
+- else |
19644 |
+- strscpy(adev->name, id->name, APR_NAME_SIZE); |
19645 |
+ |
19646 |
+- dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, |
19647 |
+- id->domain_id, id->svc_id); |
19648 |
++ switch (apr->type) { |
19649 |
++ case PR_TYPE_APR: |
19650 |
++ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, |
19651 |
++ domain_id, svc_id); |
19652 |
++ break; |
19653 |
++ default: |
19654 |
++ break; |
19655 |
++ } |
19656 |
+ |
19657 |
+ adev->dev.bus = &aprbus; |
19658 |
+ adev->dev.parent = dev; |
19659 |
+@@ -285,12 +310,19 @@ static int apr_add_device(struct device *dev, struct device_node *np, |
19660 |
+ adev->dev.driver = NULL; |
19661 |
+ |
19662 |
+ spin_lock(&apr->svcs_lock); |
19663 |
+- idr_alloc(&apr->svcs_idr, adev, id->svc_id, |
19664 |
+- id->svc_id + 1, GFP_ATOMIC); |
19665 |
++ ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC); |
19666 |
+ spin_unlock(&apr->svcs_lock); |
19667 |
++ if (ret < 0) { |
19668 |
++ dev_err(dev, "idr_alloc failed: %d\n", ret); |
19669 |
++ goto out; |
19670 |
++ } |
19671 |
+ |
19672 |
+- of_property_read_string_index(np, "qcom,protection-domain", |
19673 |
+- 1, &adev->service_path); |
19674 |
++ ret = of_property_read_string_index(np, "qcom,protection-domain", |
19675 |
++ 1, &adev->service_path); |
19676 |
++ if (ret < 0) { |
19677 |
++ dev_err(dev, "Failed to read second value of qcom,protection-domain\n"); |
19678 |
++ goto out; |
19679 |
++ } |
19680 |
+ |
19681 |
+ dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); |
19682 |
+ |
19683 |
+@@ -300,13 +332,14 @@ static int apr_add_device(struct device *dev, struct device_node *np, |
19684 |
+ put_device(&adev->dev); |
19685 |
+ } |
19686 |
+ |
19687 |
++out: |
19688 |
+ return ret; |
19689 |
+ } |
19690 |
+ |
19691 |
+ static int of_apr_add_pd_lookups(struct device *dev) |
19692 |
+ { |
19693 |
+ const char *service_name, *service_path; |
19694 |
+- struct apr *apr = dev_get_drvdata(dev); |
19695 |
++ struct packet_router *apr = dev_get_drvdata(dev); |
19696 |
+ struct device_node *node; |
19697 |
+ struct pdr_service *pds; |
19698 |
+ int ret; |
19699 |
+@@ -338,13 +371,14 @@ static int of_apr_add_pd_lookups(struct device *dev) |
19700 |
+ |
19701 |
+ static void of_register_apr_devices(struct device *dev, const char *svc_path) |
19702 |
+ { |
19703 |
+- struct apr *apr = dev_get_drvdata(dev); |
19704 |
++ struct packet_router *apr = dev_get_drvdata(dev); |
19705 |
+ struct device_node *node; |
19706 |
+ const char *service_path; |
19707 |
+ int ret; |
19708 |
+ |
19709 |
+ for_each_child_of_node(dev->of_node, node) { |
19710 |
+- struct apr_device_id id = { {0} }; |
19711 |
++ u32 svc_id; |
19712 |
++ u32 domain_id; |
19713 |
+ |
19714 |
+ /* |
19715 |
+ * This function is called with svc_path NULL during |
19716 |
+@@ -374,13 +408,13 @@ static void of_register_apr_devices(struct device *dev, const char *svc_path) |
19717 |
+ continue; |
19718 |
+ } |
19719 |
+ |
19720 |
+- if (of_property_read_u32(node, "reg", &id.svc_id)) |
19721 |
++ if (of_property_read_u32(node, "reg", &svc_id)) |
19722 |
+ continue; |
19723 |
+ |
19724 |
+- id.domain_id = apr->dest_domain_id; |
19725 |
++ domain_id = apr->dest_domain_id; |
19726 |
+ |
19727 |
+- if (apr_add_device(dev, node, &id)) |
19728 |
+- dev_err(dev, "Failed to add apr %d svc\n", id.svc_id); |
19729 |
++ if (apr_add_device(dev, node, svc_id, domain_id)) |
19730 |
++ dev_err(dev, "Failed to add apr %d svc\n", svc_id); |
19731 |
+ } |
19732 |
+ } |
19733 |
+ |
19734 |
+@@ -400,7 +434,7 @@ static int apr_remove_device(struct device *dev, void *svc_path) |
19735 |
+ |
19736 |
+ static void apr_pd_status(int state, char *svc_path, void *priv) |
19737 |
+ { |
19738 |
+- struct apr *apr = (struct apr *)priv; |
19739 |
++ struct packet_router *apr = (struct packet_router *)priv; |
19740 |
+ |
19741 |
+ switch (state) { |
19742 |
+ case SERVREG_SERVICE_STATE_UP: |
19743 |
+@@ -415,16 +449,20 @@ static void apr_pd_status(int state, char *svc_path, void *priv) |
19744 |
+ static int apr_probe(struct rpmsg_device *rpdev) |
19745 |
+ { |
19746 |
+ struct device *dev = &rpdev->dev; |
19747 |
+- struct apr *apr; |
19748 |
++ struct packet_router *apr; |
19749 |
+ int ret; |
19750 |
+ |
19751 |
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL); |
19752 |
+ if (!apr) |
19753 |
+ return -ENOMEM; |
19754 |
+ |
19755 |
+- ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id); |
19756 |
++ ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id); |
19757 |
++ if (ret) /* try deprecated apr-domain property */ |
19758 |
++ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", |
19759 |
++ &apr->dest_domain_id); |
19760 |
++ apr->type = PR_TYPE_APR; |
19761 |
+ if (ret) { |
19762 |
+- dev_err(dev, "APR Domain ID not specified in DT\n"); |
19763 |
++ dev_err(dev, "Domain ID not specified in DT\n"); |
19764 |
+ return ret; |
19765 |
+ } |
19766 |
+ |
19767 |
+@@ -467,7 +505,7 @@ destroy_wq: |
19768 |
+ |
19769 |
+ static void apr_remove(struct rpmsg_device *rpdev) |
19770 |
+ { |
19771 |
+- struct apr *apr = dev_get_drvdata(&rpdev->dev); |
19772 |
++ struct packet_router *apr = dev_get_drvdata(&rpdev->dev); |
19773 |
+ |
19774 |
+ pdr_handle_release(apr->pdr); |
19775 |
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device); |
19776 |
+@@ -504,20 +542,20 @@ void apr_driver_unregister(struct apr_driver *drv) |
19777 |
+ } |
19778 |
+ EXPORT_SYMBOL_GPL(apr_driver_unregister); |
19779 |
+ |
19780 |
+-static const struct of_device_id apr_of_match[] = { |
19781 |
++static const struct of_device_id pkt_router_of_match[] = { |
19782 |
+ { .compatible = "qcom,apr"}, |
19783 |
+ { .compatible = "qcom,apr-v2"}, |
19784 |
+ {} |
19785 |
+ }; |
19786 |
+-MODULE_DEVICE_TABLE(of, apr_of_match); |
19787 |
++MODULE_DEVICE_TABLE(of, pkt_router_of_match); |
19788 |
+ |
19789 |
+-static struct rpmsg_driver apr_driver = { |
19790 |
++static struct rpmsg_driver packet_router_driver = { |
19791 |
+ .probe = apr_probe, |
19792 |
+ .remove = apr_remove, |
19793 |
+ .callback = apr_callback, |
19794 |
+ .drv = { |
19795 |
+ .name = "qcom,apr", |
19796 |
+- .of_match_table = apr_of_match, |
19797 |
++ .of_match_table = pkt_router_of_match, |
19798 |
+ }, |
19799 |
+ }; |
19800 |
+ |
19801 |
+@@ -527,7 +565,7 @@ static int __init apr_init(void) |
19802 |
+ |
19803 |
+ ret = bus_register(&aprbus); |
19804 |
+ if (!ret) |
19805 |
+- ret = register_rpmsg_driver(&apr_driver); |
19806 |
++ ret = register_rpmsg_driver(&packet_router_driver); |
19807 |
+ else |
19808 |
+ bus_unregister(&aprbus); |
19809 |
+ |
19810 |
+@@ -537,7 +575,7 @@ static int __init apr_init(void) |
19811 |
+ static void __exit apr_exit(void) |
19812 |
+ { |
19813 |
+ bus_unregister(&aprbus); |
19814 |
+- unregister_rpmsg_driver(&apr_driver); |
19815 |
++ unregister_rpmsg_driver(&packet_router_driver); |
19816 |
+ } |
19817 |
+ |
19818 |
+ subsys_initcall(apr_init); |
19819 |
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c |
19820 |
+index cabd8870316d3..47d41804fdf67 100644 |
19821 |
+--- a/drivers/soc/qcom/llcc-qcom.c |
19822 |
++++ b/drivers/soc/qcom/llcc-qcom.c |
19823 |
+@@ -607,7 +607,7 @@ static int qcom_llcc_probe(struct platform_device *pdev) |
19824 |
+ if (ret) |
19825 |
+ goto err; |
19826 |
+ |
19827 |
+- drv_data->ecc_irq = platform_get_irq(pdev, 0); |
19828 |
++ drv_data->ecc_irq = platform_get_irq_optional(pdev, 0); |
19829 |
+ if (drv_data->ecc_irq >= 0) { |
19830 |
+ llcc_edac = platform_device_register_data(&pdev->dev, |
19831 |
+ "qcom_llcc_edac", -1, drv_data, |
19832 |
+diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c |
19833 |
+index 2ac3856b8d42d..52389859395c6 100644 |
19834 |
+--- a/drivers/soc/ti/knav_qmss_queue.c |
19835 |
++++ b/drivers/soc/ti/knav_qmss_queue.c |
19836 |
+@@ -67,7 +67,7 @@ static DEFINE_MUTEX(knav_dev_lock); |
19837 |
+ * Newest followed by older ones. Search is done from start of the array |
19838 |
+ * until a firmware file is found. |
19839 |
+ */ |
19840 |
+-const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; |
19841 |
++static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; |
19842 |
+ |
19843 |
+ static bool device_ready; |
19844 |
+ bool knav_qmss_device_ready(void) |
19845 |
+@@ -1785,9 +1785,9 @@ static int knav_queue_probe(struct platform_device *pdev) |
19846 |
+ INIT_LIST_HEAD(&kdev->pdsps); |
19847 |
+ |
19848 |
+ pm_runtime_enable(&pdev->dev); |
19849 |
+- ret = pm_runtime_get_sync(&pdev->dev); |
19850 |
++ ret = pm_runtime_resume_and_get(&pdev->dev); |
19851 |
+ if (ret < 0) { |
19852 |
+- pm_runtime_put_noidle(&pdev->dev); |
19853 |
++ pm_runtime_disable(&pdev->dev); |
19854 |
+ dev_err(dev, "Failed to enable QMSS\n"); |
19855 |
+ return ret; |
19856 |
+ } |
19857 |
+diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c |
19858 |
+index b5b2fa538d5c3..4d15587324d4f 100644 |
19859 |
+--- a/drivers/soc/ti/smartreflex.c |
19860 |
++++ b/drivers/soc/ti/smartreflex.c |
19861 |
+@@ -931,6 +931,7 @@ static int omap_sr_probe(struct platform_device *pdev) |
19862 |
+ err_debugfs: |
19863 |
+ debugfs_remove_recursive(sr_info->dbg_dir); |
19864 |
+ err_list_del: |
19865 |
++ pm_runtime_disable(&pdev->dev); |
19866 |
+ list_del(&sr_info->node); |
19867 |
+ clk_unprepare(sr_info->fck); |
19868 |
+ |
19869 |
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c |
19870 |
+index 0584f4d2fde29..3ffdab6caac2a 100644 |
19871 |
+--- a/drivers/spi/spi-gpio.c |
19872 |
++++ b/drivers/spi/spi-gpio.c |
19873 |
+@@ -244,9 +244,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output) |
19874 |
+ if (output) |
19875 |
+ return gpiod_direction_output(spi_gpio->mosi, 1); |
19876 |
+ |
19877 |
+- ret = gpiod_direction_input(spi_gpio->mosi); |
19878 |
+- if (ret) |
19879 |
+- return ret; |
19880 |
++ /* |
19881 |
++ * Only change MOSI to an input if using 3WIRE mode. |
19882 |
++ * Otherwise, MOSI could be left floating if there is |
19883 |
++ * no pull resistor connected to the I/O pin, or could |
19884 |
++ * be left logic high if there is a pull-up. Transmitting |
19885 |
++ * logic high when only clocking MISO data in can put some |
19886 |
++ * SPI devices in to a bad state. |
19887 |
++ */ |
19888 |
++ if (spi->mode & SPI_3WIRE) { |
19889 |
++ ret = gpiod_direction_input(spi_gpio->mosi); |
19890 |
++ if (ret) |
19891 |
++ return ret; |
19892 |
++ } |
19893 |
+ /* |
19894 |
+ * Send a turnaround high impedance cycle when switching |
19895 |
+ * from output to input. Theoretically there should be |
19896 |
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c |
19897 |
+index 1bd73e322b7bb..d233e2424ad14 100644 |
19898 |
+--- a/drivers/spi/spidev.c |
19899 |
++++ b/drivers/spi/spidev.c |
19900 |
+@@ -376,12 +376,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
19901 |
+ switch (cmd) { |
19902 |
+ /* read requests */ |
19903 |
+ case SPI_IOC_RD_MODE: |
19904 |
+- retval = put_user(spi->mode & SPI_MODE_MASK, |
19905 |
+- (__u8 __user *)arg); |
19906 |
+- break; |
19907 |
+ case SPI_IOC_RD_MODE32: |
19908 |
+- retval = put_user(spi->mode & SPI_MODE_MASK, |
19909 |
+- (__u32 __user *)arg); |
19910 |
++ tmp = spi->mode; |
19911 |
++ |
19912 |
++ { |
19913 |
++ struct spi_controller *ctlr = spi->controller; |
19914 |
++ |
19915 |
++ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods && |
19916 |
++ ctlr->cs_gpiods[spi->chip_select]) |
19917 |
++ tmp &= ~SPI_CS_HIGH; |
19918 |
++ } |
19919 |
++ |
19920 |
++ if (cmd == SPI_IOC_RD_MODE) |
19921 |
++ retval = put_user(tmp & SPI_MODE_MASK, |
19922 |
++ (__u8 __user *)arg); |
19923 |
++ else |
19924 |
++ retval = put_user(tmp & SPI_MODE_MASK, |
19925 |
++ (__u32 __user *)arg); |
19926 |
+ break; |
19927 |
+ case SPI_IOC_RD_LSB_FIRST: |
19928 |
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, |
19929 |
+diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c |
19930 |
+index 1d3026dae827e..62d5397ff1f98 100644 |
19931 |
+--- a/drivers/staging/iio/accel/adis16203.c |
19932 |
++++ b/drivers/staging/iio/accel/adis16203.c |
19933 |
+@@ -312,3 +312,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@×××××.com>"); |
19934 |
+ MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"); |
19935 |
+ MODULE_LICENSE("GPL v2"); |
19936 |
+ MODULE_ALIAS("spi:adis16203"); |
19937 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
19938 |
+diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c |
19939 |
+index 2a8aa83b8d9e6..bca857eef92e2 100644 |
19940 |
+--- a/drivers/staging/iio/accel/adis16240.c |
19941 |
++++ b/drivers/staging/iio/accel/adis16240.c |
19942 |
+@@ -440,3 +440,4 @@ MODULE_AUTHOR("Barry Song <21cnbao@×××××.com>"); |
19943 |
+ MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder"); |
19944 |
+ MODULE_LICENSE("GPL v2"); |
19945 |
+ MODULE_ALIAS("spi:adis16240"); |
19946 |
++MODULE_IMPORT_NS(IIO_ADISLIB); |
19947 |
+diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c |
19948 |
+index 6acfc94a16e73..b520d1e0edd14 100644 |
19949 |
+--- a/drivers/staging/mt7621-pci/pci-mt7621.c |
19950 |
++++ b/drivers/staging/mt7621-pci/pci-mt7621.c |
19951 |
+@@ -93,8 +93,8 @@ struct mt7621_pcie_port { |
19952 |
+ * reset lines are inverted. |
19953 |
+ */ |
19954 |
+ struct mt7621_pcie { |
19955 |
+- void __iomem *base; |
19956 |
+ struct device *dev; |
19957 |
++ void __iomem *base; |
19958 |
+ struct list_head ports; |
19959 |
+ bool resets_inverted; |
19960 |
+ }; |
19961 |
+@@ -129,7 +129,7 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port, |
19962 |
+ writel_relaxed(val, port->base + reg); |
19963 |
+ } |
19964 |
+ |
19965 |
+-static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot, |
19966 |
++static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot, |
19967 |
+ unsigned int func, unsigned int where) |
19968 |
+ { |
19969 |
+ return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) | |
19970 |
+@@ -140,7 +140,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, |
19971 |
+ unsigned int devfn, int where) |
19972 |
+ { |
19973 |
+ struct mt7621_pcie *pcie = bus->sysdata; |
19974 |
+- u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), |
19975 |
++ u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn), |
19976 |
+ PCI_FUNC(devfn), where); |
19977 |
+ |
19978 |
+ writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR); |
19979 |
+@@ -148,7 +148,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, |
19980 |
+ return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3); |
19981 |
+ } |
19982 |
+ |
19983 |
+-struct pci_ops mt7621_pci_ops = { |
19984 |
++struct pci_ops mt7621_pcie_ops = { |
19985 |
+ .map_bus = mt7621_pcie_map_bus, |
19986 |
+ .read = pci_generic_config_read, |
19987 |
+ .write = pci_generic_config_write, |
19988 |
+@@ -156,7 +156,7 @@ struct pci_ops mt7621_pci_ops = { |
19989 |
+ |
19990 |
+ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) |
19991 |
+ { |
19992 |
+- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); |
19993 |
++ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); |
19994 |
+ |
19995 |
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); |
19996 |
+ return pcie_read(pcie, RALINK_PCI_CONFIG_DATA); |
19997 |
+@@ -165,7 +165,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) |
19998 |
+ static void write_config(struct mt7621_pcie *pcie, unsigned int dev, |
19999 |
+ u32 reg, u32 val) |
20000 |
+ { |
20001 |
+- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg); |
20002 |
++ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); |
20003 |
+ |
20004 |
+ pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); |
20005 |
+ pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); |
20006 |
+@@ -505,16 +505,17 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host) |
20007 |
+ { |
20008 |
+ struct mt7621_pcie *pcie = pci_host_bridge_priv(host); |
20009 |
+ |
20010 |
+- host->ops = &mt7621_pci_ops; |
20011 |
++ host->ops = &mt7621_pcie_ops; |
20012 |
+ host->sysdata = pcie; |
20013 |
+ return pci_host_probe(host); |
20014 |
+ } |
20015 |
+ |
20016 |
+-static const struct soc_device_attribute mt7621_pci_quirks_match[] = { |
20017 |
+- { .soc_id = "mt7621", .revision = "E2" } |
20018 |
++static const struct soc_device_attribute mt7621_pcie_quirks_match[] = { |
20019 |
++ { .soc_id = "mt7621", .revision = "E2" }, |
20020 |
++ { /* sentinel */ } |
20021 |
+ }; |
20022 |
+ |
20023 |
+-static int mt7621_pci_probe(struct platform_device *pdev) |
20024 |
++static int mt7621_pcie_probe(struct platform_device *pdev) |
20025 |
+ { |
20026 |
+ struct device *dev = &pdev->dev; |
20027 |
+ const struct soc_device_attribute *attr; |
20028 |
+@@ -535,7 +536,7 @@ static int mt7621_pci_probe(struct platform_device *pdev) |
20029 |
+ platform_set_drvdata(pdev, pcie); |
20030 |
+ INIT_LIST_HEAD(&pcie->ports); |
20031 |
+ |
20032 |
+- attr = soc_device_match(mt7621_pci_quirks_match); |
20033 |
++ attr = soc_device_match(mt7621_pcie_quirks_match); |
20034 |
+ if (attr) |
20035 |
+ pcie->resets_inverted = true; |
20036 |
+ |
20037 |
+@@ -572,7 +573,7 @@ remove_resets: |
20038 |
+ return err; |
20039 |
+ } |
20040 |
+ |
20041 |
+-static int mt7621_pci_remove(struct platform_device *pdev) |
20042 |
++static int mt7621_pcie_remove(struct platform_device *pdev) |
20043 |
+ { |
20044 |
+ struct mt7621_pcie *pcie = platform_get_drvdata(pdev); |
20045 |
+ struct mt7621_pcie_port *port; |
20046 |
+@@ -583,18 +584,18 @@ static int mt7621_pci_remove(struct platform_device *pdev) |
20047 |
+ return 0; |
20048 |
+ } |
20049 |
+ |
20050 |
+-static const struct of_device_id mt7621_pci_ids[] = { |
20051 |
++static const struct of_device_id mt7621_pcie_ids[] = { |
20052 |
+ { .compatible = "mediatek,mt7621-pci" }, |
20053 |
+ {}, |
20054 |
+ }; |
20055 |
+-MODULE_DEVICE_TABLE(of, mt7621_pci_ids); |
20056 |
++MODULE_DEVICE_TABLE(of, mt7621_pcie_ids); |
20057 |
+ |
20058 |
+-static struct platform_driver mt7621_pci_driver = { |
20059 |
+- .probe = mt7621_pci_probe, |
20060 |
+- .remove = mt7621_pci_remove, |
20061 |
++static struct platform_driver mt7621_pcie_driver = { |
20062 |
++ .probe = mt7621_pcie_probe, |
20063 |
++ .remove = mt7621_pcie_remove, |
20064 |
+ .driver = { |
20065 |
+ .name = "mt7621-pci", |
20066 |
+- .of_match_table = of_match_ptr(mt7621_pci_ids), |
20067 |
++ .of_match_table = of_match_ptr(mt7621_pcie_ids), |
20068 |
+ }, |
20069 |
+ }; |
20070 |
+-builtin_platform_driver(mt7621_pci_driver); |
20071 |
++builtin_platform_driver(mt7621_pcie_driver); |
20072 |
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c |
20073 |
+index e3d0a361d370d..98e90670560b5 100644 |
20074 |
+--- a/drivers/staging/rtl8192e/rtllib_rx.c |
20075 |
++++ b/drivers/staging/rtl8192e/rtllib_rx.c |
20076 |
+@@ -1489,9 +1489,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb, |
20077 |
+ hdrlen += 4; |
20078 |
+ } |
20079 |
+ |
20080 |
+- rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); |
20081 |
+ ieee->stats.rx_packets++; |
20082 |
+ ieee->stats.rx_bytes += skb->len; |
20083 |
++ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); |
20084 |
+ |
20085 |
+ return 1; |
20086 |
+ } |
20087 |
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c |
20088 |
+index b58e75932ecd5..3686b3c599ce7 100644 |
20089 |
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c |
20090 |
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c |
20091 |
+@@ -951,9 +951,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, |
20092 |
+ #endif |
20093 |
+ |
20094 |
+ if (ieee->iw_mode == IW_MODE_MONITOR) { |
20095 |
++ unsigned int len = skb->len; |
20096 |
++ |
20097 |
+ ieee80211_monitor_rx(ieee, skb, rx_stats); |
20098 |
+ stats->rx_packets++; |
20099 |
+- stats->rx_bytes += skb->len; |
20100 |
++ stats->rx_bytes += len; |
20101 |
+ return 1; |
20102 |
+ } |
20103 |
+ |
20104 |
+diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c |
20105 |
+index af666bd9e8d4d..c5cd873c6e016 100644 |
20106 |
+--- a/drivers/thermal/imx8mm_thermal.c |
20107 |
++++ b/drivers/thermal/imx8mm_thermal.c |
20108 |
+@@ -65,8 +65,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp) |
20109 |
+ u32 val; |
20110 |
+ |
20111 |
+ val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; |
20112 |
++ |
20113 |
++ /* |
20114 |
++ * Do not validate against the V bit (bit 31) due to errata |
20115 |
++ * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid |
20116 |
++ */ |
20117 |
++ |
20118 |
+ *temp = val * 1000; |
20119 |
+- if (*temp < VER1_TEMP_LOW_LIMIT) |
20120 |
++ if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) |
20121 |
+ return -EAGAIN; |
20122 |
+ |
20123 |
+ return 0; |
20124 |
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c |
20125 |
+index eafa7526eb8b4..cc94d8b005d49 100644 |
20126 |
+--- a/drivers/thermal/qcom/lmh.c |
20127 |
++++ b/drivers/thermal/qcom/lmh.c |
20128 |
+@@ -43,7 +43,7 @@ static irqreturn_t lmh_handle_irq(int hw_irq, void *data) |
20129 |
+ if (irq) |
20130 |
+ generic_handle_irq(irq); |
20131 |
+ |
20132 |
+- return 0; |
20133 |
++ return IRQ_HANDLED; |
20134 |
+ } |
20135 |
+ |
20136 |
+ static void lmh_enable_interrupt(struct irq_data *d) |
20137 |
+diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c |
20138 |
+index 7419e196dbb06..1037de19873a5 100644 |
20139 |
+--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c |
20140 |
++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c |
20141 |
+@@ -251,7 +251,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, |
20142 |
+ disable_s2_shutdown = true; |
20143 |
+ else |
20144 |
+ dev_warn(chip->dev, |
20145 |
+- "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n"); |
20146 |
++ "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n", |
20147 |
++ temp, stage2_threshold_max, stage2_threshold_max); |
20148 |
+ } |
20149 |
+ |
20150 |
+ skip: |
20151 |
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c |
20152 |
+index 867c8aa92b3ac..38082fdc4fdef 100644 |
20153 |
+--- a/drivers/thermal/thermal_core.c |
20154 |
++++ b/drivers/thermal/thermal_core.c |
20155 |
+@@ -905,10 +905,6 @@ __thermal_cooling_device_register(struct device_node *np, |
20156 |
+ cdev->id = ret; |
20157 |
+ id = ret; |
20158 |
+ |
20159 |
+- ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
20160 |
+- if (ret) |
20161 |
+- goto out_ida_remove; |
20162 |
+- |
20163 |
+ cdev->type = kstrdup(type ? type : "", GFP_KERNEL); |
20164 |
+ if (!cdev->type) { |
20165 |
+ ret = -ENOMEM; |
20166 |
+@@ -923,6 +919,11 @@ __thermal_cooling_device_register(struct device_node *np, |
20167 |
+ cdev->device.class = &thermal_class; |
20168 |
+ cdev->devdata = devdata; |
20169 |
+ thermal_cooling_device_setup_sysfs(cdev); |
20170 |
++ ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
20171 |
++ if (ret) { |
20172 |
++ thermal_cooling_device_destroy_sysfs(cdev); |
20173 |
++ goto out_kfree_type; |
20174 |
++ } |
20175 |
+ ret = device_register(&cdev->device); |
20176 |
+ if (ret) |
20177 |
+ goto out_kfree_type; |
20178 |
+@@ -1235,10 +1236,6 @@ thermal_zone_device_register(const char *type, int trips, int mask, |
20179 |
+ tz->id = id; |
20180 |
+ strlcpy(tz->type, type, sizeof(tz->type)); |
20181 |
+ |
20182 |
+- result = dev_set_name(&tz->device, "thermal_zone%d", tz->id); |
20183 |
+- if (result) |
20184 |
+- goto remove_id; |
20185 |
+- |
20186 |
+ if (!ops->critical) |
20187 |
+ ops->critical = thermal_zone_device_critical; |
20188 |
+ |
20189 |
+@@ -1260,6 +1257,11 @@ thermal_zone_device_register(const char *type, int trips, int mask, |
20190 |
+ /* A new thermal zone needs to be updated anyway. */ |
20191 |
+ atomic_set(&tz->need_update, 1); |
20192 |
+ |
20193 |
++ result = dev_set_name(&tz->device, "thermal_zone%d", tz->id); |
20194 |
++ if (result) { |
20195 |
++ thermal_zone_destroy_device_groups(tz); |
20196 |
++ goto remove_id; |
20197 |
++ } |
20198 |
+ result = device_register(&tz->device); |
20199 |
+ if (result) |
20200 |
+ goto release_device; |
20201 |
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c |
20202 |
+index 711cf30e835aa..60b3ac1a03175 100644 |
20203 |
+--- a/drivers/tty/serial/8250/8250_bcm7271.c |
20204 |
++++ b/drivers/tty/serial/8250/8250_bcm7271.c |
20205 |
+@@ -1214,9 +1214,17 @@ static struct platform_driver brcmuart_platform_driver = { |
20206 |
+ |
20207 |
+ static int __init brcmuart_init(void) |
20208 |
+ { |
20209 |
++ int ret; |
20210 |
++ |
20211 |
+ brcmuart_debugfs_root = debugfs_create_dir( |
20212 |
+ brcmuart_platform_driver.driver.name, NULL); |
20213 |
+- return platform_driver_register(&brcmuart_platform_driver); |
20214 |
++ ret = platform_driver_register(&brcmuart_platform_driver); |
20215 |
++ if (ret) { |
20216 |
++ debugfs_remove_recursive(brcmuart_debugfs_root); |
20217 |
++ return ret; |
20218 |
++ } |
20219 |
++ |
20220 |
++ return 0; |
20221 |
+ } |
20222 |
+ module_init(brcmuart_init); |
20223 |
+ |
20224 |
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c |
20225 |
+index 7c5f4e966b594..91799c420e250 100644 |
20226 |
+--- a/drivers/tty/serial/altera_uart.c |
20227 |
++++ b/drivers/tty/serial/altera_uart.c |
20228 |
+@@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port, |
20229 |
+ */ |
20230 |
+ } |
20231 |
+ |
20232 |
+-static void altera_uart_rx_chars(struct altera_uart *pp) |
20233 |
++static void altera_uart_rx_chars(struct uart_port *port) |
20234 |
+ { |
20235 |
+- struct uart_port *port = &pp->port; |
20236 |
+ unsigned char ch, flag; |
20237 |
+ unsigned short status; |
20238 |
+ |
20239 |
+@@ -246,9 +245,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp) |
20240 |
+ tty_flip_buffer_push(&port->state->port); |
20241 |
+ } |
20242 |
+ |
20243 |
+-static void altera_uart_tx_chars(struct altera_uart *pp) |
20244 |
++static void altera_uart_tx_chars(struct uart_port *port) |
20245 |
+ { |
20246 |
+- struct uart_port *port = &pp->port; |
20247 |
+ struct circ_buf *xmit = &port->state->xmit; |
20248 |
+ |
20249 |
+ if (port->x_char) { |
20250 |
+@@ -272,26 +270,25 @@ static void altera_uart_tx_chars(struct altera_uart *pp) |
20251 |
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
20252 |
+ uart_write_wakeup(port); |
20253 |
+ |
20254 |
+- if (xmit->head == xmit->tail) { |
20255 |
+- pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; |
20256 |
+- altera_uart_update_ctrl_reg(pp); |
20257 |
+- } |
20258 |
++ if (uart_circ_empty(xmit)) |
20259 |
++ altera_uart_stop_tx(port); |
20260 |
+ } |
20261 |
+ |
20262 |
+ static irqreturn_t altera_uart_interrupt(int irq, void *data) |
20263 |
+ { |
20264 |
+ struct uart_port *port = data; |
20265 |
+ struct altera_uart *pp = container_of(port, struct altera_uart, port); |
20266 |
++ unsigned long flags; |
20267 |
+ unsigned int isr; |
20268 |
+ |
20269 |
+ isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; |
20270 |
+ |
20271 |
+- spin_lock(&port->lock); |
20272 |
++ spin_lock_irqsave(&port->lock, flags); |
20273 |
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK) |
20274 |
+- altera_uart_rx_chars(pp); |
20275 |
++ altera_uart_rx_chars(port); |
20276 |
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK) |
20277 |
+- altera_uart_tx_chars(pp); |
20278 |
+- spin_unlock(&port->lock); |
20279 |
++ altera_uart_tx_chars(port); |
20280 |
++ spin_unlock_irqrestore(&port->lock, flags); |
20281 |
+ |
20282 |
+ return IRQ_RETVAL(isr); |
20283 |
+ } |
20284 |
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c |
20285 |
+index 300a8bbb4b807..7c8515f83f0a7 100644 |
20286 |
+--- a/drivers/tty/serial/amba-pl011.c |
20287 |
++++ b/drivers/tty/serial/amba-pl011.c |
20288 |
+@@ -1050,6 +1050,9 @@ static void pl011_dma_rx_callback(void *data) |
20289 |
+ */ |
20290 |
+ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) |
20291 |
+ { |
20292 |
++ if (!uap->using_rx_dma) |
20293 |
++ return; |
20294 |
++ |
20295 |
+ /* FIXME. Just disable the DMA enable */ |
20296 |
+ uap->dmacr &= ~UART011_RXDMAE; |
20297 |
+ pl011_write(uap->dmacr, uap, REG_DMACR); |
20298 |
+@@ -1833,8 +1836,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) |
20299 |
+ static void pl011_unthrottle_rx(struct uart_port *port) |
20300 |
+ { |
20301 |
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); |
20302 |
++ unsigned long flags; |
20303 |
+ |
20304 |
+- pl011_enable_interrupts(uap); |
20305 |
++ spin_lock_irqsave(&uap->port.lock, flags); |
20306 |
++ |
20307 |
++ uap->im = UART011_RTIM; |
20308 |
++ if (!pl011_dma_rx_running(uap)) |
20309 |
++ uap->im |= UART011_RXIM; |
20310 |
++ |
20311 |
++ pl011_write(uap->im, uap, REG_IMSC); |
20312 |
++ |
20313 |
++ spin_unlock_irqrestore(&uap->port.lock, flags); |
20314 |
+ } |
20315 |
+ |
20316 |
+ static int pl011_startup(struct uart_port *port) |
20317 |
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c |
20318 |
+index 1e65933f6ccec..52cab2038da8c 100644 |
20319 |
+--- a/drivers/tty/serial/pch_uart.c |
20320 |
++++ b/drivers/tty/serial/pch_uart.c |
20321 |
+@@ -707,6 +707,7 @@ static void pch_request_dma(struct uart_port *port) |
20322 |
+ if (!chan) { |
20323 |
+ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n", |
20324 |
+ __func__); |
20325 |
++ pci_dev_put(dma_dev); |
20326 |
+ return; |
20327 |
+ } |
20328 |
+ priv->chan_tx = chan; |
20329 |
+@@ -723,6 +724,7 @@ static void pch_request_dma(struct uart_port *port) |
20330 |
+ __func__); |
20331 |
+ dma_release_channel(priv->chan_tx); |
20332 |
+ priv->chan_tx = NULL; |
20333 |
++ pci_dev_put(dma_dev); |
20334 |
+ return; |
20335 |
+ } |
20336 |
+ |
20337 |
+@@ -730,6 +732,8 @@ static void pch_request_dma(struct uart_port *port) |
20338 |
+ priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize, |
20339 |
+ &priv->rx_buf_dma, GFP_KERNEL); |
20340 |
+ priv->chan_rx = chan; |
20341 |
++ |
20342 |
++ pci_dev_put(dma_dev); |
20343 |
+ } |
20344 |
+ |
20345 |
+ static void pch_dma_rx_complete(void *arg) |
20346 |
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c |
20347 |
+index d4dba298de7af..79187ff9ac131 100644 |
20348 |
+--- a/drivers/tty/serial/serial-tegra.c |
20349 |
++++ b/drivers/tty/serial/serial-tegra.c |
20350 |
+@@ -619,8 +619,9 @@ static void tegra_uart_stop_tx(struct uart_port *u) |
20351 |
+ if (tup->tx_in_progress != TEGRA_UART_TX_DMA) |
20352 |
+ return; |
20353 |
+ |
20354 |
+- dmaengine_terminate_all(tup->tx_dma_chan); |
20355 |
++ dmaengine_pause(tup->tx_dma_chan); |
20356 |
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); |
20357 |
++ dmaengine_terminate_all(tup->tx_dma_chan); |
20358 |
+ count = tup->tx_bytes_requested - state.residue; |
20359 |
+ async_tx_ack(tup->tx_dma_desc); |
20360 |
+ uart_xmit_advance(&tup->uport, count); |
20361 |
+@@ -763,8 +764,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup) |
20362 |
+ return; |
20363 |
+ } |
20364 |
+ |
20365 |
+- dmaengine_terminate_all(tup->rx_dma_chan); |
20366 |
++ dmaengine_pause(tup->rx_dma_chan); |
20367 |
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state); |
20368 |
++ dmaengine_terminate_all(tup->rx_dma_chan); |
20369 |
+ |
20370 |
+ tegra_uart_rx_buffer_push(tup, state.residue); |
20371 |
+ tup->rx_dma_active = false; |
20372 |
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c |
20373 |
+index ce7ff7a0207f2..5c60960e185d2 100644 |
20374 |
+--- a/drivers/tty/serial/stm32-usart.c |
20375 |
++++ b/drivers/tty/serial/stm32-usart.c |
20376 |
+@@ -1363,22 +1363,10 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) |
20377 |
+ if (!stm32port->info) |
20378 |
+ return -EINVAL; |
20379 |
+ |
20380 |
+- ret = stm32_usart_init_port(stm32port, pdev); |
20381 |
+- if (ret) |
20382 |
+- return ret; |
20383 |
+- |
20384 |
+- if (stm32port->wakeup_src) { |
20385 |
+- device_set_wakeup_capable(&pdev->dev, true); |
20386 |
+- ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); |
20387 |
+- if (ret) |
20388 |
+- goto err_deinit_port; |
20389 |
+- } |
20390 |
+- |
20391 |
+ stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); |
20392 |
+- if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) { |
20393 |
+- ret = -EPROBE_DEFER; |
20394 |
+- goto err_wakeirq; |
20395 |
+- } |
20396 |
++ if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) |
20397 |
++ return -EPROBE_DEFER; |
20398 |
++ |
20399 |
+ /* Fall back in interrupt mode for any non-deferral error */ |
20400 |
+ if (IS_ERR(stm32port->rx_ch)) |
20401 |
+ stm32port->rx_ch = NULL; |
20402 |
+@@ -1392,6 +1380,17 @@ static int stm32_usart_serial_probe(struct platform_device *pdev) |
20403 |
+ if (IS_ERR(stm32port->tx_ch)) |
20404 |
+ stm32port->tx_ch = NULL; |
20405 |
+ |
20406 |
++ ret = stm32_usart_init_port(stm32port, pdev); |
20407 |
++ if (ret) |
20408 |
++ goto err_dma_tx; |
20409 |
++ |
20410 |
++ if (stm32port->wakeup_src) { |
20411 |
++ device_set_wakeup_capable(&pdev->dev, true); |
20412 |
++ ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); |
20413 |
++ if (ret) |
20414 |
++ goto err_deinit_port; |
20415 |
++ } |
20416 |
++ |
20417 |
+ if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { |
20418 |
+ /* Fall back in interrupt mode */ |
20419 |
+ dma_release_channel(stm32port->rx_ch); |
20420 |
+@@ -1428,19 +1427,11 @@ err_port: |
20421 |
+ pm_runtime_set_suspended(&pdev->dev); |
20422 |
+ pm_runtime_put_noidle(&pdev->dev); |
20423 |
+ |
20424 |
+- if (stm32port->tx_ch) { |
20425 |
++ if (stm32port->tx_ch) |
20426 |
+ stm32_usart_of_dma_tx_remove(stm32port, pdev); |
20427 |
+- dma_release_channel(stm32port->tx_ch); |
20428 |
+- } |
20429 |
+- |
20430 |
+ if (stm32port->rx_ch) |
20431 |
+ stm32_usart_of_dma_rx_remove(stm32port, pdev); |
20432 |
+ |
20433 |
+-err_dma_rx: |
20434 |
+- if (stm32port->rx_ch) |
20435 |
+- dma_release_channel(stm32port->rx_ch); |
20436 |
+- |
20437 |
+-err_wakeirq: |
20438 |
+ if (stm32port->wakeup_src) |
20439 |
+ dev_pm_clear_wake_irq(&pdev->dev); |
20440 |
+ |
20441 |
+@@ -1450,6 +1441,14 @@ err_deinit_port: |
20442 |
+ |
20443 |
+ stm32_usart_deinit_port(stm32port); |
20444 |
+ |
20445 |
++err_dma_tx: |
20446 |
++ if (stm32port->tx_ch) |
20447 |
++ dma_release_channel(stm32port->tx_ch); |
20448 |
++ |
20449 |
++err_dma_rx: |
20450 |
++ if (stm32port->rx_ch) |
20451 |
++ dma_release_channel(stm32port->rx_ch); |
20452 |
++ |
20453 |
+ return ret; |
20454 |
+ } |
20455 |
+ |
20456 |
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c |
20457 |
+index 92e5726340090..ac7cb80e4d6bd 100644 |
20458 |
+--- a/drivers/tty/serial/sunsab.c |
20459 |
++++ b/drivers/tty/serial/sunsab.c |
20460 |
+@@ -1137,7 +1137,13 @@ static int __init sunsab_init(void) |
20461 |
+ } |
20462 |
+ } |
20463 |
+ |
20464 |
+- return platform_driver_register(&sab_driver); |
20465 |
++ err = platform_driver_register(&sab_driver); |
20466 |
++ if (err) { |
20467 |
++ kfree(sunsab_ports); |
20468 |
++ sunsab_ports = NULL; |
20469 |
++ } |
20470 |
++ |
20471 |
++ return err; |
20472 |
+ } |
20473 |
+ |
20474 |
+ static void __exit sunsab_exit(void) |
20475 |
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c |
20476 |
+index 6b5cfa5b06733..28be820b546e9 100644 |
20477 |
+--- a/drivers/uio/uio_dmem_genirq.c |
20478 |
++++ b/drivers/uio/uio_dmem_genirq.c |
20479 |
+@@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info) |
20480 |
+ * remember the state so we can allow user space to enable it later. |
20481 |
+ */ |
20482 |
+ |
20483 |
++ spin_lock(&priv->lock); |
20484 |
+ if (!test_and_set_bit(0, &priv->flags)) |
20485 |
+ disable_irq_nosync(irq); |
20486 |
++ spin_unlock(&priv->lock); |
20487 |
+ |
20488 |
+ return IRQ_HANDLED; |
20489 |
+ } |
20490 |
+@@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) |
20491 |
+ * in the interrupt controller, but keep track of the |
20492 |
+ * state to prevent per-irq depth damage. |
20493 |
+ * |
20494 |
+- * Serialize this operation to support multiple tasks. |
20495 |
++ * Serialize this operation to support multiple tasks and concurrency |
20496 |
++ * with irq handler on SMP systems. |
20497 |
+ */ |
20498 |
+ |
20499 |
+ spin_lock_irqsave(&priv->lock, flags); |
20500 |
+ if (irq_on) { |
20501 |
+ if (test_and_clear_bit(0, &priv->flags)) |
20502 |
+ enable_irq(dev_info->irq); |
20503 |
+- spin_unlock_irqrestore(&priv->lock, flags); |
20504 |
+ } else { |
20505 |
+- if (!test_and_set_bit(0, &priv->flags)) { |
20506 |
+- spin_unlock_irqrestore(&priv->lock, flags); |
20507 |
+- disable_irq(dev_info->irq); |
20508 |
+- } |
20509 |
++ if (!test_and_set_bit(0, &priv->flags)) |
20510 |
++ disable_irq_nosync(dev_info->irq); |
20511 |
+ } |
20512 |
++ spin_unlock_irqrestore(&priv->lock, flags); |
20513 |
+ |
20514 |
+ return 0; |
20515 |
+ } |
20516 |
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c |
20517 |
+index 2f29431f612e0..b23e543b3a3d5 100644 |
20518 |
+--- a/drivers/usb/cdns3/cdnsp-ring.c |
20519 |
++++ b/drivers/usb/cdns3/cdnsp-ring.c |
20520 |
+@@ -2006,10 +2006,11 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) |
20521 |
+ |
20522 |
+ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) |
20523 |
+ { |
20524 |
+- u32 field, length_field, remainder; |
20525 |
++ u32 field, length_field, zlp = 0; |
20526 |
+ struct cdnsp_ep *pep = preq->pep; |
20527 |
+ struct cdnsp_ring *ep_ring; |
20528 |
+ int num_trbs; |
20529 |
++ u32 maxp; |
20530 |
+ int ret; |
20531 |
+ |
20532 |
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq); |
20533 |
+@@ -2019,26 +2020,33 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) |
20534 |
+ /* 1 TRB for data, 1 for status */ |
20535 |
+ num_trbs = (pdev->three_stage_setup) ? 2 : 1; |
20536 |
+ |
20537 |
++ maxp = usb_endpoint_maxp(pep->endpoint.desc); |
20538 |
++ |
20539 |
++ if (preq->request.zero && preq->request.length && |
20540 |
++ (preq->request.length % maxp == 0)) { |
20541 |
++ num_trbs++; |
20542 |
++ zlp = 1; |
20543 |
++ } |
20544 |
++ |
20545 |
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); |
20546 |
+ if (ret) |
20547 |
+ return ret; |
20548 |
+ |
20549 |
+ /* If there's data, queue data TRBs */ |
20550 |
+- if (pdev->ep0_expect_in) |
20551 |
+- field = TRB_TYPE(TRB_DATA) | TRB_IOC; |
20552 |
+- else |
20553 |
+- field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC; |
20554 |
+- |
20555 |
+ if (preq->request.length > 0) { |
20556 |
+- remainder = cdnsp_td_remainder(pdev, 0, preq->request.length, |
20557 |
+- preq->request.length, preq, 1, 0); |
20558 |
++ field = TRB_TYPE(TRB_DATA); |
20559 |
+ |
20560 |
+- length_field = TRB_LEN(preq->request.length) | |
20561 |
+- TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); |
20562 |
++ if (zlp) |
20563 |
++ field |= TRB_CHAIN; |
20564 |
++ else |
20565 |
++ field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP); |
20566 |
+ |
20567 |
+ if (pdev->ep0_expect_in) |
20568 |
+ field |= TRB_DIR_IN; |
20569 |
+ |
20570 |
++ length_field = TRB_LEN(preq->request.length) | |
20571 |
++ TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0); |
20572 |
++ |
20573 |
+ cdnsp_queue_trb(pdev, ep_ring, true, |
20574 |
+ lower_32_bits(preq->request.dma), |
20575 |
+ upper_32_bits(preq->request.dma), length_field, |
20576 |
+@@ -2046,6 +2054,20 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) |
20577 |
+ TRB_SETUPID(pdev->setup_id) | |
20578 |
+ pdev->setup_speed); |
20579 |
+ |
20580 |
++ if (zlp) { |
20581 |
++ field = TRB_TYPE(TRB_NORMAL) | TRB_IOC; |
20582 |
++ |
20583 |
++ if (!pdev->ep0_expect_in) |
20584 |
++ field = TRB_ISP; |
20585 |
++ |
20586 |
++ cdnsp_queue_trb(pdev, ep_ring, true, |
20587 |
++ lower_32_bits(preq->request.dma), |
20588 |
++ upper_32_bits(preq->request.dma), 0, |
20589 |
++ field | ep_ring->cycle_state | |
20590 |
++ TRB_SETUPID(pdev->setup_id) | |
20591 |
++ pdev->setup_speed); |
20592 |
++ } |
20593 |
++ |
20594 |
+ pdev->ep0_stage = CDNSP_DATA_STAGE; |
20595 |
+ } |
20596 |
+ |
20597 |
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c |
20598 |
+index a2f3e56aba05c..7258e640e9ee1 100644 |
20599 |
+--- a/drivers/usb/dwc3/core.c |
20600 |
++++ b/drivers/usb/dwc3/core.c |
20601 |
+@@ -120,21 +120,25 @@ static void __dwc3_set_mode(struct work_struct *work) |
20602 |
+ unsigned long flags; |
20603 |
+ int ret; |
20604 |
+ u32 reg; |
20605 |
++ u32 desired_dr_role; |
20606 |
+ |
20607 |
+ mutex_lock(&dwc->mutex); |
20608 |
++ spin_lock_irqsave(&dwc->lock, flags); |
20609 |
++ desired_dr_role = dwc->desired_dr_role; |
20610 |
++ spin_unlock_irqrestore(&dwc->lock, flags); |
20611 |
+ |
20612 |
+ pm_runtime_get_sync(dwc->dev); |
20613 |
+ |
20614 |
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) |
20615 |
+ dwc3_otg_update(dwc, 0); |
20616 |
+ |
20617 |
+- if (!dwc->desired_dr_role) |
20618 |
++ if (!desired_dr_role) |
20619 |
+ goto out; |
20620 |
+ |
20621 |
+- if (dwc->desired_dr_role == dwc->current_dr_role) |
20622 |
++ if (desired_dr_role == dwc->current_dr_role) |
20623 |
+ goto out; |
20624 |
+ |
20625 |
+- if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) |
20626 |
++ if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) |
20627 |
+ goto out; |
20628 |
+ |
20629 |
+ switch (dwc->current_dr_role) { |
20630 |
+@@ -162,7 +166,7 @@ static void __dwc3_set_mode(struct work_struct *work) |
20631 |
+ */ |
20632 |
+ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || |
20633 |
+ DWC3_VER_IS_PRIOR(DWC31, 190A)) && |
20634 |
+- dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { |
20635 |
++ desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { |
20636 |
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL); |
20637 |
+ reg |= DWC3_GCTL_CORESOFTRESET; |
20638 |
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg); |
20639 |
+@@ -182,11 +186,11 @@ static void __dwc3_set_mode(struct work_struct *work) |
20640 |
+ |
20641 |
+ spin_lock_irqsave(&dwc->lock, flags); |
20642 |
+ |
20643 |
+- dwc3_set_prtcap(dwc, dwc->desired_dr_role); |
20644 |
++ dwc3_set_prtcap(dwc, desired_dr_role); |
20645 |
+ |
20646 |
+ spin_unlock_irqrestore(&dwc->lock, flags); |
20647 |
+ |
20648 |
+- switch (dwc->desired_dr_role) { |
20649 |
++ switch (desired_dr_role) { |
20650 |
+ case DWC3_GCTL_PRTCAP_HOST: |
20651 |
+ ret = dwc3_host_init(dwc); |
20652 |
+ if (ret) { |
20653 |
+@@ -959,8 +963,13 @@ static int dwc3_core_init(struct dwc3 *dwc) |
20654 |
+ |
20655 |
+ if (!dwc->ulpi_ready) { |
20656 |
+ ret = dwc3_core_ulpi_init(dwc); |
20657 |
+- if (ret) |
20658 |
++ if (ret) { |
20659 |
++ if (ret == -ETIMEDOUT) { |
20660 |
++ dwc3_core_soft_reset(dwc); |
20661 |
++ ret = -EPROBE_DEFER; |
20662 |
++ } |
20663 |
+ goto err0; |
20664 |
++ } |
20665 |
+ dwc->ulpi_ready = true; |
20666 |
+ } |
20667 |
+ |
20668 |
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c |
20669 |
+index ca0a7d9eaa34e..6be6009f911e1 100644 |
20670 |
+--- a/drivers/usb/gadget/function/f_hid.c |
20671 |
++++ b/drivers/usb/gadget/function/f_hid.c |
20672 |
+@@ -71,7 +71,7 @@ struct f_hidg { |
20673 |
+ wait_queue_head_t write_queue; |
20674 |
+ struct usb_request *req; |
20675 |
+ |
20676 |
+- int minor; |
20677 |
++ struct device dev; |
20678 |
+ struct cdev cdev; |
20679 |
+ struct usb_function func; |
20680 |
+ |
20681 |
+@@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f) |
20682 |
+ return container_of(f, struct f_hidg, func); |
20683 |
+ } |
20684 |
+ |
20685 |
++static void hidg_release(struct device *dev) |
20686 |
++{ |
20687 |
++ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); |
20688 |
++ |
20689 |
++ kfree(hidg->set_report_buf); |
20690 |
++ kfree(hidg); |
20691 |
++} |
20692 |
++ |
20693 |
+ /*-------------------------------------------------------------------------*/ |
20694 |
+ /* Static descriptors */ |
20695 |
+ |
20696 |
+@@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) |
20697 |
+ struct usb_ep *ep; |
20698 |
+ struct f_hidg *hidg = func_to_hidg(f); |
20699 |
+ struct usb_string *us; |
20700 |
+- struct device *device; |
20701 |
+ int status; |
20702 |
+- dev_t dev; |
20703 |
+ |
20704 |
+ /* maybe allocate device-global string IDs, and patch descriptors */ |
20705 |
+ us = usb_gstrings_attach(c->cdev, ct_func_strings, |
20706 |
+@@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) |
20707 |
+ |
20708 |
+ /* create char device */ |
20709 |
+ cdev_init(&hidg->cdev, &f_hidg_fops); |
20710 |
+- dev = MKDEV(major, hidg->minor); |
20711 |
+- status = cdev_add(&hidg->cdev, dev, 1); |
20712 |
++ status = cdev_device_add(&hidg->cdev, &hidg->dev); |
20713 |
+ if (status) |
20714 |
+ goto fail_free_descs; |
20715 |
+ |
20716 |
+- device = device_create(hidg_class, NULL, dev, NULL, |
20717 |
+- "%s%d", "hidg", hidg->minor); |
20718 |
+- if (IS_ERR(device)) { |
20719 |
+- status = PTR_ERR(device); |
20720 |
+- goto del; |
20721 |
+- } |
20722 |
+- |
20723 |
+ return 0; |
20724 |
+-del: |
20725 |
+- cdev_del(&hidg->cdev); |
20726 |
+ fail_free_descs: |
20727 |
+ usb_free_all_descriptors(f); |
20728 |
+ fail: |
20729 |
+@@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f) |
20730 |
+ |
20731 |
+ hidg = func_to_hidg(f); |
20732 |
+ opts = container_of(f->fi, struct f_hid_opts, func_inst); |
20733 |
+- kfree(hidg->report_desc); |
20734 |
+- kfree(hidg->set_report_buf); |
20735 |
+- kfree(hidg); |
20736 |
++ put_device(&hidg->dev); |
20737 |
+ mutex_lock(&opts->lock); |
20738 |
+ --opts->refcnt; |
20739 |
+ mutex_unlock(&opts->lock); |
20740 |
+@@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) |
20741 |
+ { |
20742 |
+ struct f_hidg *hidg = func_to_hidg(f); |
20743 |
+ |
20744 |
+- device_destroy(hidg_class, MKDEV(major, hidg->minor)); |
20745 |
+- cdev_del(&hidg->cdev); |
20746 |
++ cdev_device_del(&hidg->cdev, &hidg->dev); |
20747 |
+ |
20748 |
+ usb_free_all_descriptors(f); |
20749 |
+ } |
20750 |
+@@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) |
20751 |
+ { |
20752 |
+ struct f_hidg *hidg; |
20753 |
+ struct f_hid_opts *opts; |
20754 |
++ int ret; |
20755 |
+ |
20756 |
+ /* allocate and initialize one new instance */ |
20757 |
+ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); |
20758 |
+@@ -1277,17 +1271,28 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) |
20759 |
+ mutex_lock(&opts->lock); |
20760 |
+ ++opts->refcnt; |
20761 |
+ |
20762 |
+- hidg->minor = opts->minor; |
20763 |
++ device_initialize(&hidg->dev); |
20764 |
++ hidg->dev.release = hidg_release; |
20765 |
++ hidg->dev.class = hidg_class; |
20766 |
++ hidg->dev.devt = MKDEV(major, opts->minor); |
20767 |
++ ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor); |
20768 |
++ if (ret) { |
20769 |
++ --opts->refcnt; |
20770 |
++ mutex_unlock(&opts->lock); |
20771 |
++ return ERR_PTR(ret); |
20772 |
++ } |
20773 |
++ |
20774 |
+ hidg->bInterfaceSubClass = opts->subclass; |
20775 |
+ hidg->bInterfaceProtocol = opts->protocol; |
20776 |
+ hidg->report_length = opts->report_length; |
20777 |
+ hidg->report_desc_length = opts->report_desc_length; |
20778 |
+ if (opts->report_desc) { |
20779 |
+- hidg->report_desc = kmemdup(opts->report_desc, |
20780 |
+- opts->report_desc_length, |
20781 |
+- GFP_KERNEL); |
20782 |
++ hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, |
20783 |
++ opts->report_desc_length, |
20784 |
++ GFP_KERNEL); |
20785 |
+ if (!hidg->report_desc) { |
20786 |
+- kfree(hidg); |
20787 |
++ put_device(&hidg->dev); |
20788 |
++ --opts->refcnt; |
20789 |
+ mutex_unlock(&opts->lock); |
20790 |
+ return ERR_PTR(-ENOMEM); |
20791 |
+ } |
20792 |
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c |
20793 |
+index fdca28e72a3b4..d0e051beb3af9 100644 |
20794 |
+--- a/drivers/usb/gadget/udc/fotg210-udc.c |
20795 |
++++ b/drivers/usb/gadget/udc/fotg210-udc.c |
20796 |
+@@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210) |
20797 |
+ static void fotg210_set_address(struct fotg210_udc *fotg210, |
20798 |
+ struct usb_ctrlrequest *ctrl) |
20799 |
+ { |
20800 |
+- if (ctrl->wValue >= 0x0100) { |
20801 |
++ if (le16_to_cpu(ctrl->wValue) >= 0x0100) { |
20802 |
+ fotg210_request_error(fotg210); |
20803 |
+ } else { |
20804 |
+- fotg210_set_dev_addr(fotg210, ctrl->wValue); |
20805 |
++ fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue)); |
20806 |
+ fotg210_set_cxdone(fotg210); |
20807 |
+ } |
20808 |
+ } |
20809 |
+@@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210, |
20810 |
+ |
20811 |
+ switch (ctrl->bRequestType & USB_RECIP_MASK) { |
20812 |
+ case USB_RECIP_DEVICE: |
20813 |
+- fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED; |
20814 |
++ fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED); |
20815 |
+ break; |
20816 |
+ case USB_RECIP_INTERFACE: |
20817 |
+- fotg210->ep0_data = 0; |
20818 |
++ fotg210->ep0_data = cpu_to_le16(0); |
20819 |
+ break; |
20820 |
+ case USB_RECIP_ENDPOINT: |
20821 |
+ epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; |
20822 |
+ if (epnum) |
20823 |
+ fotg210->ep0_data = |
20824 |
+- fotg210_is_epnstall(fotg210->ep[epnum]) |
20825 |
+- << USB_ENDPOINT_HALT; |
20826 |
++ cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum]) |
20827 |
++ << USB_ENDPOINT_HALT); |
20828 |
+ else |
20829 |
+ fotg210_request_error(fotg210); |
20830 |
+ break; |
20831 |
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c |
20832 |
+index c53f6f276d5c6..f8a63c1434925 100644 |
20833 |
+--- a/drivers/usb/host/xhci-mtk.c |
20834 |
++++ b/drivers/usb/host/xhci-mtk.c |
20835 |
+@@ -619,7 +619,6 @@ static int xhci_mtk_probe(struct platform_device *pdev) |
20836 |
+ |
20837 |
+ dealloc_usb3_hcd: |
20838 |
+ usb_remove_hcd(xhci->shared_hcd); |
20839 |
+- xhci->shared_hcd = NULL; |
20840 |
+ |
20841 |
+ dealloc_usb2_hcd: |
20842 |
+ usb_remove_hcd(hcd); |
20843 |
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
20844 |
+index f9707997969d4..90bf5d57b1a95 100644 |
20845 |
+--- a/drivers/usb/host/xhci-ring.c |
20846 |
++++ b/drivers/usb/host/xhci-ring.c |
20847 |
+@@ -2524,7 +2524,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
20848 |
+ |
20849 |
+ switch (trb_comp_code) { |
20850 |
+ case COMP_SUCCESS: |
20851 |
+- ep_ring->err_count = 0; |
20852 |
++ ep->err_count = 0; |
20853 |
+ /* handle success with untransferred data as short packet */ |
20854 |
+ if (ep_trb != td->last_trb || remaining) { |
20855 |
+ xhci_warn(xhci, "WARN Successful completion on short TX\n"); |
20856 |
+@@ -2550,7 +2550,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, |
20857 |
+ break; |
20858 |
+ case COMP_USB_TRANSACTION_ERROR: |
20859 |
+ if (xhci->quirks & XHCI_NO_SOFT_RETRY || |
20860 |
+- (ep_ring->err_count++ > MAX_SOFT_RETRY) || |
20861 |
++ (ep->err_count++ > MAX_SOFT_RETRY) || |
20862 |
+ le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) |
20863 |
+ break; |
20864 |
+ |
20865 |
+@@ -2631,8 +2631,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, |
20866 |
+ case COMP_USB_TRANSACTION_ERROR: |
20867 |
+ case COMP_INVALID_STREAM_TYPE_ERROR: |
20868 |
+ case COMP_INVALID_STREAM_ID_ERROR: |
20869 |
+- xhci_handle_halted_endpoint(xhci, ep, 0, NULL, |
20870 |
+- EP_SOFT_RESET); |
20871 |
++ xhci_dbg(xhci, "Stream transaction error ep %u no id\n", |
20872 |
++ ep_index); |
20873 |
++ if (ep->err_count++ > MAX_SOFT_RETRY) |
20874 |
++ xhci_handle_halted_endpoint(xhci, ep, 0, NULL, |
20875 |
++ EP_HARD_RESET); |
20876 |
++ else |
20877 |
++ xhci_handle_halted_endpoint(xhci, ep, 0, NULL, |
20878 |
++ EP_SOFT_RESET); |
20879 |
+ goto cleanup; |
20880 |
+ case COMP_RING_UNDERRUN: |
20881 |
+ case COMP_RING_OVERRUN: |
20882 |
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
20883 |
+index d3c0766c1984b..4193427a7f0dc 100644 |
20884 |
+--- a/drivers/usb/host/xhci.h |
20885 |
++++ b/drivers/usb/host/xhci.h |
20886 |
+@@ -933,6 +933,7 @@ struct xhci_virt_ep { |
20887 |
+ * have to restore the device state to the previous state |
20888 |
+ */ |
20889 |
+ struct xhci_ring *new_ring; |
20890 |
++ unsigned int err_count; |
20891 |
+ unsigned int ep_state; |
20892 |
+ #define SET_DEQ_PENDING (1 << 0) |
20893 |
+ #define EP_HALTED (1 << 1) /* For stall handling */ |
20894 |
+@@ -1629,7 +1630,6 @@ struct xhci_ring { |
20895 |
+ * if we own the TRB (if we are the consumer). See section 4.9.1. |
20896 |
+ */ |
20897 |
+ u32 cycle_state; |
20898 |
+- unsigned int err_count; |
20899 |
+ unsigned int stream_id; |
20900 |
+ unsigned int num_segs; |
20901 |
+ unsigned int num_trbs_free; |
20902 |
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c |
20903 |
+index dc67fff8e9418..22c3df49ba8af 100644 |
20904 |
+--- a/drivers/usb/musb/musb_gadget.c |
20905 |
++++ b/drivers/usb/musb/musb_gadget.c |
20906 |
+@@ -1628,8 +1628,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) |
20907 |
+ { |
20908 |
+ struct musb *musb = gadget_to_musb(gadget); |
20909 |
+ |
20910 |
+- if (!musb->xceiv->set_power) |
20911 |
+- return -EOPNOTSUPP; |
20912 |
+ return usb_phy_set_power(musb->xceiv, mA); |
20913 |
+ } |
20914 |
+ |
20915 |
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c |
20916 |
+index dfaed7eee94fc..32e6d19f7011a 100644 |
20917 |
+--- a/drivers/usb/roles/class.c |
20918 |
++++ b/drivers/usb/roles/class.c |
20919 |
+@@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode) |
20920 |
+ struct fwnode_handle *parent = fwnode_get_parent(fwnode); |
20921 |
+ struct device *dev; |
20922 |
+ |
20923 |
+- if (!parent || !fwnode_property_present(parent, "usb-role-switch")) |
20924 |
++ if (!fwnode_property_present(parent, "usb-role-switch")) { |
20925 |
++ fwnode_handle_put(parent); |
20926 |
+ return NULL; |
20927 |
++ } |
20928 |
+ |
20929 |
+ dev = class_find_device_by_fwnode(role_class, parent); |
20930 |
++ fwnode_handle_put(parent); |
20931 |
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); |
20932 |
+ } |
20933 |
+ |
20934 |
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c |
20935 |
+index 20b857e97e60c..7e4ce0e7e05a7 100644 |
20936 |
+--- a/drivers/usb/storage/alauda.c |
20937 |
++++ b/drivers/usb/storage/alauda.c |
20938 |
+@@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us) |
20939 |
+ + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); |
20940 |
+ MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); |
20941 |
+ MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); |
20942 |
++ if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL) |
20943 |
++ return USB_STOR_TRANSPORT_ERROR; |
20944 |
+ |
20945 |
+ if (alauda_reset_media(us) != USB_STOR_XFER_GOOD) |
20946 |
+ return USB_STOR_TRANSPORT_ERROR; |
20947 |
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c |
20948 |
+index 78e0e78954f2d..0aefb9e14f228 100644 |
20949 |
+--- a/drivers/usb/typec/bus.c |
20950 |
++++ b/drivers/usb/typec/bus.c |
20951 |
+@@ -134,7 +134,7 @@ int typec_altmode_exit(struct typec_altmode *adev) |
20952 |
+ if (!adev || !adev->active) |
20953 |
+ return 0; |
20954 |
+ |
20955 |
+- if (!pdev->ops || !pdev->ops->enter) |
20956 |
++ if (!pdev->ops || !pdev->ops->exit) |
20957 |
+ return -EOPNOTSUPP; |
20958 |
+ |
20959 |
+ /* Moving to USB Safe State */ |
20960 |
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c |
20961 |
+index ff6c14d7b1a83..339752fef65e0 100644 |
20962 |
+--- a/drivers/usb/typec/class.c |
20963 |
++++ b/drivers/usb/typec/class.c |
20964 |
+@@ -1895,6 +1895,49 @@ void *typec_get_drvdata(struct typec_port *port) |
20965 |
+ } |
20966 |
+ EXPORT_SYMBOL_GPL(typec_get_drvdata); |
20967 |
+ |
20968 |
++int typec_get_fw_cap(struct typec_capability *cap, |
20969 |
++ struct fwnode_handle *fwnode) |
20970 |
++{ |
20971 |
++ const char *cap_str; |
20972 |
++ int ret; |
20973 |
++ |
20974 |
++ cap->fwnode = fwnode; |
20975 |
++ |
20976 |
++ ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); |
20977 |
++ if (ret < 0) |
20978 |
++ return ret; |
20979 |
++ |
20980 |
++ ret = typec_find_port_power_role(cap_str); |
20981 |
++ if (ret < 0) |
20982 |
++ return ret; |
20983 |
++ cap->type = ret; |
20984 |
++ |
20985 |
++ /* USB data support is optional */ |
20986 |
++ ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); |
20987 |
++ if (ret == 0) { |
20988 |
++ ret = typec_find_port_data_role(cap_str); |
20989 |
++ if (ret < 0) |
20990 |
++ return ret; |
20991 |
++ cap->data = ret; |
20992 |
++ } |
20993 |
++ |
20994 |
++ /* Get the preferred power role for a DRP */ |
20995 |
++ if (cap->type == TYPEC_PORT_DRP) { |
20996 |
++ cap->prefer_role = TYPEC_NO_PREFERRED_ROLE; |
20997 |
++ |
20998 |
++ ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); |
20999 |
++ if (ret == 0) { |
21000 |
++ ret = typec_find_power_role(cap_str); |
21001 |
++ if (ret < 0) |
21002 |
++ return ret; |
21003 |
++ cap->prefer_role = ret; |
21004 |
++ } |
21005 |
++ } |
21006 |
++ |
21007 |
++ return 0; |
21008 |
++} |
21009 |
++EXPORT_SYMBOL_GPL(typec_get_fw_cap); |
21010 |
++ |
21011 |
+ /** |
21012 |
+ * typec_port_register_altmode - Register USB Type-C Port Alternate Mode |
21013 |
+ * @port: USB Type-C Port that supports the alternate mode |
21014 |
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c |
21015 |
+index 64e248117c41a..5340a3a3a81bb 100644 |
21016 |
+--- a/drivers/usb/typec/tcpm/tcpci.c |
21017 |
++++ b/drivers/usb/typec/tcpm/tcpci.c |
21018 |
+@@ -817,8 +817,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data) |
21019 |
+ return ERR_PTR(err); |
21020 |
+ |
21021 |
+ tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc); |
21022 |
+- if (IS_ERR(tcpci->port)) |
21023 |
++ if (IS_ERR(tcpci->port)) { |
21024 |
++ fwnode_handle_put(tcpci->tcpc.fwnode); |
21025 |
+ return ERR_CAST(tcpci->port); |
21026 |
++ } |
21027 |
+ |
21028 |
+ return tcpci; |
21029 |
+ } |
21030 |
+@@ -827,6 +829,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port); |
21031 |
+ void tcpci_unregister_port(struct tcpci *tcpci) |
21032 |
+ { |
21033 |
+ tcpm_unregister_port(tcpci->port); |
21034 |
++ fwnode_handle_put(tcpci->tcpc.fwnode); |
21035 |
+ } |
21036 |
+ EXPORT_SYMBOL_GPL(tcpci_unregister_port); |
21037 |
+ |
21038 |
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c |
21039 |
+index 33aadc0a29ea8..984a13a9efc22 100644 |
21040 |
+--- a/drivers/usb/typec/tcpm/tcpm.c |
21041 |
++++ b/drivers/usb/typec/tcpm/tcpm.c |
21042 |
+@@ -5928,7 +5928,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, |
21043 |
+ struct fwnode_handle *fwnode) |
21044 |
+ { |
21045 |
+ const char *opmode_str; |
21046 |
+- const char *cap_str; |
21047 |
+ int ret; |
21048 |
+ u32 mw, frs_current; |
21049 |
+ |
21050 |
+@@ -5944,23 +5943,10 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, |
21051 |
+ */ |
21052 |
+ fw_devlink_purge_absent_suppliers(fwnode); |
21053 |
+ |
21054 |
+- /* USB data support is optional */ |
21055 |
+- ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); |
21056 |
+- if (ret == 0) { |
21057 |
+- ret = typec_find_port_data_role(cap_str); |
21058 |
+- if (ret < 0) |
21059 |
+- return ret; |
21060 |
+- port->typec_caps.data = ret; |
21061 |
+- } |
21062 |
+- |
21063 |
+- ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); |
21064 |
++ ret = typec_get_fw_cap(&port->typec_caps, fwnode); |
21065 |
+ if (ret < 0) |
21066 |
+ return ret; |
21067 |
+ |
21068 |
+- ret = typec_find_port_power_role(cap_str); |
21069 |
+- if (ret < 0) |
21070 |
+- return ret; |
21071 |
+- port->typec_caps.type = ret; |
21072 |
+ port->port_type = port->typec_caps.type; |
21073 |
+ port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable"); |
21074 |
+ |
21075 |
+@@ -5997,14 +5983,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, |
21076 |
+ if (port->port_type == TYPEC_PORT_SRC) |
21077 |
+ return 0; |
21078 |
+ |
21079 |
+- /* Get the preferred power role for DRP */ |
21080 |
+- ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); |
21081 |
+- if (ret < 0) |
21082 |
+- return ret; |
21083 |
+- |
21084 |
+- port->typec_caps.prefer_role = typec_find_power_role(cap_str); |
21085 |
+- if (port->typec_caps.prefer_role < 0) |
21086 |
+- return -EINVAL; |
21087 |
+ sink: |
21088 |
+ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); |
21089 |
+ |
21090 |
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c |
21091 |
+index 23a8b9b0b1fef..2f32c3fceef87 100644 |
21092 |
+--- a/drivers/usb/typec/tipd/core.c |
21093 |
++++ b/drivers/usb/typec/tipd/core.c |
21094 |
+@@ -684,14 +684,13 @@ static int tps6598x_probe(struct i2c_client *client) |
21095 |
+ |
21096 |
+ ret = devm_tps6598_psy_register(tps); |
21097 |
+ if (ret) |
21098 |
+- return ret; |
21099 |
++ goto err_role_put; |
21100 |
+ |
21101 |
+ tps->port = typec_register_port(&client->dev, &typec_cap); |
21102 |
+ if (IS_ERR(tps->port)) { |
21103 |
+ ret = PTR_ERR(tps->port); |
21104 |
+ goto err_role_put; |
21105 |
+ } |
21106 |
+- fwnode_handle_put(fwnode); |
21107 |
+ |
21108 |
+ if (status & TPS_STATUS_PLUG_PRESENT) { |
21109 |
+ ret = tps6598x_connect(tps, status); |
21110 |
+@@ -710,6 +709,7 @@ static int tps6598x_probe(struct i2c_client *client) |
21111 |
+ } |
21112 |
+ |
21113 |
+ i2c_set_clientdata(client, tps); |
21114 |
++ fwnode_handle_put(fwnode); |
21115 |
+ |
21116 |
+ return 0; |
21117 |
+ |
21118 |
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c |
21119 |
+index 6af7ce7d619c2..701bd99a87198 100644 |
21120 |
+--- a/drivers/vfio/platform/vfio_platform_common.c |
21121 |
++++ b/drivers/vfio/platform/vfio_platform_common.c |
21122 |
+@@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev, |
21123 |
+ const char **extra_dbg) |
21124 |
+ { |
21125 |
+ #ifdef CONFIG_ACPI |
21126 |
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
21127 |
+ struct device *dev = vdev->device; |
21128 |
+ acpi_handle handle = ACPI_HANDLE(dev); |
21129 |
+ acpi_status acpi_ret; |
21130 |
+ |
21131 |
+- acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer); |
21132 |
++ acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL); |
21133 |
+ if (ACPI_FAILURE(acpi_ret)) { |
21134 |
+ if (extra_dbg) |
21135 |
+ *extra_dbg = acpi_format_exception(acpi_ret); |
21136 |
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig |
21137 |
+index 6ed5e608dd041..26531aa194282 100644 |
21138 |
+--- a/drivers/video/fbdev/Kconfig |
21139 |
++++ b/drivers/video/fbdev/Kconfig |
21140 |
+@@ -606,6 +606,7 @@ config FB_TGA |
21141 |
+ config FB_UVESA |
21142 |
+ tristate "Userspace VESA VGA graphics support" |
21143 |
+ depends on FB && CONNECTOR |
21144 |
++ depends on !UML |
21145 |
+ select FB_CFB_FILLRECT |
21146 |
+ select FB_CFB_COPYAREA |
21147 |
+ select FB_CFB_IMAGEBLIT |
21148 |
+@@ -2218,7 +2219,6 @@ config FB_SSD1307 |
21149 |
+ select FB_SYS_COPYAREA |
21150 |
+ select FB_SYS_IMAGEBLIT |
21151 |
+ select FB_DEFERRED_IO |
21152 |
+- select PWM |
21153 |
+ select FB_BACKLIGHT |
21154 |
+ help |
21155 |
+ This driver implements support for the Solomon SSD1307 |
21156 |
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c |
21157 |
+index 1f37904b0405e..df40360e04ff7 100644 |
21158 |
+--- a/drivers/video/fbdev/core/fbcon.c |
21159 |
++++ b/drivers/video/fbdev/core/fbcon.c |
21160 |
+@@ -2462,7 +2462,8 @@ err_out: |
21161 |
+ |
21162 |
+ if (userfont) { |
21163 |
+ p->userfont = old_userfont; |
21164 |
+- REFCOUNT(data)--; |
21165 |
++ if (--REFCOUNT(data) == 0) |
21166 |
++ kfree(data - FONT_EXTRA_WORDS * sizeof(int)); |
21167 |
+ } |
21168 |
+ |
21169 |
+ vc->vc_font.width = old_width; |
21170 |
+diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c |
21171 |
+index 2398b3d48fedf..305f1587bd898 100644 |
21172 |
+--- a/drivers/video/fbdev/ep93xx-fb.c |
21173 |
++++ b/drivers/video/fbdev/ep93xx-fb.c |
21174 |
+@@ -552,12 +552,14 @@ static int ep93xxfb_probe(struct platform_device *pdev) |
21175 |
+ |
21176 |
+ err = register_framebuffer(info); |
21177 |
+ if (err) |
21178 |
+- goto failed_check; |
21179 |
++ goto failed_framebuffer; |
21180 |
+ |
21181 |
+ dev_info(info->dev, "registered. Mode = %dx%d-%d\n", |
21182 |
+ info->var.xres, info->var.yres, info->var.bits_per_pixel); |
21183 |
+ return 0; |
21184 |
+ |
21185 |
++failed_framebuffer: |
21186 |
++ clk_disable_unprepare(fbi->clk); |
21187 |
+ failed_check: |
21188 |
+ if (fbi->mach_info->teardown) |
21189 |
+ fbi->mach_info->teardown(pdev); |
21190 |
+diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig |
21191 |
+index ac9c860592aaf..85bc14b6faf64 100644 |
21192 |
+--- a/drivers/video/fbdev/geode/Kconfig |
21193 |
++++ b/drivers/video/fbdev/geode/Kconfig |
21194 |
+@@ -5,6 +5,7 @@ |
21195 |
+ config FB_GEODE |
21196 |
+ bool "AMD Geode family framebuffer support" |
21197 |
+ depends on FB && PCI && (X86_32 || (X86 && COMPILE_TEST)) |
21198 |
++ depends on !UML |
21199 |
+ help |
21200 |
+ Say 'Y' here to allow you to select framebuffer drivers for |
21201 |
+ the AMD Geode family of processors. |
21202 |
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c |
21203 |
+index 58c304a3b7c41..de865e197c8d9 100644 |
21204 |
+--- a/drivers/video/fbdev/hyperv_fb.c |
21205 |
++++ b/drivers/video/fbdev/hyperv_fb.c |
21206 |
+@@ -799,12 +799,18 @@ static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par, |
21207 |
+ static int hvfb_on_panic(struct notifier_block *nb, |
21208 |
+ unsigned long e, void *p) |
21209 |
+ { |
21210 |
++ struct hv_device *hdev; |
21211 |
+ struct hvfb_par *par; |
21212 |
+ struct fb_info *info; |
21213 |
+ |
21214 |
+ par = container_of(nb, struct hvfb_par, hvfb_panic_nb); |
21215 |
+- par->synchronous_fb = true; |
21216 |
+ info = par->info; |
21217 |
++ hdev = device_to_hv_device(info->device); |
21218 |
++ |
21219 |
++ if (hv_ringbuffer_spinlock_busy(hdev->channel)) |
21220 |
++ return NOTIFY_DONE; |
21221 |
++ |
21222 |
++ par->synchronous_fb = true; |
21223 |
+ if (par->need_docopy) |
21224 |
+ hvfb_docopy(par, 0, dio_fb_size); |
21225 |
+ synthvid_update(info, 0, 0, INT_MAX, INT_MAX); |
21226 |
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c |
21227 |
+index cbcf112c88d30..e8690f7aea050 100644 |
21228 |
+--- a/drivers/video/fbdev/pm2fb.c |
21229 |
++++ b/drivers/video/fbdev/pm2fb.c |
21230 |
+@@ -1530,8 +1530,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
21231 |
+ } |
21232 |
+ |
21233 |
+ info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev); |
21234 |
+- if (!info) |
21235 |
+- return -ENOMEM; |
21236 |
++ if (!info) { |
21237 |
++ err = -ENOMEM; |
21238 |
++ goto err_exit_disable; |
21239 |
++ } |
21240 |
+ default_par = info->par; |
21241 |
+ |
21242 |
+ switch (pdev->device) { |
21243 |
+@@ -1712,6 +1714,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
21244 |
+ release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); |
21245 |
+ err_exit_neither: |
21246 |
+ framebuffer_release(info); |
21247 |
++ err_exit_disable: |
21248 |
++ pci_disable_device(pdev); |
21249 |
+ return retval; |
21250 |
+ } |
21251 |
+ |
21252 |
+@@ -1738,6 +1742,7 @@ static void pm2fb_remove(struct pci_dev *pdev) |
21253 |
+ fb_dealloc_cmap(&info->cmap); |
21254 |
+ kfree(info->pixmap.addr); |
21255 |
+ framebuffer_release(info); |
21256 |
++ pci_disable_device(pdev); |
21257 |
+ } |
21258 |
+ |
21259 |
+ static const struct pci_device_id pm2fb_id_table[] = { |
21260 |
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c |
21261 |
+index 4df6772802d78..1f3b7e013568c 100644 |
21262 |
+--- a/drivers/video/fbdev/uvesafb.c |
21263 |
++++ b/drivers/video/fbdev/uvesafb.c |
21264 |
+@@ -1758,6 +1758,7 @@ static int uvesafb_probe(struct platform_device *dev) |
21265 |
+ out_unmap: |
21266 |
+ iounmap(info->screen_base); |
21267 |
+ out_mem: |
21268 |
++ arch_phys_wc_del(par->mtrr_handle); |
21269 |
+ release_mem_region(info->fix.smem_start, info->fix.smem_len); |
21270 |
+ out_reg: |
21271 |
+ release_region(0x3c0, 32); |
21272 |
+diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c |
21273 |
+index ff61605b8764f..a543643ce014d 100644 |
21274 |
+--- a/drivers/video/fbdev/vermilion/vermilion.c |
21275 |
++++ b/drivers/video/fbdev/vermilion/vermilion.c |
21276 |
+@@ -277,8 +277,10 @@ static int vmlfb_get_gpu(struct vml_par *par) |
21277 |
+ |
21278 |
+ mutex_unlock(&vml_mutex); |
21279 |
+ |
21280 |
+- if (pci_enable_device(par->gpu) < 0) |
21281 |
++ if (pci_enable_device(par->gpu) < 0) { |
21282 |
++ pci_dev_put(par->gpu); |
21283 |
+ return -ENODEV; |
21284 |
++ } |
21285 |
+ |
21286 |
+ return 0; |
21287 |
+ } |
21288 |
+diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c |
21289 |
+index 89d75079b7307..0363b478fa3ef 100644 |
21290 |
+--- a/drivers/video/fbdev/via/via-core.c |
21291 |
++++ b/drivers/video/fbdev/via/via-core.c |
21292 |
+@@ -725,7 +725,14 @@ static int __init via_core_init(void) |
21293 |
+ return ret; |
21294 |
+ viafb_i2c_init(); |
21295 |
+ viafb_gpio_init(); |
21296 |
+- return pci_register_driver(&via_driver); |
21297 |
++ ret = pci_register_driver(&via_driver); |
21298 |
++ if (ret) { |
21299 |
++ viafb_gpio_exit(); |
21300 |
++ viafb_i2c_exit(); |
21301 |
++ return ret; |
21302 |
++ } |
21303 |
++ |
21304 |
++ return 0; |
21305 |
+ } |
21306 |
+ |
21307 |
+ static void __exit via_core_exit(void) |
21308 |
+diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c |
21309 |
+index 6a1bc284f297c..eae78366eb028 100644 |
21310 |
+--- a/drivers/vme/bridges/vme_fake.c |
21311 |
++++ b/drivers/vme/bridges/vme_fake.c |
21312 |
+@@ -1073,6 +1073,8 @@ static int __init fake_init(void) |
21313 |
+ |
21314 |
+ /* We need a fake parent device */ |
21315 |
+ vme_root = __root_device_register("vme", THIS_MODULE); |
21316 |
++ if (IS_ERR(vme_root)) |
21317 |
++ return PTR_ERR(vme_root); |
21318 |
+ |
21319 |
+ /* If we want to support more than one bridge at some point, we need to |
21320 |
+ * dynamically allocate this so we get one per device. |
21321 |
+diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c |
21322 |
+index be9051b02f24c..5b4c766d15e69 100644 |
21323 |
+--- a/drivers/vme/bridges/vme_tsi148.c |
21324 |
++++ b/drivers/vme/bridges/vme_tsi148.c |
21325 |
+@@ -1765,6 +1765,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, |
21326 |
+ return 0; |
21327 |
+ |
21328 |
+ err_dma: |
21329 |
++ list_del(&entry->list); |
21330 |
+ err_dest: |
21331 |
+ err_source: |
21332 |
+ err_align: |
21333 |
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c |
21334 |
+index e88e8f6f0a334..719c5d1dda274 100644 |
21335 |
+--- a/drivers/xen/privcmd.c |
21336 |
++++ b/drivers/xen/privcmd.c |
21337 |
+@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, |
21338 |
+ goto out; |
21339 |
+ } |
21340 |
+ |
21341 |
+- pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL); |
21342 |
++ pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN); |
21343 |
+ if (!pfns) { |
21344 |
+ rc = -ENOMEM; |
21345 |
+ goto out; |
21346 |
+diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c |
21347 |
+index 3ac5fcf98d0d6..daaf3810cc925 100644 |
21348 |
+--- a/fs/afs/fs_probe.c |
21349 |
++++ b/fs/afs/fs_probe.c |
21350 |
+@@ -366,12 +366,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work) |
21351 |
+ unsigned long nowj, timer_at, poll_at; |
21352 |
+ bool first_pass = true, set_timer = false; |
21353 |
+ |
21354 |
+- if (!net->live) |
21355 |
++ if (!net->live) { |
21356 |
++ afs_dec_servers_outstanding(net); |
21357 |
+ return; |
21358 |
++ } |
21359 |
+ |
21360 |
+ _enter(""); |
21361 |
+ |
21362 |
+ if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) { |
21363 |
++ afs_dec_servers_outstanding(net); |
21364 |
+ _leave(" [none]"); |
21365 |
+ return; |
21366 |
+ } |
21367 |
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c |
21368 |
+index e1eae7ea823ae..bb202ad369d53 100644 |
21369 |
+--- a/fs/binfmt_misc.c |
21370 |
++++ b/fs/binfmt_misc.c |
21371 |
+@@ -44,10 +44,10 @@ static LIST_HEAD(entries); |
21372 |
+ static int enabled = 1; |
21373 |
+ |
21374 |
+ enum {Enabled, Magic}; |
21375 |
+-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31) |
21376 |
+-#define MISC_FMT_OPEN_BINARY (1 << 30) |
21377 |
+-#define MISC_FMT_CREDENTIALS (1 << 29) |
21378 |
+-#define MISC_FMT_OPEN_FILE (1 << 28) |
21379 |
++#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31) |
21380 |
++#define MISC_FMT_OPEN_BINARY (1UL << 30) |
21381 |
++#define MISC_FMT_CREDENTIALS (1UL << 29) |
21382 |
++#define MISC_FMT_OPEN_FILE (1UL << 28) |
21383 |
+ |
21384 |
+ typedef struct { |
21385 |
+ struct list_head list; |
21386 |
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
21387 |
+index 90934711dcf0f..eae622ef4c6d5 100644 |
21388 |
+--- a/fs/btrfs/file.c |
21389 |
++++ b/fs/btrfs/file.c |
21390 |
+@@ -872,7 +872,10 @@ next_slot: |
21391 |
+ args->start - extent_offset, |
21392 |
+ 0, false); |
21393 |
+ ret = btrfs_inc_extent_ref(trans, &ref); |
21394 |
+- BUG_ON(ret); /* -ENOMEM */ |
21395 |
++ if (ret) { |
21396 |
++ btrfs_abort_transaction(trans, ret); |
21397 |
++ break; |
21398 |
++ } |
21399 |
+ } |
21400 |
+ key.offset = args->start; |
21401 |
+ } |
21402 |
+@@ -959,7 +962,10 @@ delete_extent_item: |
21403 |
+ key.offset - extent_offset, 0, |
21404 |
+ false); |
21405 |
+ ret = btrfs_free_extent(trans, &ref); |
21406 |
+- BUG_ON(ret); /* -ENOMEM */ |
21407 |
++ if (ret) { |
21408 |
++ btrfs_abort_transaction(trans, ret); |
21409 |
++ break; |
21410 |
++ } |
21411 |
+ args->bytes_found += extent_end - key.offset; |
21412 |
+ } |
21413 |
+ |
21414 |
+diff --git a/fs/char_dev.c b/fs/char_dev.c |
21415 |
+index ba0ded7842a77..3f667292608c0 100644 |
21416 |
+--- a/fs/char_dev.c |
21417 |
++++ b/fs/char_dev.c |
21418 |
+@@ -547,7 +547,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev) |
21419 |
+ } |
21420 |
+ |
21421 |
+ rc = device_add(dev); |
21422 |
+- if (rc) |
21423 |
++ if (rc && dev->devt) |
21424 |
+ cdev_del(cdev); |
21425 |
+ |
21426 |
+ return rc; |
21427 |
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c |
21428 |
+index d1f9d26322027..ec6519e1ca3bf 100644 |
21429 |
+--- a/fs/configfs/dir.c |
21430 |
++++ b/fs/configfs/dir.c |
21431 |
+@@ -316,6 +316,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry, |
21432 |
+ return 0; |
21433 |
+ |
21434 |
+ out_remove: |
21435 |
++ configfs_put(dentry->d_fsdata); |
21436 |
+ configfs_remove_dirent(dentry); |
21437 |
+ return PTR_ERR(inode); |
21438 |
+ } |
21439 |
+@@ -382,6 +383,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent, |
21440 |
+ return 0; |
21441 |
+ |
21442 |
+ out_remove: |
21443 |
++ configfs_put(dentry->d_fsdata); |
21444 |
+ configfs_remove_dirent(dentry); |
21445 |
+ return PTR_ERR(inode); |
21446 |
+ } |
21447 |
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c |
21448 |
+index 950c63fa4d0b2..38930d9b0bb73 100644 |
21449 |
+--- a/fs/debugfs/file.c |
21450 |
++++ b/fs/debugfs/file.c |
21451 |
+@@ -378,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, |
21452 |
+ } |
21453 |
+ EXPORT_SYMBOL_GPL(debugfs_attr_read); |
21454 |
+ |
21455 |
+-ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
21456 |
+- size_t len, loff_t *ppos) |
21457 |
++static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf, |
21458 |
++ size_t len, loff_t *ppos, bool is_signed) |
21459 |
+ { |
21460 |
+ struct dentry *dentry = F_DENTRY(file); |
21461 |
+ ssize_t ret; |
21462 |
+@@ -387,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
21463 |
+ ret = debugfs_file_get(dentry); |
21464 |
+ if (unlikely(ret)) |
21465 |
+ return ret; |
21466 |
+- ret = simple_attr_write(file, buf, len, ppos); |
21467 |
++ if (is_signed) |
21468 |
++ ret = simple_attr_write_signed(file, buf, len, ppos); |
21469 |
++ else |
21470 |
++ ret = simple_attr_write(file, buf, len, ppos); |
21471 |
+ debugfs_file_put(dentry); |
21472 |
+ return ret; |
21473 |
+ } |
21474 |
++ |
21475 |
++ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
21476 |
++ size_t len, loff_t *ppos) |
21477 |
++{ |
21478 |
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, false); |
21479 |
++} |
21480 |
+ EXPORT_SYMBOL_GPL(debugfs_attr_write); |
21481 |
+ |
21482 |
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, |
21483 |
++ size_t len, loff_t *ppos) |
21484 |
++{ |
21485 |
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, true); |
21486 |
++} |
21487 |
++EXPORT_SYMBOL_GPL(debugfs_attr_write_signed); |
21488 |
++ |
21489 |
+ static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode, |
21490 |
+ struct dentry *parent, void *value, |
21491 |
+ const struct file_operations *fops, |
21492 |
+@@ -738,11 +754,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val) |
21493 |
+ *val = atomic_read((atomic_t *)data); |
21494 |
+ return 0; |
21495 |
+ } |
21496 |
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, |
21497 |
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get, |
21498 |
+ debugfs_atomic_t_set, "%lld\n"); |
21499 |
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, |
21500 |
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, |
21501 |
+ "%lld\n"); |
21502 |
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, |
21503 |
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, |
21504 |
+ "%lld\n"); |
21505 |
+ |
21506 |
+ /** |
21507 |
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c |
21508 |
+index e75a276f5b9c7..7863f8fd3b95e 100644 |
21509 |
+--- a/fs/f2fs/gc.c |
21510 |
++++ b/fs/f2fs/gc.c |
21511 |
+@@ -1673,8 +1673,9 @@ freed: |
21512 |
+ get_valid_blocks(sbi, segno, false) == 0) |
21513 |
+ seg_freed++; |
21514 |
+ |
21515 |
+- if (__is_large_section(sbi) && segno + 1 < end_segno) |
21516 |
+- sbi->next_victim_seg[gc_type] = segno + 1; |
21517 |
++ if (__is_large_section(sbi)) |
21518 |
++ sbi->next_victim_seg[gc_type] = |
21519 |
++ (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO; |
21520 |
+ skip: |
21521 |
+ f2fs_put_page(sum_page, 0); |
21522 |
+ } |
21523 |
+@@ -2051,8 +2052,6 @@ out_unlock: |
21524 |
+ if (err) |
21525 |
+ return err; |
21526 |
+ |
21527 |
+- set_sbi_flag(sbi, SBI_IS_RESIZEFS); |
21528 |
+- |
21529 |
+ freeze_super(sbi->sb); |
21530 |
+ down_write(&sbi->gc_lock); |
21531 |
+ down_write(&sbi->cp_global_sem); |
21532 |
+@@ -2068,6 +2067,7 @@ out_unlock: |
21533 |
+ if (err) |
21534 |
+ goto out_err; |
21535 |
+ |
21536 |
++ set_sbi_flag(sbi, SBI_IS_RESIZEFS); |
21537 |
+ err = free_segment_range(sbi, secs, false); |
21538 |
+ if (err) |
21539 |
+ goto recover_out; |
21540 |
+@@ -2091,6 +2091,7 @@ out_unlock: |
21541 |
+ f2fs_commit_super(sbi, false); |
21542 |
+ } |
21543 |
+ recover_out: |
21544 |
++ clear_sbi_flag(sbi, SBI_IS_RESIZEFS); |
21545 |
+ if (err) { |
21546 |
+ set_sbi_flag(sbi, SBI_NEED_FSCK); |
21547 |
+ f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); |
21548 |
+@@ -2103,6 +2104,5 @@ out_err: |
21549 |
+ up_write(&sbi->cp_global_sem); |
21550 |
+ up_write(&sbi->gc_lock); |
21551 |
+ thaw_super(sbi->sb); |
21552 |
+- clear_sbi_flag(sbi, SBI_IS_RESIZEFS); |
21553 |
+ return err; |
21554 |
+ } |
21555 |
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c |
21556 |
+index af810b2d5d904..194c0811fbdfe 100644 |
21557 |
+--- a/fs/f2fs/segment.c |
21558 |
++++ b/fs/f2fs/segment.c |
21559 |
+@@ -1551,7 +1551,7 @@ retry: |
21560 |
+ if (i + 1 < dpolicy->granularity) |
21561 |
+ break; |
21562 |
+ |
21563 |
+- if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) |
21564 |
++ if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered) |
21565 |
+ return __issue_discard_cmd_orderly(sbi, dpolicy); |
21566 |
+ |
21567 |
+ pend_list = &dcc->pend_list[i]; |
21568 |
+@@ -2129,8 +2129,10 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) |
21569 |
+ |
21570 |
+ dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, |
21571 |
+ "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); |
21572 |
+- if (IS_ERR(dcc->f2fs_issue_discard)) |
21573 |
++ if (IS_ERR(dcc->f2fs_issue_discard)) { |
21574 |
+ err = PTR_ERR(dcc->f2fs_issue_discard); |
21575 |
++ dcc->f2fs_issue_discard = NULL; |
21576 |
++ } |
21577 |
+ |
21578 |
+ return err; |
21579 |
+ } |
21580 |
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c |
21581 |
+index a0d1ef73b83ea..f4e8de1f47899 100644 |
21582 |
+--- a/fs/f2fs/super.c |
21583 |
++++ b/fs/f2fs/super.c |
21584 |
+@@ -4428,9 +4428,9 @@ free_nm: |
21585 |
+ f2fs_destroy_node_manager(sbi); |
21586 |
+ free_sm: |
21587 |
+ f2fs_destroy_segment_manager(sbi); |
21588 |
+- f2fs_destroy_post_read_wq(sbi); |
21589 |
+ stop_ckpt_thread: |
21590 |
+ f2fs_stop_ckpt_thread(sbi); |
21591 |
++ f2fs_destroy_post_read_wq(sbi); |
21592 |
+ free_devices: |
21593 |
+ destroy_device_list(sbi); |
21594 |
+ kvfree(sbi->ckpt); |
21595 |
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c |
21596 |
+index 4a95a92546a0d..a58a5b733224c 100644 |
21597 |
+--- a/fs/hfs/inode.c |
21598 |
++++ b/fs/hfs/inode.c |
21599 |
+@@ -456,6 +456,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
21600 |
+ /* panic? */ |
21601 |
+ return -EIO; |
21602 |
+ |
21603 |
++ if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN) |
21604 |
++ return -EIO; |
21605 |
+ fd.search_key->cat = HFS_I(main_inode)->cat_key; |
21606 |
+ if (hfs_brec_find(&fd)) |
21607 |
+ /* panic? */ |
21608 |
+diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c |
21609 |
+index 39f5e343bf4d4..fdb0edb8a607d 100644 |
21610 |
+--- a/fs/hfs/trans.c |
21611 |
++++ b/fs/hfs/trans.c |
21612 |
+@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr |
21613 |
+ if (nls_io) { |
21614 |
+ wchar_t ch; |
21615 |
+ |
21616 |
+- while (srclen > 0) { |
21617 |
++ while (srclen > 0 && dstlen > 0) { |
21618 |
+ size = nls_io->char2uni(src, srclen, &ch); |
21619 |
+ if (size < 0) { |
21620 |
+ ch = '?'; |
21621 |
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
21622 |
+index be8deec29ebe3..352230a011e08 100644 |
21623 |
+--- a/fs/hugetlbfs/inode.c |
21624 |
++++ b/fs/hugetlbfs/inode.c |
21625 |
+@@ -1250,7 +1250,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par |
21626 |
+ |
21627 |
+ case Opt_size: |
21628 |
+ /* memparse() will accept a K/M/G without a digit */ |
21629 |
+- if (!isdigit(param->string[0])) |
21630 |
++ if (!param->string || !isdigit(param->string[0])) |
21631 |
+ goto bad_val; |
21632 |
+ ctx->max_size_opt = memparse(param->string, &rest); |
21633 |
+ ctx->max_val_type = SIZE_STD; |
21634 |
+@@ -1260,7 +1260,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par |
21635 |
+ |
21636 |
+ case Opt_nr_inodes: |
21637 |
+ /* memparse() will accept a K/M/G without a digit */ |
21638 |
+- if (!isdigit(param->string[0])) |
21639 |
++ if (!param->string || !isdigit(param->string[0])) |
21640 |
+ goto bad_val; |
21641 |
+ ctx->nr_inodes = memparse(param->string, &rest); |
21642 |
+ return 0; |
21643 |
+@@ -1276,7 +1276,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par |
21644 |
+ |
21645 |
+ case Opt_min_size: |
21646 |
+ /* memparse() will accept a K/M/G without a digit */ |
21647 |
+- if (!isdigit(param->string[0])) |
21648 |
++ if (!param->string || !isdigit(param->string[0])) |
21649 |
+ goto bad_val; |
21650 |
+ ctx->min_size_opt = memparse(param->string, &rest); |
21651 |
+ ctx->min_val_type = SIZE_STD; |
21652 |
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c |
21653 |
+index e75f31b81d634..f401bc05d5ff6 100644 |
21654 |
+--- a/fs/jfs/jfs_dmap.c |
21655 |
++++ b/fs/jfs/jfs_dmap.c |
21656 |
+@@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap) |
21657 |
+ struct bmap *bmp; |
21658 |
+ struct dbmap_disk *dbmp_le; |
21659 |
+ struct metapage *mp; |
21660 |
+- int i; |
21661 |
++ int i, err; |
21662 |
+ |
21663 |
+ /* |
21664 |
+ * allocate/initialize the in-memory bmap descriptor |
21665 |
+@@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap) |
21666 |
+ BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage, |
21667 |
+ PSIZE, 0); |
21668 |
+ if (mp == NULL) { |
21669 |
+- kfree(bmp); |
21670 |
+- return -EIO; |
21671 |
++ err = -EIO; |
21672 |
++ goto err_kfree_bmp; |
21673 |
+ } |
21674 |
+ |
21675 |
+ /* copy the on-disk bmap descriptor to its in-memory version. */ |
21676 |
+@@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap) |
21677 |
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); |
21678 |
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); |
21679 |
+ if (!bmp->db_numag) { |
21680 |
+- release_metapage(mp); |
21681 |
+- kfree(bmp); |
21682 |
+- return -EINVAL; |
21683 |
++ err = -EINVAL; |
21684 |
++ goto err_release_metapage; |
21685 |
+ } |
21686 |
+ |
21687 |
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); |
21688 |
+@@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap) |
21689 |
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); |
21690 |
+ bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); |
21691 |
+ bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); |
21692 |
++ if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) { |
21693 |
++ err = -EINVAL; |
21694 |
++ goto err_release_metapage; |
21695 |
++ } |
21696 |
++ |
21697 |
++ if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { |
21698 |
++ err = -EINVAL; |
21699 |
++ goto err_release_metapage; |
21700 |
++ } |
21701 |
++ |
21702 |
+ for (i = 0; i < MAXAG; i++) |
21703 |
+ bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]); |
21704 |
+ bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize); |
21705 |
+@@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap) |
21706 |
+ BMAP_LOCK_INIT(bmp); |
21707 |
+ |
21708 |
+ return (0); |
21709 |
++ |
21710 |
++err_release_metapage: |
21711 |
++ release_metapage(mp); |
21712 |
++err_kfree_bmp: |
21713 |
++ kfree(bmp); |
21714 |
++ return err; |
21715 |
+ } |
21716 |
+ |
21717 |
+ |
21718 |
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c |
21719 |
+index 9db4f5789c0ec..4fbbf88435e69 100644 |
21720 |
+--- a/fs/jfs/namei.c |
21721 |
++++ b/fs/jfs/namei.c |
21722 |
+@@ -946,7 +946,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip, |
21723 |
+ if (ssize <= IDATASIZE) { |
21724 |
+ ip->i_op = &jfs_fast_symlink_inode_operations; |
21725 |
+ |
21726 |
+- ip->i_link = JFS_IP(ip)->i_inline; |
21727 |
++ ip->i_link = JFS_IP(ip)->i_inline_all; |
21728 |
+ memcpy(ip->i_link, name, ssize); |
21729 |
+ ip->i_size = ssize - 1; |
21730 |
+ |
21731 |
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c |
21732 |
+index 8d8ffd8c6f192..0fa467f2c8973 100644 |
21733 |
+--- a/fs/ksmbd/mgmt/user_session.c |
21734 |
++++ b/fs/ksmbd/mgmt/user_session.c |
21735 |
+@@ -106,15 +106,17 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) |
21736 |
+ entry->method = method; |
21737 |
+ entry->id = ksmbd_ipc_id_alloc(); |
21738 |
+ if (entry->id < 0) |
21739 |
+- goto error; |
21740 |
++ goto free_entry; |
21741 |
+ |
21742 |
+ resp = ksmbd_rpc_open(sess, entry->id); |
21743 |
+ if (!resp) |
21744 |
+- goto error; |
21745 |
++ goto free_id; |
21746 |
+ |
21747 |
+ kvfree(resp); |
21748 |
+ return entry->id; |
21749 |
+-error: |
21750 |
++free_id: |
21751 |
++ ksmbd_rpc_id_free(entry->id); |
21752 |
++free_entry: |
21753 |
+ list_del(&entry->list); |
21754 |
+ kfree(entry); |
21755 |
+ return -EINVAL; |
21756 |
+diff --git a/fs/libfs.c b/fs/libfs.c |
21757 |
+index 51b4de3b3447f..7bb5d90319cc6 100644 |
21758 |
+--- a/fs/libfs.c |
21759 |
++++ b/fs/libfs.c |
21760 |
+@@ -967,8 +967,8 @@ out: |
21761 |
+ EXPORT_SYMBOL_GPL(simple_attr_read); |
21762 |
+ |
21763 |
+ /* interpret the buffer as a number to call the set function with */ |
21764 |
+-ssize_t simple_attr_write(struct file *file, const char __user *buf, |
21765 |
+- size_t len, loff_t *ppos) |
21766 |
++static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf, |
21767 |
++ size_t len, loff_t *ppos, bool is_signed) |
21768 |
+ { |
21769 |
+ struct simple_attr *attr; |
21770 |
+ unsigned long long val; |
21771 |
+@@ -989,7 +989,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, |
21772 |
+ goto out; |
21773 |
+ |
21774 |
+ attr->set_buf[size] = '\0'; |
21775 |
+- ret = kstrtoull(attr->set_buf, 0, &val); |
21776 |
++ if (is_signed) |
21777 |
++ ret = kstrtoll(attr->set_buf, 0, &val); |
21778 |
++ else |
21779 |
++ ret = kstrtoull(attr->set_buf, 0, &val); |
21780 |
+ if (ret) |
21781 |
+ goto out; |
21782 |
+ ret = attr->set(attr->data, val); |
21783 |
+@@ -999,8 +1002,21 @@ out: |
21784 |
+ mutex_unlock(&attr->mutex); |
21785 |
+ return ret; |
21786 |
+ } |
21787 |
++ |
21788 |
++ssize_t simple_attr_write(struct file *file, const char __user *buf, |
21789 |
++ size_t len, loff_t *ppos) |
21790 |
++{ |
21791 |
++ return simple_attr_write_xsigned(file, buf, len, ppos, false); |
21792 |
++} |
21793 |
+ EXPORT_SYMBOL_GPL(simple_attr_write); |
21794 |
+ |
21795 |
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, |
21796 |
++ size_t len, loff_t *ppos) |
21797 |
++{ |
21798 |
++ return simple_attr_write_xsigned(file, buf, len, ppos, true); |
21799 |
++} |
21800 |
++EXPORT_SYMBOL_GPL(simple_attr_write_signed); |
21801 |
++ |
21802 |
+ /** |
21803 |
+ * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation |
21804 |
+ * @sb: filesystem to do the file handle conversion on |
21805 |
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c |
21806 |
+index e1c4617de7714..3515f17eaf3fb 100644 |
21807 |
+--- a/fs/lockd/svcsubs.c |
21808 |
++++ b/fs/lockd/svcsubs.c |
21809 |
+@@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file) |
21810 |
+ } |
21811 |
+ } |
21812 |
+ |
21813 |
+-static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner) |
21814 |
++static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl) |
21815 |
+ { |
21816 |
+ struct file_lock lock; |
21817 |
+ |
21818 |
+@@ -184,12 +184,15 @@ static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner) |
21819 |
+ lock.fl_type = F_UNLCK; |
21820 |
+ lock.fl_start = 0; |
21821 |
+ lock.fl_end = OFFSET_MAX; |
21822 |
+- lock.fl_owner = owner; |
21823 |
+- if (file->f_file[O_RDONLY] && |
21824 |
+- vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL)) |
21825 |
++ lock.fl_owner = fl->fl_owner; |
21826 |
++ lock.fl_pid = fl->fl_pid; |
21827 |
++ lock.fl_flags = FL_POSIX; |
21828 |
++ |
21829 |
++ lock.fl_file = file->f_file[O_RDONLY]; |
21830 |
++ if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) |
21831 |
+ goto out_err; |
21832 |
+- if (file->f_file[O_WRONLY] && |
21833 |
+- vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL)) |
21834 |
++ lock.fl_file = file->f_file[O_WRONLY]; |
21835 |
++ if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) |
21836 |
+ goto out_err; |
21837 |
+ return 0; |
21838 |
+ out_err: |
21839 |
+@@ -226,7 +229,7 @@ again: |
21840 |
+ if (match(lockhost, host)) { |
21841 |
+ |
21842 |
+ spin_unlock(&flctx->flc_lock); |
21843 |
+- if (nlm_unlock_files(file, fl->fl_owner)) |
21844 |
++ if (nlm_unlock_files(file, fl)) |
21845 |
+ return 1; |
21846 |
+ goto again; |
21847 |
+ } |
21848 |
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c |
21849 |
+index bc0c698f33508..565421c6682ed 100644 |
21850 |
+--- a/fs/nfs/namespace.c |
21851 |
++++ b/fs/nfs/namespace.c |
21852 |
+@@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path) |
21853 |
+ struct nfs_fs_context *ctx; |
21854 |
+ struct fs_context *fc; |
21855 |
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM); |
21856 |
+- struct nfs_server *server = NFS_SERVER(d_inode(path->dentry)); |
21857 |
++ struct nfs_server *server = NFS_SB(path->dentry->d_sb); |
21858 |
+ struct nfs_client *client = server->nfs_client; |
21859 |
+ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); |
21860 |
+ int ret; |
21861 |
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
21862 |
+index dc03924b6b71e..b6b1fad031c78 100644 |
21863 |
+--- a/fs/nfs/nfs4proc.c |
21864 |
++++ b/fs/nfs/nfs4proc.c |
21865 |
+@@ -126,6 +126,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry, |
21866 |
+ if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) |
21867 |
+ return NULL; |
21868 |
+ |
21869 |
++ label->lfs = 0; |
21870 |
++ label->pi = 0; |
21871 |
++ label->len = 0; |
21872 |
++ label->label = NULL; |
21873 |
++ |
21874 |
+ err = security_dentry_init_security(dentry, sattr->ia_mode, |
21875 |
+ &dentry->d_name, (void **)&label->label, &label->len); |
21876 |
+ if (err == 0) |
21877 |
+@@ -2139,18 +2144,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context |
21878 |
+ } |
21879 |
+ |
21880 |
+ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, |
21881 |
+- fmode_t fmode) |
21882 |
++ fmode_t fmode) |
21883 |
+ { |
21884 |
+ struct nfs4_state *newstate; |
21885 |
++ struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); |
21886 |
++ int openflags = opendata->o_arg.open_flags; |
21887 |
+ int ret; |
21888 |
+ |
21889 |
+ if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) |
21890 |
+ return 0; |
21891 |
+- opendata->o_arg.open_flags = 0; |
21892 |
+ opendata->o_arg.fmode = fmode; |
21893 |
+- opendata->o_arg.share_access = nfs4_map_atomic_open_share( |
21894 |
+- NFS_SB(opendata->dentry->d_sb), |
21895 |
+- fmode, 0); |
21896 |
++ opendata->o_arg.share_access = |
21897 |
++ nfs4_map_atomic_open_share(server, fmode, openflags); |
21898 |
+ memset(&opendata->o_res, 0, sizeof(opendata->o_res)); |
21899 |
+ memset(&opendata->c_res, 0, sizeof(opendata->c_res)); |
21900 |
+ nfs4_init_opendata_res(opendata); |
21901 |
+@@ -2730,10 +2735,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s |
21902 |
+ struct nfs4_opendata *opendata; |
21903 |
+ int ret; |
21904 |
+ |
21905 |
+- opendata = nfs4_open_recoverdata_alloc(ctx, state, |
21906 |
+- NFS4_OPEN_CLAIM_FH); |
21907 |
++ opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); |
21908 |
+ if (IS_ERR(opendata)) |
21909 |
+ return PTR_ERR(opendata); |
21910 |
++ /* |
21911 |
++ * We're not recovering a delegation, so ask for no delegation. |
21912 |
++ * Otherwise the recovery thread could deadlock with an outstanding |
21913 |
++ * delegation return. |
21914 |
++ */ |
21915 |
++ opendata->o_arg.open_flags = O_DIRECT; |
21916 |
+ ret = nfs4_open_recover(opendata, state); |
21917 |
+ if (ret == -ESTALE) |
21918 |
+ d_drop(ctx->dentry); |
21919 |
+@@ -3823,7 +3833,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, |
21920 |
+ int open_flags, struct iattr *attr, int *opened) |
21921 |
+ { |
21922 |
+ struct nfs4_state *state; |
21923 |
+- struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; |
21924 |
++ struct nfs4_label l, *label; |
21925 |
+ |
21926 |
+ label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); |
21927 |
+ |
21928 |
+@@ -3982,7 +3992,7 @@ static int _nfs4_discover_trunking(struct nfs_server *server, |
21929 |
+ |
21930 |
+ page = alloc_page(GFP_KERNEL); |
21931 |
+ if (!page) |
21932 |
+- return -ENOMEM; |
21933 |
++ goto out_put_cred; |
21934 |
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); |
21935 |
+ if (!locations) |
21936 |
+ goto out_free; |
21937 |
+@@ -3998,6 +4008,8 @@ out_free_2: |
21938 |
+ kfree(locations); |
21939 |
+ out_free: |
21940 |
+ __free_page(page); |
21941 |
++out_put_cred: |
21942 |
++ put_cred(cred); |
21943 |
+ return status; |
21944 |
+ } |
21945 |
+ |
21946 |
+@@ -4657,7 +4669,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, |
21947 |
+ int flags) |
21948 |
+ { |
21949 |
+ struct nfs_server *server = NFS_SERVER(dir); |
21950 |
+- struct nfs4_label l, *ilabel = NULL; |
21951 |
++ struct nfs4_label l, *ilabel; |
21952 |
+ struct nfs_open_context *ctx; |
21953 |
+ struct nfs4_state *state; |
21954 |
+ int status = 0; |
21955 |
+@@ -5017,7 +5029,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, |
21956 |
+ struct nfs4_exception exception = { |
21957 |
+ .interruptible = true, |
21958 |
+ }; |
21959 |
+- struct nfs4_label l, *label = NULL; |
21960 |
++ struct nfs4_label l, *label; |
21961 |
+ int err; |
21962 |
+ |
21963 |
+ label = nfs4_label_init_security(dir, dentry, sattr, &l); |
21964 |
+@@ -5058,7 +5070,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, |
21965 |
+ struct nfs4_exception exception = { |
21966 |
+ .interruptible = true, |
21967 |
+ }; |
21968 |
+- struct nfs4_label l, *label = NULL; |
21969 |
++ struct nfs4_label l, *label; |
21970 |
+ int err; |
21971 |
+ |
21972 |
+ label = nfs4_label_init_security(dir, dentry, sattr, &l); |
21973 |
+@@ -5177,7 +5189,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, |
21974 |
+ struct nfs4_exception exception = { |
21975 |
+ .interruptible = true, |
21976 |
+ }; |
21977 |
+- struct nfs4_label l, *label = NULL; |
21978 |
++ struct nfs4_label l, *label; |
21979 |
+ int err; |
21980 |
+ |
21981 |
+ label = nfs4_label_init_security(dir, dentry, sattr, &l); |
21982 |
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
21983 |
+index ecac56be6cb72..0cd803b4d90ce 100644 |
21984 |
+--- a/fs/nfs/nfs4state.c |
21985 |
++++ b/fs/nfs/nfs4state.c |
21986 |
+@@ -1227,6 +1227,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) |
21987 |
+ if (IS_ERR(task)) { |
21988 |
+ printk(KERN_ERR "%s: kthread_run: %ld\n", |
21989 |
+ __func__, PTR_ERR(task)); |
21990 |
++ if (!nfs_client_init_is_complete(clp)) |
21991 |
++ nfs_mark_client_ready(clp, PTR_ERR(task)); |
21992 |
+ nfs4_clear_state_manager_bit(clp); |
21993 |
+ nfs_put_client(clp); |
21994 |
+ module_put(THIS_MODULE); |
21995 |
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c |
21996 |
+index 046788afb6d94..0ae9e06a0bba2 100644 |
21997 |
+--- a/fs/nfs/nfs4xdr.c |
21998 |
++++ b/fs/nfs/nfs4xdr.c |
21999 |
+@@ -4179,19 +4179,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, |
22000 |
+ p = xdr_inline_decode(xdr, len); |
22001 |
+ if (unlikely(!p)) |
22002 |
+ return -EIO; |
22003 |
++ bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; |
22004 |
+ if (len < NFS4_MAXLABELLEN) { |
22005 |
+- if (label) { |
22006 |
+- if (label->len) { |
22007 |
+- if (label->len < len) |
22008 |
+- return -ERANGE; |
22009 |
+- memcpy(label->label, p, len); |
22010 |
+- } |
22011 |
++ if (label && label->len) { |
22012 |
++ if (label->len < len) |
22013 |
++ return -ERANGE; |
22014 |
++ memcpy(label->label, p, len); |
22015 |
+ label->len = len; |
22016 |
+ label->pi = pi; |
22017 |
+ label->lfs = lfs; |
22018 |
+ status = NFS_ATTR_FATTR_V4_SECURITY_LABEL; |
22019 |
+ } |
22020 |
+- bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL; |
22021 |
+ } else |
22022 |
+ printk(KERN_WARNING "%s: label too long (%u)!\n", |
22023 |
+ __func__, len); |
22024 |
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c |
22025 |
+index 4b43929c1f255..30a1782a03f01 100644 |
22026 |
+--- a/fs/nfsd/nfs2acl.c |
22027 |
++++ b/fs/nfsd/nfs2acl.c |
22028 |
+@@ -246,37 +246,27 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p) |
22029 |
+ struct nfsd3_getaclres *resp = rqstp->rq_resp; |
22030 |
+ struct dentry *dentry = resp->fh.fh_dentry; |
22031 |
+ struct inode *inode; |
22032 |
+- int w; |
22033 |
+ |
22034 |
+ if (!svcxdr_encode_stat(xdr, resp->status)) |
22035 |
+- return 0; |
22036 |
++ return false; |
22037 |
+ |
22038 |
+ if (dentry == NULL || d_really_is_negative(dentry)) |
22039 |
+- return 1; |
22040 |
++ return true; |
22041 |
+ inode = d_inode(dentry); |
22042 |
+ |
22043 |
+ if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat)) |
22044 |
+- return 0; |
22045 |
++ return false; |
22046 |
+ if (xdr_stream_encode_u32(xdr, resp->mask) < 0) |
22047 |
+- return 0; |
22048 |
+- |
22049 |
+- rqstp->rq_res.page_len = w = nfsacl_size( |
22050 |
+- (resp->mask & NFS_ACL) ? resp->acl_access : NULL, |
22051 |
+- (resp->mask & NFS_DFACL) ? resp->acl_default : NULL); |
22052 |
+- while (w > 0) { |
22053 |
+- if (!*(rqstp->rq_next_page++)) |
22054 |
+- return 1; |
22055 |
+- w -= PAGE_SIZE; |
22056 |
+- } |
22057 |
++ return false; |
22058 |
+ |
22059 |
+ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access, |
22060 |
+ resp->mask & NFS_ACL, 0)) |
22061 |
+- return 0; |
22062 |
++ return false; |
22063 |
+ if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default, |
22064 |
+ resp->mask & NFS_DFACL, NFS_ACL_DEFAULT)) |
22065 |
+- return 0; |
22066 |
++ return false; |
22067 |
+ |
22068 |
+- return 1; |
22069 |
++ return true; |
22070 |
+ } |
22071 |
+ |
22072 |
+ /* ACCESS */ |
22073 |
+@@ -286,17 +276,17 @@ static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p) |
22074 |
+ struct nfsd3_accessres *resp = rqstp->rq_resp; |
22075 |
+ |
22076 |
+ if (!svcxdr_encode_stat(xdr, resp->status)) |
22077 |
+- return 0; |
22078 |
++ return false; |
22079 |
+ switch (resp->status) { |
22080 |
+ case nfs_ok: |
22081 |
+ if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat)) |
22082 |
+- return 0; |
22083 |
++ return false; |
22084 |
+ if (xdr_stream_encode_u32(xdr, resp->access) < 0) |
22085 |
+- return 0; |
22086 |
++ return false; |
22087 |
+ break; |
22088 |
+ } |
22089 |
+ |
22090 |
+- return 1; |
22091 |
++ return true; |
22092 |
+ } |
22093 |
+ |
22094 |
+ /* |
22095 |
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c |
22096 |
+index 0f8b10f363e7f..2e0040d3bca79 100644 |
22097 |
+--- a/fs/nfsd/nfs4callback.c |
22098 |
++++ b/fs/nfsd/nfs4callback.c |
22099 |
+@@ -917,7 +917,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c |
22100 |
+ } else { |
22101 |
+ if (!conn->cb_xprt) |
22102 |
+ return -EINVAL; |
22103 |
+- clp->cl_cb_conn.cb_xprt = conn->cb_xprt; |
22104 |
+ clp->cl_cb_session = ses; |
22105 |
+ args.bc_xprt = conn->cb_xprt; |
22106 |
+ args.prognumber = clp->cl_cb_session->se_cb_prog; |
22107 |
+@@ -937,6 +936,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c |
22108 |
+ rpc_shutdown_client(client); |
22109 |
+ return -ENOMEM; |
22110 |
+ } |
22111 |
++ |
22112 |
++ if (clp->cl_minorversion != 0) |
22113 |
++ clp->cl_cb_conn.cb_xprt = conn->cb_xprt; |
22114 |
+ clp->cl_cb_client = client; |
22115 |
+ clp->cl_cb_cred = cred; |
22116 |
+ rcu_read_lock(); |
22117 |
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
22118 |
+index 7b763f146b621..c062728034ad0 100644 |
22119 |
+--- a/fs/nfsd/nfs4state.c |
22120 |
++++ b/fs/nfsd/nfs4state.c |
22121 |
+@@ -627,15 +627,26 @@ find_any_file(struct nfs4_file *f) |
22122 |
+ return ret; |
22123 |
+ } |
22124 |
+ |
22125 |
+-static struct nfsd_file *find_deleg_file(struct nfs4_file *f) |
22126 |
++static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) |
22127 |
+ { |
22128 |
+- struct nfsd_file *ret = NULL; |
22129 |
++ lockdep_assert_held(&f->fi_lock); |
22130 |
++ |
22131 |
++ if (f->fi_fds[O_RDWR]) |
22132 |
++ return f->fi_fds[O_RDWR]; |
22133 |
++ if (f->fi_fds[O_WRONLY]) |
22134 |
++ return f->fi_fds[O_WRONLY]; |
22135 |
++ if (f->fi_fds[O_RDONLY]) |
22136 |
++ return f->fi_fds[O_RDONLY]; |
22137 |
++ return NULL; |
22138 |
++} |
22139 |
++ |
22140 |
++static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f) |
22141 |
++{ |
22142 |
++ lockdep_assert_held(&f->fi_lock); |
22143 |
+ |
22144 |
+- spin_lock(&f->fi_lock); |
22145 |
+ if (f->fi_deleg_file) |
22146 |
+- ret = nfsd_file_get(f->fi_deleg_file); |
22147 |
+- spin_unlock(&f->fi_lock); |
22148 |
+- return ret; |
22149 |
++ return f->fi_deleg_file; |
22150 |
++ return NULL; |
22151 |
+ } |
22152 |
+ |
22153 |
+ static atomic_long_t num_delegations; |
22154 |
+@@ -2501,9 +2512,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) |
22155 |
+ ols = openlockstateid(st); |
22156 |
+ oo = ols->st_stateowner; |
22157 |
+ nf = st->sc_file; |
22158 |
+- file = find_any_file(nf); |
22159 |
++ |
22160 |
++ spin_lock(&nf->fi_lock); |
22161 |
++ file = find_any_file_locked(nf); |
22162 |
+ if (!file) |
22163 |
+- return 0; |
22164 |
++ goto out; |
22165 |
+ |
22166 |
+ seq_printf(s, "- "); |
22167 |
+ nfs4_show_stateid(s, &st->sc_stateid); |
22168 |
+@@ -2525,8 +2538,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) |
22169 |
+ seq_printf(s, ", "); |
22170 |
+ nfs4_show_owner(s, oo); |
22171 |
+ seq_printf(s, " }\n"); |
22172 |
+- nfsd_file_put(file); |
22173 |
+- |
22174 |
++out: |
22175 |
++ spin_unlock(&nf->fi_lock); |
22176 |
+ return 0; |
22177 |
+ } |
22178 |
+ |
22179 |
+@@ -2540,9 +2553,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) |
22180 |
+ ols = openlockstateid(st); |
22181 |
+ oo = ols->st_stateowner; |
22182 |
+ nf = st->sc_file; |
22183 |
+- file = find_any_file(nf); |
22184 |
++ spin_lock(&nf->fi_lock); |
22185 |
++ file = find_any_file_locked(nf); |
22186 |
+ if (!file) |
22187 |
+- return 0; |
22188 |
++ goto out; |
22189 |
+ |
22190 |
+ seq_printf(s, "- "); |
22191 |
+ nfs4_show_stateid(s, &st->sc_stateid); |
22192 |
+@@ -2562,8 +2576,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) |
22193 |
+ seq_printf(s, ", "); |
22194 |
+ nfs4_show_owner(s, oo); |
22195 |
+ seq_printf(s, " }\n"); |
22196 |
+- nfsd_file_put(file); |
22197 |
+- |
22198 |
++out: |
22199 |
++ spin_unlock(&nf->fi_lock); |
22200 |
+ return 0; |
22201 |
+ } |
22202 |
+ |
22203 |
+@@ -2575,9 +2589,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) |
22204 |
+ |
22205 |
+ ds = delegstateid(st); |
22206 |
+ nf = st->sc_file; |
22207 |
+- file = find_deleg_file(nf); |
22208 |
++ spin_lock(&nf->fi_lock); |
22209 |
++ file = find_deleg_file_locked(nf); |
22210 |
+ if (!file) |
22211 |
+- return 0; |
22212 |
++ goto out; |
22213 |
+ |
22214 |
+ seq_printf(s, "- "); |
22215 |
+ nfs4_show_stateid(s, &st->sc_stateid); |
22216 |
+@@ -2593,8 +2608,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) |
22217 |
+ seq_printf(s, ", "); |
22218 |
+ nfs4_show_fname(s, file); |
22219 |
+ seq_printf(s, " }\n"); |
22220 |
+- nfsd_file_put(file); |
22221 |
+- |
22222 |
++out: |
22223 |
++ spin_unlock(&nf->fi_lock); |
22224 |
+ return 0; |
22225 |
+ } |
22226 |
+ |
22227 |
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c |
22228 |
+index 16994598a8db4..6aa6cef0757f1 100644 |
22229 |
+--- a/fs/nilfs2/the_nilfs.c |
22230 |
++++ b/fs/nilfs2/the_nilfs.c |
22231 |
+@@ -13,6 +13,7 @@ |
22232 |
+ #include <linux/blkdev.h> |
22233 |
+ #include <linux/backing-dev.h> |
22234 |
+ #include <linux/random.h> |
22235 |
++#include <linux/log2.h> |
22236 |
+ #include <linux/crc32.h> |
22237 |
+ #include "nilfs.h" |
22238 |
+ #include "segment.h" |
22239 |
+@@ -192,6 +193,34 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, |
22240 |
+ return ret; |
22241 |
+ } |
22242 |
+ |
22243 |
++/** |
22244 |
++ * nilfs_get_blocksize - get block size from raw superblock data |
22245 |
++ * @sb: super block instance |
22246 |
++ * @sbp: superblock raw data buffer |
22247 |
++ * @blocksize: place to store block size |
22248 |
++ * |
22249 |
++ * nilfs_get_blocksize() calculates the block size from the block size |
22250 |
++ * exponent information written in @sbp and stores it in @blocksize, |
22251 |
++ * or aborts with an error message if it's too large. |
22252 |
++ * |
22253 |
++ * Return Value: On success, 0 is returned. If the block size is too |
22254 |
++ * large, -EINVAL is returned. |
22255 |
++ */ |
22256 |
++static int nilfs_get_blocksize(struct super_block *sb, |
22257 |
++ struct nilfs_super_block *sbp, int *blocksize) |
22258 |
++{ |
22259 |
++ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); |
22260 |
++ |
22261 |
++ if (unlikely(shift_bits > |
22262 |
++ ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) { |
22263 |
++ nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB", |
22264 |
++ shift_bits); |
22265 |
++ return -EINVAL; |
22266 |
++ } |
22267 |
++ *blocksize = BLOCK_SIZE << shift_bits; |
22268 |
++ return 0; |
22269 |
++} |
22270 |
++ |
22271 |
+ /** |
22272 |
+ * load_nilfs - load and recover the nilfs |
22273 |
+ * @nilfs: the_nilfs structure to be released |
22274 |
+@@ -245,11 +274,15 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) |
22275 |
+ nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); |
22276 |
+ |
22277 |
+ /* verify consistency between two super blocks */ |
22278 |
+- blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); |
22279 |
++ err = nilfs_get_blocksize(sb, sbp[0], &blocksize); |
22280 |
++ if (err) |
22281 |
++ goto scan_error; |
22282 |
++ |
22283 |
+ if (blocksize != nilfs->ns_blocksize) { |
22284 |
+ nilfs_warn(sb, |
22285 |
+ "blocksize differs between two super blocks (%d != %d)", |
22286 |
+ blocksize, nilfs->ns_blocksize); |
22287 |
++ err = -EINVAL; |
22288 |
+ goto scan_error; |
22289 |
+ } |
22290 |
+ |
22291 |
+@@ -443,11 +476,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) |
22292 |
+ return crc == le32_to_cpu(sbp->s_sum); |
22293 |
+ } |
22294 |
+ |
22295 |
+-static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) |
22296 |
++/** |
22297 |
++ * nilfs_sb2_bad_offset - check the location of the second superblock |
22298 |
++ * @sbp: superblock raw data buffer |
22299 |
++ * @offset: byte offset of second superblock calculated from device size |
22300 |
++ * |
22301 |
++ * nilfs_sb2_bad_offset() checks if the position on the second |
22302 |
++ * superblock is valid or not based on the filesystem parameters |
22303 |
++ * stored in @sbp. If @offset points to a location within the segment |
22304 |
++ * area, or if the parameters themselves are not normal, it is |
22305 |
++ * determined to be invalid. |
22306 |
++ * |
22307 |
++ * Return Value: true if invalid, false if valid. |
22308 |
++ */ |
22309 |
++static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) |
22310 |
+ { |
22311 |
+- return offset < ((le64_to_cpu(sbp->s_nsegments) * |
22312 |
+- le32_to_cpu(sbp->s_blocks_per_segment)) << |
22313 |
+- (le32_to_cpu(sbp->s_log_block_size) + 10)); |
22314 |
++ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); |
22315 |
++ u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); |
22316 |
++ u64 nsegments = le64_to_cpu(sbp->s_nsegments); |
22317 |
++ u64 index; |
22318 |
++ |
22319 |
++ if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS || |
22320 |
++ shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS) |
22321 |
++ return true; |
22322 |
++ |
22323 |
++ index = offset >> (shift_bits + BLOCK_SIZE_BITS); |
22324 |
++ do_div(index, blocks_per_segment); |
22325 |
++ return index < nsegments; |
22326 |
+ } |
22327 |
+ |
22328 |
+ static void nilfs_release_super_block(struct the_nilfs *nilfs) |
22329 |
+@@ -586,9 +641,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) |
22330 |
+ if (err) |
22331 |
+ goto failed_sbh; |
22332 |
+ |
22333 |
+- blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); |
22334 |
+- if (blocksize < NILFS_MIN_BLOCK_SIZE || |
22335 |
+- blocksize > NILFS_MAX_BLOCK_SIZE) { |
22336 |
++ err = nilfs_get_blocksize(sb, sbp, &blocksize); |
22337 |
++ if (err) |
22338 |
++ goto failed_sbh; |
22339 |
++ |
22340 |
++ if (blocksize < NILFS_MIN_BLOCK_SIZE) { |
22341 |
+ nilfs_err(sb, |
22342 |
+ "couldn't mount because of unsupported filesystem blocksize %d", |
22343 |
+ blocksize); |
22344 |
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c |
22345 |
+index aa184407520f0..7f2055b7427a6 100644 |
22346 |
+--- a/fs/ntfs3/bitmap.c |
22347 |
++++ b/fs/ntfs3/bitmap.c |
22348 |
+@@ -1432,7 +1432,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range) |
22349 |
+ |
22350 |
+ down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS); |
22351 |
+ |
22352 |
+- for (; iw < wnd->nbits; iw++, wbit = 0) { |
22353 |
++ for (; iw < wnd->nwnd; iw++, wbit = 0) { |
22354 |
+ CLST lcn_wnd = iw * wbits; |
22355 |
+ struct buffer_head *bh; |
22356 |
+ |
22357 |
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c |
22358 |
+index f3b88c7e35f73..39b09f32f4db7 100644 |
22359 |
+--- a/fs/ntfs3/super.c |
22360 |
++++ b/fs/ntfs3/super.c |
22361 |
+@@ -672,7 +672,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot) |
22362 |
+ if (boot->sectors_per_clusters <= 0x80) |
22363 |
+ return boot->sectors_per_clusters; |
22364 |
+ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */ |
22365 |
+- return 1U << (0 - boot->sectors_per_clusters); |
22366 |
++ return 1U << -(s8)boot->sectors_per_clusters; |
22367 |
+ return -EINVAL; |
22368 |
+ } |
22369 |
+ |
22370 |
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c |
22371 |
+index eb799a5cdfade..8847db0159084 100644 |
22372 |
+--- a/fs/ntfs3/xattr.c |
22373 |
++++ b/fs/ntfs3/xattr.c |
22374 |
+@@ -107,7 +107,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea, |
22375 |
+ return -EFBIG; |
22376 |
+ |
22377 |
+ /* Allocate memory for packed Ea. */ |
22378 |
+- ea_p = kmalloc(size + add_bytes, GFP_NOFS); |
22379 |
++ ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS); |
22380 |
+ if (!ea_p) |
22381 |
+ return -ENOMEM; |
22382 |
+ |
22383 |
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c |
22384 |
+index 16f1bfc407f2a..955f475f9aca6 100644 |
22385 |
+--- a/fs/ocfs2/stackglue.c |
22386 |
++++ b/fs/ocfs2/stackglue.c |
22387 |
+@@ -703,6 +703,8 @@ static struct ctl_table_header *ocfs2_table_header; |
22388 |
+ |
22389 |
+ static int __init ocfs2_stack_glue_init(void) |
22390 |
+ { |
22391 |
++ int ret; |
22392 |
++ |
22393 |
+ strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); |
22394 |
+ |
22395 |
+ ocfs2_table_header = register_sysctl_table(ocfs2_root_table); |
22396 |
+@@ -712,7 +714,11 @@ static int __init ocfs2_stack_glue_init(void) |
22397 |
+ return -ENOMEM; /* or something. */ |
22398 |
+ } |
22399 |
+ |
22400 |
+- return ocfs2_sysfs_init(); |
22401 |
++ ret = ocfs2_sysfs_init(); |
22402 |
++ if (ret) |
22403 |
++ unregister_sysctl_table(ocfs2_table_header); |
22404 |
++ |
22405 |
++ return ret; |
22406 |
+ } |
22407 |
+ |
22408 |
+ static void __exit ocfs2_stack_glue_exit(void) |
22409 |
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c |
22410 |
+index 29eaa45443727..1b508f5433846 100644 |
22411 |
+--- a/fs/orangefs/orangefs-debugfs.c |
22412 |
++++ b/fs/orangefs/orangefs-debugfs.c |
22413 |
+@@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask) |
22414 |
+ */ |
22415 |
+ static void orangefs_kernel_debug_init(void) |
22416 |
+ { |
22417 |
+- int rc = -ENOMEM; |
22418 |
+- char *k_buffer = NULL; |
22419 |
++ static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; |
22420 |
+ |
22421 |
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); |
22422 |
+ |
22423 |
+- k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); |
22424 |
+- if (!k_buffer) |
22425 |
+- goto out; |
22426 |
+- |
22427 |
+ if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { |
22428 |
+ strcpy(k_buffer, kernel_debug_string); |
22429 |
+ strcat(k_buffer, "\n"); |
22430 |
+@@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void) |
22431 |
+ |
22432 |
+ debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, |
22433 |
+ &kernel_debug_fops); |
22434 |
+- |
22435 |
+-out: |
22436 |
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); |
22437 |
+ } |
22438 |
+ |
22439 |
+ |
22440 |
+ void orangefs_debugfs_cleanup(void) |
22441 |
+ { |
22442 |
+ debugfs_remove_recursive(debug_dir); |
22443 |
++ kfree(debug_help_string); |
22444 |
++ debug_help_string = NULL; |
22445 |
+ } |
22446 |
+ |
22447 |
+ /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ |
22448 |
+@@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v) |
22449 |
+ /* |
22450 |
+ * initialize the client-debug file. |
22451 |
+ */ |
22452 |
+-static int orangefs_client_debug_init(void) |
22453 |
++static void orangefs_client_debug_init(void) |
22454 |
+ { |
22455 |
+ |
22456 |
+- int rc = -ENOMEM; |
22457 |
+- char *c_buffer = NULL; |
22458 |
++ static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; |
22459 |
+ |
22460 |
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); |
22461 |
+ |
22462 |
+- c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); |
22463 |
+- if (!c_buffer) |
22464 |
+- goto out; |
22465 |
+- |
22466 |
+ if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { |
22467 |
+ strcpy(c_buffer, client_debug_string); |
22468 |
+ strcat(c_buffer, "\n"); |
22469 |
+@@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void) |
22470 |
+ debug_dir, |
22471 |
+ c_buffer, |
22472 |
+ &kernel_debug_fops); |
22473 |
+- |
22474 |
+- rc = 0; |
22475 |
+- |
22476 |
+-out: |
22477 |
+- |
22478 |
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); |
22479 |
+- return rc; |
22480 |
+ } |
22481 |
+ |
22482 |
+ /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ |
22483 |
+@@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot) |
22484 |
+ memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE); |
22485 |
+ strlcat(debug_help_string, new, string_size); |
22486 |
+ mutex_unlock(&orangefs_help_file_lock); |
22487 |
++ kfree(new); |
22488 |
+ } |
22489 |
+ |
22490 |
+ rc = 0; |
22491 |
+diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c |
22492 |
+index cd7297815f91e..5ab741c60b7e2 100644 |
22493 |
+--- a/fs/orangefs/orangefs-mod.c |
22494 |
++++ b/fs/orangefs/orangefs-mod.c |
22495 |
+@@ -141,7 +141,7 @@ static int __init orangefs_init(void) |
22496 |
+ gossip_err("%s: could not initialize device subsystem %d!\n", |
22497 |
+ __func__, |
22498 |
+ ret); |
22499 |
+- goto cleanup_device; |
22500 |
++ goto cleanup_sysfs; |
22501 |
+ } |
22502 |
+ |
22503 |
+ ret = register_filesystem(&orangefs_fs_type); |
22504 |
+@@ -152,11 +152,11 @@ static int __init orangefs_init(void) |
22505 |
+ goto out; |
22506 |
+ } |
22507 |
+ |
22508 |
+- orangefs_sysfs_exit(); |
22509 |
+- |
22510 |
+-cleanup_device: |
22511 |
+ orangefs_dev_cleanup(); |
22512 |
+ |
22513 |
++cleanup_sysfs: |
22514 |
++ orangefs_sysfs_exit(); |
22515 |
++ |
22516 |
+ sysfs_init_failed: |
22517 |
+ orangefs_debugfs_cleanup(); |
22518 |
+ |
22519 |
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
22520 |
+index f18490813170a..3fc86c51e260c 100644 |
22521 |
+--- a/fs/overlayfs/dir.c |
22522 |
++++ b/fs/overlayfs/dir.c |
22523 |
+@@ -880,7 +880,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) |
22524 |
+ { |
22525 |
+ int err; |
22526 |
+ const struct cred *old_cred; |
22527 |
+- struct dentry *upperdentry; |
22528 |
+ bool lower_positive = ovl_lower_positive(dentry); |
22529 |
+ LIST_HEAD(list); |
22530 |
+ |
22531 |
+@@ -923,9 +922,8 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) |
22532 |
+ * Note: we fail to update ctime if there was no copy-up, only a |
22533 |
+ * whiteout |
22534 |
+ */ |
22535 |
+- upperdentry = ovl_dentry_upper(dentry); |
22536 |
+- if (upperdentry) |
22537 |
+- ovl_copyattr(d_inode(upperdentry), d_inode(dentry)); |
22538 |
++ if (ovl_dentry_upper(dentry)) |
22539 |
++ ovl_copyattr(d_inode(dentry)); |
22540 |
+ |
22541 |
+ out_drop_write: |
22542 |
+ ovl_drop_write(dentry); |
22543 |
+@@ -1272,9 +1270,9 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir, |
22544 |
+ (d_inode(new) && ovl_type_origin(new))); |
22545 |
+ |
22546 |
+ /* copy ctime: */ |
22547 |
+- ovl_copyattr(d_inode(olddentry), d_inode(old)); |
22548 |
++ ovl_copyattr(d_inode(old)); |
22549 |
+ if (d_inode(new) && ovl_dentry_upper(new)) |
22550 |
+- ovl_copyattr(d_inode(newdentry), d_inode(new)); |
22551 |
++ ovl_copyattr(d_inode(new)); |
22552 |
+ |
22553 |
+ out_dput: |
22554 |
+ dput(newdentry); |
22555 |
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c |
22556 |
+index 44fea16751f1d..28cb05ef018c7 100644 |
22557 |
+--- a/fs/overlayfs/file.c |
22558 |
++++ b/fs/overlayfs/file.c |
22559 |
+@@ -273,7 +273,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req) |
22560 |
+ __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb, |
22561 |
+ SB_FREEZE_WRITE); |
22562 |
+ file_end_write(iocb->ki_filp); |
22563 |
+- ovl_copyattr(ovl_inode_real(inode), inode); |
22564 |
++ ovl_copyattr(inode); |
22565 |
+ } |
22566 |
+ |
22567 |
+ orig_iocb->ki_pos = iocb->ki_pos; |
22568 |
+@@ -356,7 +356,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) |
22569 |
+ |
22570 |
+ inode_lock(inode); |
22571 |
+ /* Update mode */ |
22572 |
+- ovl_copyattr(ovl_inode_real(inode), inode); |
22573 |
++ ovl_copyattr(inode); |
22574 |
+ ret = file_remove_privs(file); |
22575 |
+ if (ret) |
22576 |
+ goto out_unlock; |
22577 |
+@@ -381,7 +381,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) |
22578 |
+ ovl_iocb_to_rwf(ifl)); |
22579 |
+ file_end_write(real.file); |
22580 |
+ /* Update size */ |
22581 |
+- ovl_copyattr(ovl_inode_real(inode), inode); |
22582 |
++ ovl_copyattr(inode); |
22583 |
+ } else { |
22584 |
+ struct ovl_aio_req *aio_req; |
22585 |
+ |
22586 |
+@@ -431,12 +431,11 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, |
22587 |
+ struct fd real; |
22588 |
+ const struct cred *old_cred; |
22589 |
+ struct inode *inode = file_inode(out); |
22590 |
+- struct inode *realinode = ovl_inode_real(inode); |
22591 |
+ ssize_t ret; |
22592 |
+ |
22593 |
+ inode_lock(inode); |
22594 |
+ /* Update mode */ |
22595 |
+- ovl_copyattr(realinode, inode); |
22596 |
++ ovl_copyattr(inode); |
22597 |
+ ret = file_remove_privs(out); |
22598 |
+ if (ret) |
22599 |
+ goto out_unlock; |
22600 |
+@@ -452,7 +451,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, |
22601 |
+ |
22602 |
+ file_end_write(real.file); |
22603 |
+ /* Update size */ |
22604 |
+- ovl_copyattr(realinode, inode); |
22605 |
++ ovl_copyattr(inode); |
22606 |
+ revert_creds(old_cred); |
22607 |
+ fdput(real); |
22608 |
+ |
22609 |
+@@ -517,19 +516,29 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len |
22610 |
+ const struct cred *old_cred; |
22611 |
+ int ret; |
22612 |
+ |
22613 |
++ inode_lock(inode); |
22614 |
++ /* Update mode */ |
22615 |
++ ovl_copyattr(inode); |
22616 |
++ ret = file_remove_privs(file); |
22617 |
++ if (ret) |
22618 |
++ goto out_unlock; |
22619 |
++ |
22620 |
+ ret = ovl_real_fdget(file, &real); |
22621 |
+ if (ret) |
22622 |
+- return ret; |
22623 |
++ goto out_unlock; |
22624 |
+ |
22625 |
+ old_cred = ovl_override_creds(file_inode(file)->i_sb); |
22626 |
+ ret = vfs_fallocate(real.file, mode, offset, len); |
22627 |
+ revert_creds(old_cred); |
22628 |
+ |
22629 |
+ /* Update size */ |
22630 |
+- ovl_copyattr(ovl_inode_real(inode), inode); |
22631 |
++ ovl_copyattr(inode); |
22632 |
+ |
22633 |
+ fdput(real); |
22634 |
+ |
22635 |
++out_unlock: |
22636 |
++ inode_unlock(inode); |
22637 |
++ |
22638 |
+ return ret; |
22639 |
+ } |
22640 |
+ |
22641 |
+@@ -567,14 +576,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, |
22642 |
+ const struct cred *old_cred; |
22643 |
+ loff_t ret; |
22644 |
+ |
22645 |
++ inode_lock(inode_out); |
22646 |
++ if (op != OVL_DEDUPE) { |
22647 |
++ /* Update mode */ |
22648 |
++ ovl_copyattr(inode_out); |
22649 |
++ ret = file_remove_privs(file_out); |
22650 |
++ if (ret) |
22651 |
++ goto out_unlock; |
22652 |
++ } |
22653 |
++ |
22654 |
+ ret = ovl_real_fdget(file_out, &real_out); |
22655 |
+ if (ret) |
22656 |
+- return ret; |
22657 |
++ goto out_unlock; |
22658 |
+ |
22659 |
+ ret = ovl_real_fdget(file_in, &real_in); |
22660 |
+ if (ret) { |
22661 |
+ fdput(real_out); |
22662 |
+- return ret; |
22663 |
++ goto out_unlock; |
22664 |
+ } |
22665 |
+ |
22666 |
+ old_cred = ovl_override_creds(file_inode(file_out)->i_sb); |
22667 |
+@@ -598,11 +616,14 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, |
22668 |
+ revert_creds(old_cred); |
22669 |
+ |
22670 |
+ /* Update size */ |
22671 |
+- ovl_copyattr(ovl_inode_real(inode_out), inode_out); |
22672 |
++ ovl_copyattr(inode_out); |
22673 |
+ |
22674 |
+ fdput(real_in); |
22675 |
+ fdput(real_out); |
22676 |
+ |
22677 |
++out_unlock: |
22678 |
++ inode_unlock(inode_out); |
22679 |
++ |
22680 |
+ return ret; |
22681 |
+ } |
22682 |
+ |
22683 |
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c |
22684 |
+index 1f36158c7dbe2..d41f0c8e0e2a5 100644 |
22685 |
+--- a/fs/overlayfs/inode.c |
22686 |
++++ b/fs/overlayfs/inode.c |
22687 |
+@@ -80,7 +80,7 @@ int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, |
22688 |
+ err = notify_change(&init_user_ns, upperdentry, attr, NULL); |
22689 |
+ revert_creds(old_cred); |
22690 |
+ if (!err) |
22691 |
+- ovl_copyattr(upperdentry->d_inode, dentry->d_inode); |
22692 |
++ ovl_copyattr(dentry->d_inode); |
22693 |
+ inode_unlock(upperdentry->d_inode); |
22694 |
+ |
22695 |
+ if (winode) |
22696 |
+@@ -377,7 +377,7 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name, |
22697 |
+ revert_creds(old_cred); |
22698 |
+ |
22699 |
+ /* copy c/mtime */ |
22700 |
+- ovl_copyattr(d_inode(realdentry), inode); |
22701 |
++ ovl_copyattr(inode); |
22702 |
+ |
22703 |
+ out_drop_write: |
22704 |
+ ovl_drop_write(dentry); |
22705 |
+@@ -579,7 +579,7 @@ int ovl_fileattr_set(struct user_namespace *mnt_userns, |
22706 |
+ inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK); |
22707 |
+ |
22708 |
+ /* Update ctime */ |
22709 |
+- ovl_copyattr(ovl_inode_real(inode), inode); |
22710 |
++ ovl_copyattr(inode); |
22711 |
+ } |
22712 |
+ ovl_drop_write(dentry); |
22713 |
+ out: |
22714 |
+@@ -777,16 +777,19 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip, |
22715 |
+ unsigned long ino, int fsid) |
22716 |
+ { |
22717 |
+ struct inode *realinode; |
22718 |
++ struct ovl_inode *oi = OVL_I(inode); |
22719 |
+ |
22720 |
+ if (oip->upperdentry) |
22721 |
+- OVL_I(inode)->__upperdentry = oip->upperdentry; |
22722 |
+- if (oip->lowerpath && oip->lowerpath->dentry) |
22723 |
+- OVL_I(inode)->lower = igrab(d_inode(oip->lowerpath->dentry)); |
22724 |
++ oi->__upperdentry = oip->upperdentry; |
22725 |
++ if (oip->lowerpath && oip->lowerpath->dentry) { |
22726 |
++ oi->lowerpath.dentry = dget(oip->lowerpath->dentry); |
22727 |
++ oi->lowerpath.layer = oip->lowerpath->layer; |
22728 |
++ } |
22729 |
+ if (oip->lowerdata) |
22730 |
+- OVL_I(inode)->lowerdata = igrab(d_inode(oip->lowerdata)); |
22731 |
++ oi->lowerdata = igrab(d_inode(oip->lowerdata)); |
22732 |
+ |
22733 |
+ realinode = ovl_inode_real(inode); |
22734 |
+- ovl_copyattr(realinode, inode); |
22735 |
++ ovl_copyattr(inode); |
22736 |
+ ovl_copyflags(realinode, inode); |
22737 |
+ ovl_map_ino(inode, ino, fsid); |
22738 |
+ } |
22739 |
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h |
22740 |
+index 2cd5741c873b6..2df3e74cdf0f7 100644 |
22741 |
+--- a/fs/overlayfs/overlayfs.h |
22742 |
++++ b/fs/overlayfs/overlayfs.h |
22743 |
+@@ -293,10 +293,12 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry); |
22744 |
+ void ovl_path_upper(struct dentry *dentry, struct path *path); |
22745 |
+ void ovl_path_lower(struct dentry *dentry, struct path *path); |
22746 |
+ void ovl_path_lowerdata(struct dentry *dentry, struct path *path); |
22747 |
++void ovl_i_path_real(struct inode *inode, struct path *path); |
22748 |
+ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); |
22749 |
+ struct dentry *ovl_dentry_upper(struct dentry *dentry); |
22750 |
+ struct dentry *ovl_dentry_lower(struct dentry *dentry); |
22751 |
+ struct dentry *ovl_dentry_lowerdata(struct dentry *dentry); |
22752 |
++const struct ovl_layer *ovl_i_layer_lower(struct inode *inode); |
22753 |
+ const struct ovl_layer *ovl_layer_lower(struct dentry *dentry); |
22754 |
+ struct dentry *ovl_dentry_real(struct dentry *dentry); |
22755 |
+ struct dentry *ovl_i_dentry_upper(struct inode *inode); |
22756 |
+@@ -520,16 +522,7 @@ bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir); |
22757 |
+ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir); |
22758 |
+ struct inode *ovl_get_inode(struct super_block *sb, |
22759 |
+ struct ovl_inode_params *oip); |
22760 |
+-static inline void ovl_copyattr(struct inode *from, struct inode *to) |
22761 |
+-{ |
22762 |
+- to->i_uid = from->i_uid; |
22763 |
+- to->i_gid = from->i_gid; |
22764 |
+- to->i_mode = from->i_mode; |
22765 |
+- to->i_atime = from->i_atime; |
22766 |
+- to->i_mtime = from->i_mtime; |
22767 |
+- to->i_ctime = from->i_ctime; |
22768 |
+- i_size_write(to, i_size_read(from)); |
22769 |
+-} |
22770 |
++void ovl_copyattr(struct inode *to); |
22771 |
+ |
22772 |
+ /* vfs inode flags copied from real to ovl inode */ |
22773 |
+ #define OVL_COPY_I_FLAGS_MASK (S_SYNC | S_NOATIME | S_APPEND | S_IMMUTABLE) |
22774 |
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h |
22775 |
+index 63efee554f69a..b2d64f3c974bb 100644 |
22776 |
+--- a/fs/overlayfs/ovl_entry.h |
22777 |
++++ b/fs/overlayfs/ovl_entry.h |
22778 |
+@@ -129,7 +129,7 @@ struct ovl_inode { |
22779 |
+ unsigned long flags; |
22780 |
+ struct inode vfs_inode; |
22781 |
+ struct dentry *__upperdentry; |
22782 |
+- struct inode *lower; |
22783 |
++ struct ovl_path lowerpath; |
22784 |
+ |
22785 |
+ /* synchronize copy up and more */ |
22786 |
+ struct mutex lock; |
22787 |
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c |
22788 |
+index 9837aaf9caf18..b3675d13c1ac2 100644 |
22789 |
+--- a/fs/overlayfs/super.c |
22790 |
++++ b/fs/overlayfs/super.c |
22791 |
+@@ -138,11 +138,16 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry, |
22792 |
+ unsigned int flags, bool weak) |
22793 |
+ { |
22794 |
+ struct ovl_entry *oe = dentry->d_fsdata; |
22795 |
++ struct inode *inode = d_inode_rcu(dentry); |
22796 |
+ struct dentry *upper; |
22797 |
+ unsigned int i; |
22798 |
+ int ret = 1; |
22799 |
+ |
22800 |
+- upper = ovl_dentry_upper(dentry); |
22801 |
++ /* Careful in RCU mode */ |
22802 |
++ if (!inode) |
22803 |
++ return -ECHILD; |
22804 |
++ |
22805 |
++ upper = ovl_i_dentry_upper(inode); |
22806 |
+ if (upper) |
22807 |
+ ret = ovl_revalidate_real(upper, flags, weak); |
22808 |
+ |
22809 |
+@@ -184,7 +189,8 @@ static struct inode *ovl_alloc_inode(struct super_block *sb) |
22810 |
+ oi->version = 0; |
22811 |
+ oi->flags = 0; |
22812 |
+ oi->__upperdentry = NULL; |
22813 |
+- oi->lower = NULL; |
22814 |
++ oi->lowerpath.dentry = NULL; |
22815 |
++ oi->lowerpath.layer = NULL; |
22816 |
+ oi->lowerdata = NULL; |
22817 |
+ mutex_init(&oi->lock); |
22818 |
+ |
22819 |
+@@ -205,7 +211,7 @@ static void ovl_destroy_inode(struct inode *inode) |
22820 |
+ struct ovl_inode *oi = OVL_I(inode); |
22821 |
+ |
22822 |
+ dput(oi->__upperdentry); |
22823 |
+- iput(oi->lower); |
22824 |
++ dput(oi->lowerpath.dentry); |
22825 |
+ if (S_ISDIR(inode->i_mode)) |
22826 |
+ ovl_dir_cache_free(inode); |
22827 |
+ else |
22828 |
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c |
22829 |
+index f48284a2a8960..9d33ce385bef0 100644 |
22830 |
+--- a/fs/overlayfs/util.c |
22831 |
++++ b/fs/overlayfs/util.c |
22832 |
+@@ -236,6 +236,17 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode) |
22833 |
+ return ovl_upperdentry_dereference(OVL_I(inode)); |
22834 |
+ } |
22835 |
+ |
22836 |
++void ovl_i_path_real(struct inode *inode, struct path *path) |
22837 |
++{ |
22838 |
++ path->dentry = ovl_i_dentry_upper(inode); |
22839 |
++ if (!path->dentry) { |
22840 |
++ path->dentry = OVL_I(inode)->lowerpath.dentry; |
22841 |
++ path->mnt = OVL_I(inode)->lowerpath.layer->mnt; |
22842 |
++ } else { |
22843 |
++ path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb)); |
22844 |
++ } |
22845 |
++} |
22846 |
++ |
22847 |
+ struct inode *ovl_inode_upper(struct inode *inode) |
22848 |
+ { |
22849 |
+ struct dentry *upperdentry = ovl_i_dentry_upper(inode); |
22850 |
+@@ -245,7 +256,9 @@ struct inode *ovl_inode_upper(struct inode *inode) |
22851 |
+ |
22852 |
+ struct inode *ovl_inode_lower(struct inode *inode) |
22853 |
+ { |
22854 |
+- return OVL_I(inode)->lower; |
22855 |
++ struct dentry *lowerdentry = OVL_I(inode)->lowerpath.dentry; |
22856 |
++ |
22857 |
++ return lowerdentry ? d_inode(lowerdentry) : NULL; |
22858 |
+ } |
22859 |
+ |
22860 |
+ struct inode *ovl_inode_real(struct inode *inode) |
22861 |
+@@ -443,7 +456,7 @@ static void ovl_dir_version_inc(struct dentry *dentry, bool impurity) |
22862 |
+ void ovl_dir_modified(struct dentry *dentry, bool impurity) |
22863 |
+ { |
22864 |
+ /* Copy mtime/ctime */ |
22865 |
+- ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry)); |
22866 |
++ ovl_copyattr(d_inode(dentry)); |
22867 |
+ |
22868 |
+ ovl_dir_version_inc(dentry, impurity); |
22869 |
+ } |
22870 |
+@@ -1060,3 +1073,33 @@ int ovl_sync_status(struct ovl_fs *ofs) |
22871 |
+ |
22872 |
+ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq); |
22873 |
+ } |
22874 |
++ |
22875 |
++/* |
22876 |
++ * ovl_copyattr() - copy inode attributes from layer to ovl inode |
22877 |
++ * |
22878 |
++ * When overlay copies inode information from an upper or lower layer to the |
22879 |
++ * relevant overlay inode it will apply the idmapping of the upper or lower |
22880 |
++ * layer when doing so ensuring that the ovl inode ownership will correctly |
22881 |
++ * reflect the ownership of the idmapped upper or lower layer. For example, an |
22882 |
++ * idmapped upper or lower layer mapping id 1001 to id 1000 will take care to |
22883 |
++ * map any lower or upper inode owned by id 1001 to id 1000. These mapping |
22884 |
++ * helpers are nops when the relevant layer isn't idmapped. |
22885 |
++ */ |
22886 |
++void ovl_copyattr(struct inode *inode) |
22887 |
++{ |
22888 |
++ struct path realpath; |
22889 |
++ struct inode *realinode; |
22890 |
++ struct user_namespace *real_mnt_userns; |
22891 |
++ |
22892 |
++ ovl_i_path_real(inode, &realpath); |
22893 |
++ realinode = d_inode(realpath.dentry); |
22894 |
++ real_mnt_userns = mnt_user_ns(realpath.mnt); |
22895 |
++ |
22896 |
++ inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode); |
22897 |
++ inode->i_gid = i_gid_into_mnt(real_mnt_userns, realinode); |
22898 |
++ inode->i_mode = realinode->i_mode; |
22899 |
++ inode->i_atime = realinode->i_atime; |
22900 |
++ inode->i_mtime = realinode->i_mtime; |
22901 |
++ inode->i_ctime = realinode->i_ctime; |
22902 |
++ i_size_write(inode, i_size_read(realinode)); |
22903 |
++} |
22904 |
+diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig |
22905 |
+index 8adabde685f13..c49d554cc9ae9 100644 |
22906 |
+--- a/fs/pstore/Kconfig |
22907 |
++++ b/fs/pstore/Kconfig |
22908 |
+@@ -126,6 +126,7 @@ config PSTORE_CONSOLE |
22909 |
+ config PSTORE_PMSG |
22910 |
+ bool "Log user space messages" |
22911 |
+ depends on PSTORE |
22912 |
++ select RT_MUTEXES |
22913 |
+ help |
22914 |
+ When the option is enabled, pstore will export a character |
22915 |
+ interface /dev/pmsg0 to log user space messages. On reboot |
22916 |
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c |
22917 |
+index d8542ec2f38c6..18cf94b597e05 100644 |
22918 |
+--- a/fs/pstore/pmsg.c |
22919 |
++++ b/fs/pstore/pmsg.c |
22920 |
+@@ -7,9 +7,10 @@ |
22921 |
+ #include <linux/device.h> |
22922 |
+ #include <linux/fs.h> |
22923 |
+ #include <linux/uaccess.h> |
22924 |
++#include <linux/rtmutex.h> |
22925 |
+ #include "internal.h" |
22926 |
+ |
22927 |
+-static DEFINE_MUTEX(pmsg_lock); |
22928 |
++static DEFINE_RT_MUTEX(pmsg_lock); |
22929 |
+ |
22930 |
+ static ssize_t write_pmsg(struct file *file, const char __user *buf, |
22931 |
+ size_t count, loff_t *ppos) |
22932 |
+@@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf, |
22933 |
+ if (!access_ok(buf, count)) |
22934 |
+ return -EFAULT; |
22935 |
+ |
22936 |
+- mutex_lock(&pmsg_lock); |
22937 |
++ rt_mutex_lock(&pmsg_lock); |
22938 |
+ ret = psinfo->write_user(&record, buf); |
22939 |
+- mutex_unlock(&pmsg_lock); |
22940 |
++ rt_mutex_unlock(&pmsg_lock); |
22941 |
+ return ret ? ret : count; |
22942 |
+ } |
22943 |
+ |
22944 |
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c |
22945 |
+index fefe3d391d3af..74e4d93f3e08d 100644 |
22946 |
+--- a/fs/pstore/ram.c |
22947 |
++++ b/fs/pstore/ram.c |
22948 |
+@@ -735,6 +735,7 @@ static int ramoops_probe(struct platform_device *pdev) |
22949 |
+ /* Make sure we didn't get bogus platform data pointer. */ |
22950 |
+ if (!pdata) { |
22951 |
+ pr_err("NULL platform data\n"); |
22952 |
++ err = -EINVAL; |
22953 |
+ goto fail_out; |
22954 |
+ } |
22955 |
+ |
22956 |
+@@ -742,6 +743,7 @@ static int ramoops_probe(struct platform_device *pdev) |
22957 |
+ !pdata->ftrace_size && !pdata->pmsg_size)) { |
22958 |
+ pr_err("The memory size and the record/console size must be " |
22959 |
+ "non-zero\n"); |
22960 |
++ err = -EINVAL; |
22961 |
+ goto fail_out; |
22962 |
+ } |
22963 |
+ |
22964 |
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c |
22965 |
+index fe5305028c6e2..155c7010b1f83 100644 |
22966 |
+--- a/fs/pstore/ram_core.c |
22967 |
++++ b/fs/pstore/ram_core.c |
22968 |
+@@ -439,7 +439,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, |
22969 |
+ phys_addr_t addr = page_start + i * PAGE_SIZE; |
22970 |
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT); |
22971 |
+ } |
22972 |
+- vaddr = vmap(pages, page_count, VM_MAP, prot); |
22973 |
++ /* |
22974 |
++ * VM_IOREMAP used here to bypass this region during vread() |
22975 |
++ * and kmap_atomic() (i.e. kcore) to avoid __va() failures. |
22976 |
++ */ |
22977 |
++ vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot); |
22978 |
+ kfree(pages); |
22979 |
+ |
22980 |
+ /* |
22981 |
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c |
22982 |
+index 3d7a35d6a18bc..b916859992ec8 100644 |
22983 |
+--- a/fs/reiserfs/namei.c |
22984 |
++++ b/fs/reiserfs/namei.c |
22985 |
+@@ -696,6 +696,7 @@ static int reiserfs_create(struct user_namespace *mnt_userns, struct inode *dir, |
22986 |
+ |
22987 |
+ out_failed: |
22988 |
+ reiserfs_write_unlock(dir->i_sb); |
22989 |
++ reiserfs_security_free(&security); |
22990 |
+ return retval; |
22991 |
+ } |
22992 |
+ |
22993 |
+@@ -779,6 +780,7 @@ static int reiserfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, |
+ |
+ out_failed: |
+ reiserfs_write_unlock(dir->i_sb); |
++ reiserfs_security_free(&security); |
+ return retval; |
+ } |
+ |
+@@ -878,6 +880,7 @@ static int reiserfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, |
+ retval = journal_end(&th); |
+ out_failed: |
+ reiserfs_write_unlock(dir->i_sb); |
++ reiserfs_security_free(&security); |
+ return retval; |
+ } |
+ |
+@@ -1194,6 +1197,7 @@ static int reiserfs_symlink(struct user_namespace *mnt_userns, |
+ retval = journal_end(&th); |
+ out_failed: |
+ reiserfs_write_unlock(parent_dir->i_sb); |
++ reiserfs_security_free(&security); |
+ return retval; |
+ } |
+ |
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c |
+index 8965c8e5e172b..857a65b057264 100644 |
+--- a/fs/reiserfs/xattr_security.c |
++++ b/fs/reiserfs/xattr_security.c |
+@@ -50,6 +50,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode, |
+ int error; |
+ |
+ sec->name = NULL; |
++ sec->value = NULL; |
+ |
+ /* Don't add selinux attributes on xattrs - they'll never get used */ |
+ if (IS_PRIVATE(dir)) |
+@@ -95,7 +96,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th, |
+ |
+ void reiserfs_security_free(struct reiserfs_security_handle *sec) |
+ { |
+- kfree(sec->name); |
+ kfree(sec->value); |
+ sec->name = NULL; |
+ sec->value = NULL; |
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c |
+index 749385015a8d3..5a59d56a2038c 100644 |
+--- a/fs/sysv/itree.c |
++++ b/fs/sysv/itree.c |
+@@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size) |
+ res += blocks; |
+ direct = 1; |
+ } |
+- return blocks; |
++ return res; |
+ } |
+ |
+ int sysv_getattr(struct user_namespace *mnt_userns, const struct path *path, |
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c |
+index 865e658535b11..0e30a50060d9d 100644 |
+--- a/fs/udf/namei.c |
++++ b/fs/udf/namei.c |
+@@ -1091,8 +1091,9 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir, |
+ return -EINVAL; |
+ |
+ ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); |
+- if (IS_ERR(ofi)) { |
+- retval = PTR_ERR(ofi); |
++ if (!ofi || IS_ERR(ofi)) { |
++ if (IS_ERR(ofi)) |
++ retval = PTR_ERR(ofi); |
+ goto end_rename; |
+ } |
+ |
+@@ -1101,8 +1102,7 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir, |
+ |
+ brelse(ofibh.sbh); |
+ tloc = lelb_to_cpu(ocfi.icb.extLocation); |
+- if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) |
+- != old_inode->i_ino) |
++ if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) |
+ goto end_rename; |
+ |
+ nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi); |
+diff --git a/fs/xattr.c b/fs/xattr.c |
+index 7117cb2538640..4c82f271f4aa3 100644 |
+--- a/fs/xattr.c |
++++ b/fs/xattr.c |
+@@ -1119,7 +1119,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size, |
+ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, |
+ char *buffer, size_t size) |
+ { |
+- bool trusted = capable(CAP_SYS_ADMIN); |
++ bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); |
+ struct simple_xattr *xattr; |
+ ssize_t remaining_size = size; |
+ int err = 0; |
+diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h |
+index 01e8bab1d767a..1aa462e5cafd4 100644 |
+--- a/include/dt-bindings/clock/imx8mn-clock.h |
++++ b/include/dt-bindings/clock/imx8mn-clock.h |
+@@ -19,7 +19,8 @@ |
+ #define IMX8MN_VIDEO_PLL1_REF_SEL 10 |
+ #define IMX8MN_DRAM_PLL_REF_SEL 11 |
+ #define IMX8MN_GPU_PLL_REF_SEL 12 |
+-#define IMX8MN_VPU_PLL_REF_SEL 13 |
++#define IMX8MN_M7_ALT_PLL_REF_SEL 13 |
++#define IMX8MN_VPU_PLL_REF_SEL IMX8MN_M7_ALT_PLL_REF_SEL |
+ #define IMX8MN_ARM_PLL_REF_SEL 14 |
+ #define IMX8MN_SYS_PLL1_REF_SEL 15 |
+ #define IMX8MN_SYS_PLL2_REF_SEL 16 |
+@@ -29,7 +30,8 @@ |
+ #define IMX8MN_VIDEO_PLL1 20 |
+ #define IMX8MN_DRAM_PLL 21 |
+ #define IMX8MN_GPU_PLL 22 |
+-#define IMX8MN_VPU_PLL 23 |
++#define IMX8MN_M7_ALT_PLL 23 |
++#define IMX8MN_VPU_PLL IMX8MN_M7_ALT_PLL |
+ #define IMX8MN_ARM_PLL 24 |
+ #define IMX8MN_SYS_PLL1 25 |
+ #define IMX8MN_SYS_PLL2 26 |
+@@ -39,7 +41,8 @@ |
+ #define IMX8MN_VIDEO_PLL1_BYPASS 30 |
+ #define IMX8MN_DRAM_PLL_BYPASS 31 |
+ #define IMX8MN_GPU_PLL_BYPASS 32 |
+-#define IMX8MN_VPU_PLL_BYPASS 33 |
++#define IMX8MN_M7_ALT_PLL_BYPASS 33 |
++#define IMX8MN_VPU_PLL_BYPASS IMX8MN_M7_ALT_PLL_BYPASS |
+ #define IMX8MN_ARM_PLL_BYPASS 34 |
+ #define IMX8MN_SYS_PLL1_BYPASS 35 |
+ #define IMX8MN_SYS_PLL2_BYPASS 36 |
+@@ -49,7 +52,8 @@ |
+ #define IMX8MN_VIDEO_PLL1_OUT 40 |
+ #define IMX8MN_DRAM_PLL_OUT 41 |
+ #define IMX8MN_GPU_PLL_OUT 42 |
+-#define IMX8MN_VPU_PLL_OUT 43 |
++#define IMX8MN_M7_ALT_PLL_OUT 43 |
++#define IMX8MN_VPU_PLL_OUT IMX8MN_M7_ALT_PLL_OUT |
+ #define IMX8MN_ARM_PLL_OUT 44 |
+ #define IMX8MN_SYS_PLL1_OUT 45 |
+ #define IMX8MN_SYS_PLL2_OUT 46 |
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h |
+index f60674692d365..ea2d919fd9c79 100644 |
+--- a/include/linux/debugfs.h |
++++ b/include/linux/debugfs.h |
+@@ -45,7 +45,7 @@ struct debugfs_u32_array { |
+ |
+ extern struct dentry *arch_debugfs_dir; |
+ |
+-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
++#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ |
+ static int __fops ## _open(struct inode *inode, struct file *file) \ |
+ { \ |
+ __simple_attr_check_format(__fmt, 0ull); \ |
+@@ -56,10 +56,16 @@ static const struct file_operations __fops = { \ |
+ .open = __fops ## _open, \ |
+ .release = simple_attr_release, \ |
+ .read = debugfs_attr_read, \ |
+- .write = debugfs_attr_write, \ |
++ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \ |
+ .llseek = no_llseek, \ |
+ } |
+ |
++#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) |
++ |
++#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ |
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) |
++ |
+ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); |
+ |
+ #if defined(CONFIG_DEBUG_FS) |
+@@ -102,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, |
+ size_t len, loff_t *ppos); |
+ ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
+ size_t len, loff_t *ppos); |
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf, |
++ size_t len, loff_t *ppos); |
+ |
+ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
+ struct dentry *new_dir, const char *new_name); |
+@@ -254,6 +262,13 @@ static inline ssize_t debugfs_attr_write(struct file *file, |
+ return -ENODEV; |
+ } |
+ |
++static inline ssize_t debugfs_attr_write_signed(struct file *file, |
++ const char __user *buf, |
++ size_t len, loff_t *ppos) |
++{ |
++ return -ENODEV; |
++} |
++ |
+ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
+ struct dentry *new_dir, char *new_name) |
+ { |
+diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h |
+index 30eb30d6909b0..3cd202d3eefb3 100644 |
+--- a/include/linux/eventfd.h |
++++ b/include/linux/eventfd.h |
+@@ -61,7 +61,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) |
+ return ERR_PTR(-ENOSYS); |
+ } |
+ |
+-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) |
++static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n) |
+ { |
+ return -ENOSYS; |
+ } |
+diff --git a/include/linux/fs.h b/include/linux/fs.h |
+index d55fdc02f82d8..68fcf3ec9cf6a 100644 |
+--- a/include/linux/fs.h |
++++ b/include/linux/fs.h |
+@@ -3507,7 +3507,7 @@ void simple_transaction_set(struct file *file, size_t n); |
+ * All attributes contain a text representation of a numeric value |
+ * that are accessed with the get() and set() functions. |
+ */ |
+-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
++#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \ |
+ static int __fops ## _open(struct inode *inode, struct file *file) \ |
+ { \ |
+ __simple_attr_check_format(__fmt, 0ull); \ |
+@@ -3518,10 +3518,16 @@ static const struct file_operations __fops = { \ |
+ .open = __fops ## _open, \ |
+ .release = simple_attr_release, \ |
+ .read = simple_attr_read, \ |
+- .write = simple_attr_write, \ |
++ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \ |
+ .llseek = generic_file_llseek, \ |
+ } |
+ |
++#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) |
++ |
++#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ |
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) |
++ |
+ static inline __printf(1, 2) |
+ void __simple_attr_check_format(const char *fmt, ...) |
+ { |
+@@ -3536,6 +3542,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, |
+ size_t len, loff_t *ppos); |
+ ssize_t simple_attr_write(struct file *file, const char __user *buf, |
+ size_t len, loff_t *ppos); |
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, |
++ size_t len, loff_t *ppos); |
+ |
+ struct ctl_table; |
+ int proc_nr_files(struct ctl_table *table, int write, |
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h |
23241 |
+index 97a28ad3393b5..369902d52f9cd 100644 |
23242 |
+--- a/include/linux/gpio/consumer.h |
23243 |
++++ b/include/linux/gpio/consumer.h |
23244 |
+@@ -8,27 +8,16 @@ |
23245 |
+ #include <linux/err.h> |
23246 |
+ |
23247 |
+ struct device; |
23248 |
+- |
23249 |
+-/** |
23250 |
+- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are |
23251 |
+- * preferable to the old integer-based handles. |
23252 |
+- * |
23253 |
+- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid |
23254 |
+- * until the GPIO is released. |
23255 |
+- */ |
23256 |
+ struct gpio_desc; |
23257 |
+- |
23258 |
+-/** |
23259 |
+- * Opaque descriptor for a structure of GPIO array attributes. This structure |
23260 |
+- * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be |
23261 |
+- * passed back to get/set array functions in order to activate fast processing |
23262 |
+- * path if applicable. |
23263 |
+- */ |
23264 |
+ struct gpio_array; |
23265 |
+ |
23266 |
+ /** |
23267 |
+- * Struct containing an array of descriptors that can be obtained using |
23268 |
+- * gpiod_get_array(). |
23269 |
++ * struct gpio_descs - Struct containing an array of descriptors that can be |
23270 |
++ * obtained using gpiod_get_array() |
23271 |
++ * |
23272 |
++ * @info: Pointer to the opaque gpio_array structure |
23273 |
++ * @ndescs: Number of held descriptors |
23274 |
++ * @desc: Array of pointers to GPIO descriptors |
23275 |
+ */ |
23276 |
+ struct gpio_descs { |
23277 |
+ struct gpio_array *info; |
23278 |
+@@ -43,8 +32,16 @@ struct gpio_descs { |
23279 |
+ #define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) |
23280 |
+ |
23281 |
+ /** |
23282 |
+- * Optional flags that can be passed to one of gpiod_* to configure direction |
23283 |
+- * and output value. These values cannot be OR'd. |
23284 |
++ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to |
23285 |
++ * configure direction and output value. These values |
23286 |
++ * cannot be OR'd. |
23287 |
++ * |
23288 |
++ * @GPIOD_ASIS: Don't change anything |
23289 |
++ * @GPIOD_IN: Set lines to input mode |
23290 |
++ * @GPIOD_OUT_LOW: Set lines to output and drive them low |
23291 |
++ * @GPIOD_OUT_HIGH: Set lines to output and drive them high |
23292 |
++ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low |
23293 |
++ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high |
23294 |
+ */ |
23295 |
+ enum gpiod_flags { |
23296 |
+ GPIOD_ASIS = 0, |
23297 |
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h |
23298 |
+index ddc8713ce57b7..8499fc9220e07 100644 |
23299 |
+--- a/include/linux/hyperv.h |
23300 |
++++ b/include/linux/hyperv.h |
23301 |
+@@ -1307,6 +1307,8 @@ struct hv_ring_buffer_debug_info { |
23302 |
+ int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, |
23303 |
+ struct hv_ring_buffer_debug_info *debug_info); |
23304 |
+ |
23305 |
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel); |
23306 |
++ |
23307 |
+ /* Vmbus interface */ |
23308 |
+ #define vmbus_driver_register(driver) \ |
23309 |
+ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) |
23310 |
+diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h |
23311 |
+index cf49997d5903e..8210a9e682154 100644 |
23312 |
+--- a/include/linux/iio/imu/adis.h |
23313 |
++++ b/include/linux/iio/imu/adis.h |
23314 |
+@@ -32,6 +32,7 @@ struct adis_timeout { |
23315 |
+ u16 sw_reset_ms; |
23316 |
+ u16 self_test_ms; |
23317 |
+ }; |
23318 |
++ |
23319 |
+ /** |
23320 |
+ * struct adis_data - ADIS chip variant specific data |
23321 |
+ * @read_delay: SPI delay for read operations in us |
23322 |
+@@ -45,10 +46,11 @@ struct adis_timeout { |
23323 |
+ * @self_test_mask: Bitmask of supported self-test operations |
23324 |
+ * @self_test_reg: Register address to request self test command |
23325 |
+ * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg |
23326 |
+- * @status_error_msgs: Array of error messgaes |
23327 |
++ * @status_error_msgs: Array of error messages |
23328 |
+ * @status_error_mask: Bitmask of errors supported by the device |
23329 |
+ * @timeouts: Chip specific delays |
23330 |
+ * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable |
23331 |
++ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin |
23332 |
+ * @has_paging: True if ADIS device has paged registers |
23333 |
+ * @burst_reg_cmd: Register command that triggers burst |
23334 |
+ * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined, |
23335 |
+@@ -78,6 +80,7 @@ struct adis_data { |
23336 |
+ unsigned int status_error_mask; |
23337 |
+ |
23338 |
+ int (*enable_irq)(struct adis *adis, bool enable); |
23339 |
++ bool unmasked_drdy; |
23340 |
+ |
23341 |
+ bool has_paging; |
23342 |
+ |
23343 |
+@@ -128,12 +131,12 @@ struct adis { |
23344 |
+ unsigned long irq_flag; |
23345 |
+ void *buffer; |
23346 |
+ |
23347 |
+- uint8_t tx[10] ____cacheline_aligned; |
23348 |
+- uint8_t rx[4]; |
23349 |
++ u8 tx[10] ____cacheline_aligned; |
23350 |
++ u8 rx[4]; |
23351 |
+ }; |
23352 |
+ |
23353 |
+ int adis_init(struct adis *adis, struct iio_dev *indio_dev, |
23354 |
+- struct spi_device *spi, const struct adis_data *data); |
23355 |
++ struct spi_device *spi, const struct adis_data *data); |
23356 |
+ int __adis_reset(struct adis *adis); |
23357 |
+ |
23358 |
+ /** |
23359 |
+@@ -154,9 +157,9 @@ static inline int adis_reset(struct adis *adis) |
23360 |
+ } |
23361 |
+ |
23362 |
+ int __adis_write_reg(struct adis *adis, unsigned int reg, |
23363 |
+- unsigned int val, unsigned int size); |
23364 |
++ unsigned int val, unsigned int size); |
23365 |
+ int __adis_read_reg(struct adis *adis, unsigned int reg, |
23366 |
+- unsigned int *val, unsigned int size); |
23367 |
++ unsigned int *val, unsigned int size); |
23368 |
+ |
23369 |
+ /** |
23370 |
+ * __adis_write_reg_8() - Write single byte to a register (unlocked) |
23371 |
+@@ -165,7 +168,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, |
23372 |
+ * @value: The value to write |
23373 |
+ */ |
23374 |
+ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, |
23375 |
+- uint8_t val) |
23376 |
++ u8 val) |
23377 |
+ { |
23378 |
+ return __adis_write_reg(adis, reg, val, 1); |
23379 |
+ } |
23380 |
+@@ -177,7 +180,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, |
23381 |
+ * @value: Value to be written |
23382 |
+ */ |
23383 |
+ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, |
23384 |
+- uint16_t val) |
23385 |
++ u16 val) |
23386 |
+ { |
23387 |
+ return __adis_write_reg(adis, reg, val, 2); |
23388 |
+ } |
23389 |
+@@ -189,7 +192,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, |
23390 |
+ * @value: Value to be written |
23391 |
+ */ |
23392 |
+ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, |
23393 |
+- uint32_t val) |
23394 |
++ u32 val) |
23395 |
+ { |
23396 |
+ return __adis_write_reg(adis, reg, val, 4); |
23397 |
+ } |
23398 |
+@@ -201,7 +204,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, |
23399 |
+ * @val: The value read back from the device |
23400 |
+ */ |
23401 |
+ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, |
23402 |
+- uint16_t *val) |
23403 |
++ u16 *val) |
23404 |
+ { |
23405 |
+ unsigned int tmp; |
23406 |
+ int ret; |
23407 |
+@@ -220,7 +223,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, |
23408 |
+ * @val: The value read back from the device |
23409 |
+ */ |
23410 |
+ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, |
23411 |
+- uint32_t *val) |
23412 |
++ u32 *val) |
23413 |
+ { |
23414 |
+ unsigned int tmp; |
23415 |
+ int ret; |
23416 |
+@@ -240,7 +243,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, |
23417 |
+ * @size: The size of the @value (in bytes) |
23418 |
+ */ |
23419 |
+ static inline int adis_write_reg(struct adis *adis, unsigned int reg, |
23420 |
+- unsigned int val, unsigned int size) |
23421 |
++ unsigned int val, unsigned int size) |
23422 |
+ { |
23423 |
+ int ret; |
23424 |
+ |
23425 |
+@@ -259,7 +262,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, |
23426 |
+ * @size: The size of the @val buffer |
23427 |
+ */ |
23428 |
+ static int adis_read_reg(struct adis *adis, unsigned int reg, |
23429 |
+- unsigned int *val, unsigned int size) |
23430 |
++ unsigned int *val, unsigned int size) |
23431 |
+ { |
23432 |
+ int ret; |
23433 |
+ |
23434 |
+@@ -277,7 +280,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg, |
23435 |
+ * @value: The value to write |
23436 |
+ */ |
23437 |
+ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, |
23438 |
+- uint8_t val) |
23439 |
++ u8 val) |
23440 |
+ { |
23441 |
+ return adis_write_reg(adis, reg, val, 1); |
23442 |
+ } |
23443 |
+@@ -289,7 +292,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, |
23444 |
+ * @value: Value to be written |
23445 |
+ */ |
23446 |
+ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, |
23447 |
+- uint16_t val) |
23448 |
++ u16 val) |
23449 |
+ { |
23450 |
+ return adis_write_reg(adis, reg, val, 2); |
23451 |
+ } |
23452 |
+@@ -301,7 +304,7 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, |
23453 |
+ * @value: Value to be written |
23454 |
+ */ |
23455 |
+ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, |
23456 |
+- uint32_t val) |
23457 |
++ u32 val) |
23458 |
+ { |
23459 |
+ return adis_write_reg(adis, reg, val, 4); |
23460 |
+ } |
23461 |
+@@ -313,7 +316,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, |
23462 |
+ * @val: The value read back from the device |
23463 |
+ */ |
23464 |
+ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, |
23465 |
+- uint16_t *val) |
23466 |
++ u16 *val) |
23467 |
+ { |
23468 |
+ unsigned int tmp; |
23469 |
+ int ret; |
23470 |
+@@ -332,7 +335,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, |
23471 |
+ * @val: The value read back from the device |
23472 |
+ */ |
23473 |
+ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, |
23474 |
+- uint32_t *val) |
23475 |
++ u32 *val) |
23476 |
+ { |
23477 |
+ unsigned int tmp; |
23478 |
+ int ret; |
23479 |
+@@ -403,9 +406,20 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, |
23480 |
+ __adis_update_bits_base(adis, reg, mask, val, 2)); \ |
23481 |
+ }) |
23482 |
+ |
23483 |
+-int adis_enable_irq(struct adis *adis, bool enable); |
23484 |
+ int __adis_check_status(struct adis *adis); |
23485 |
+ int __adis_initial_startup(struct adis *adis); |
23486 |
++int __adis_enable_irq(struct adis *adis, bool enable); |
23487 |
++ |
23488 |
++static inline int adis_enable_irq(struct adis *adis, bool enable) |
23489 |
++{ |
23490 |
++ int ret; |
23491 |
++ |
23492 |
++ mutex_lock(&adis->state_lock); |
23493 |
++ ret = __adis_enable_irq(adis, enable); |
23494 |
++ mutex_unlock(&adis->state_lock); |
23495 |
++ |
23496 |
++ return ret; |
23497 |
++} |
23498 |
+ |
23499 |
+ static inline int adis_check_status(struct adis *adis) |
23500 |
+ { |
23501 |
+@@ -441,8 +455,8 @@ static inline void adis_dev_unlock(struct adis *adis) |
23502 |
+ } |
23503 |
+ |
23504 |
+ int adis_single_conversion(struct iio_dev *indio_dev, |
23505 |
+- const struct iio_chan_spec *chan, unsigned int error_mask, |
23506 |
+- int *val); |
23507 |
++ const struct iio_chan_spec *chan, |
23508 |
++ unsigned int error_mask, int *val); |
23509 |
+ |
23510 |
+ #define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ |
23511 |
+ .type = IIO_VOLTAGE, \ |
23512 |
+@@ -491,7 +505,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, |
23513 |
+ .modified = 1, \ |
23514 |
+ .channel2 = IIO_MOD_ ## mod, \ |
23515 |
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ |
23516 |
+- info_sep, \ |
23517 |
++ (info_sep), \ |
23518 |
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
23519 |
+ .info_mask_shared_by_all = info_all, \ |
23520 |
+ .address = (addr), \ |
23521 |
+@@ -525,7 +539,7 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, |
23522 |
+ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); |
23523 |
+ |
23524 |
+ int adis_update_scan_mode(struct iio_dev *indio_dev, |
23525 |
+- const unsigned long *scan_mask); |
23526 |
++ const unsigned long *scan_mask); |
23527 |
+ |
23528 |
+ #else /* CONFIG_IIO_BUFFER */ |
23529 |
+ |
23530 |
+@@ -549,7 +563,8 @@ static inline int devm_adis_probe_trigger(struct adis *adis, |
23531 |
+ #ifdef CONFIG_DEBUG_FS |
23532 |
+ |
23533 |
+ int adis_debugfs_reg_access(struct iio_dev *indio_dev, |
23534 |
+- unsigned int reg, unsigned int writeval, unsigned int *readval); |
23535 |
++ unsigned int reg, unsigned int writeval, |
23536 |
++ unsigned int *readval); |
23537 |
+ |
23538 |
+ #else |
23539 |
+ |
23540 |
+diff --git a/include/linux/libata.h b/include/linux/libata.h |
23541 |
+index a64e12605d31d..d890c43cff146 100644 |
23542 |
+--- a/include/linux/libata.h |
23543 |
++++ b/include/linux/libata.h |
23544 |
+@@ -565,7 +565,10 @@ struct ata_taskfile { |
23545 |
+ u8 hob_lbam; |
23546 |
+ u8 hob_lbah; |
23547 |
+ |
23548 |
+- u8 feature; |
23549 |
++ union { |
23550 |
++ u8 error; |
23551 |
++ u8 feature; |
23552 |
++ }; |
23553 |
+ u8 nsect; |
23554 |
+ u8 lbal; |
23555 |
+ u8 lbam; |
23556 |
+@@ -573,7 +576,10 @@ struct ata_taskfile { |
23557 |
+ |
23558 |
+ u8 device; |
23559 |
+ |
23560 |
+- u8 command; /* IO operation */ |
23561 |
++ union { |
23562 |
++ u8 status; |
23563 |
++ u8 command; |
23564 |
++ }; |
23565 |
+ |
23566 |
+ u32 auxiliary; /* auxiliary field */ |
23567 |
+ /* from SATA 3.1 and */ |
23568 |
+@@ -1471,51 +1477,61 @@ static inline int sata_srst_pmp(struct ata_link *link) |
23569 |
+ return link->pmp; |
23570 |
+ } |
23571 |
+ |
23572 |
+-/* |
23573 |
+- * printk helpers |
23574 |
+- */ |
23575 |
+-__printf(3, 4) |
23576 |
+-void ata_port_printk(const struct ata_port *ap, const char *level, |
23577 |
+- const char *fmt, ...); |
23578 |
+-__printf(3, 4) |
23579 |
+-void ata_link_printk(const struct ata_link *link, const char *level, |
23580 |
+- const char *fmt, ...); |
23581 |
+-__printf(3, 4) |
23582 |
+-void ata_dev_printk(const struct ata_device *dev, const char *level, |
23583 |
+- const char *fmt, ...); |
23584 |
++#define ata_port_printk(level, ap, fmt, ...) \ |
23585 |
++ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__) |
23586 |
+ |
23587 |
+ #define ata_port_err(ap, fmt, ...) \ |
23588 |
+- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) |
23589 |
++ ata_port_printk(err, ap, fmt, ##__VA_ARGS__) |
23590 |
+ #define ata_port_warn(ap, fmt, ...) \ |
23591 |
+- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) |
23592 |
++ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__) |
23593 |
+ #define ata_port_notice(ap, fmt, ...) \ |
23594 |
+- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) |
23595 |
++ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__) |
23596 |
+ #define ata_port_info(ap, fmt, ...) \ |
23597 |
+- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) |
23598 |
++ ata_port_printk(info, ap, fmt, ##__VA_ARGS__) |
23599 |
+ #define ata_port_dbg(ap, fmt, ...) \ |
23600 |
+- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) |
23601 |
++ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__) |
23602 |
++ |
23603 |
++#define ata_link_printk(level, link, fmt, ...) \ |
23604 |
++do { \ |
23605 |
++ if (sata_pmp_attached((link)->ap) || \ |
23606 |
++ (link)->ap->slave_link) \ |
23607 |
++ pr_ ## level ("ata%u.%02u: " fmt, \ |
23608 |
++ (link)->ap->print_id, \ |
23609 |
++ (link)->pmp, \ |
23610 |
++ ##__VA_ARGS__); \ |
23611 |
++ else \ |
23612 |
++ pr_ ## level ("ata%u: " fmt, \ |
23613 |
++ (link)->ap->print_id, \ |
23614 |
++ ##__VA_ARGS__); \ |
23615 |
++} while (0) |
23616 |
+ |
23617 |
+ #define ata_link_err(link, fmt, ...) \ |
23618 |
+- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) |
23619 |
++ ata_link_printk(err, link, fmt, ##__VA_ARGS__) |
23620 |
+ #define ata_link_warn(link, fmt, ...) \ |
23621 |
+- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) |
23622 |
++ ata_link_printk(warn, link, fmt, ##__VA_ARGS__) |
23623 |
+ #define ata_link_notice(link, fmt, ...) \ |
23624 |
+- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) |
23625 |
++ ata_link_printk(notice, link, fmt, ##__VA_ARGS__) |
23626 |
+ #define ata_link_info(link, fmt, ...) \ |
23627 |
+- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) |
23628 |
++ ata_link_printk(info, link, fmt, ##__VA_ARGS__) |
23629 |
+ #define ata_link_dbg(link, fmt, ...) \ |
23630 |
+- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) |
23631 |
++ ata_link_printk(debug, link, fmt, ##__VA_ARGS__) |
23632 |
++ |
23633 |
++#define ata_dev_printk(level, dev, fmt, ...) \ |
23634 |
++ pr_ ## level("ata%u.%02u: " fmt, \ |
23635 |
++ (dev)->link->ap->print_id, \ |
23636 |
++ (dev)->link->pmp + (dev)->devno, \ |
23637 |
++ ##__VA_ARGS__) |
23638 |
+ |
23639 |
+ #define ata_dev_err(dev, fmt, ...) \ |
23640 |
+- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) |
23641 |
++ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__) |
23642 |
+ #define ata_dev_warn(dev, fmt, ...) \ |
23643 |
+- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) |
23644 |
++ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__) |
23645 |
+ #define ata_dev_notice(dev, fmt, ...) \ |
23646 |
+- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) |
23647 |
++ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__) |
23648 |
+ #define ata_dev_info(dev, fmt, ...) \ |
23649 |
+- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) |
23650 |
++ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__) |
23651 |
+ #define ata_dev_dbg(dev, fmt, ...) \ |
23652 |
+- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) |
23653 |
++ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__) |
23654 |
+ |
23655 |
+ void ata_print_version(const struct device *dev, const char *version); |
23656 |
+ |
23657 |
+@@ -2049,11 +2065,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap) |
23658 |
+ { |
23659 |
+ u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); |
23660 |
+ |
23661 |
+-#ifdef ATA_DEBUG |
23662 |
+ if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) |
23663 |
+- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", |
23664 |
+- status); |
23665 |
+-#endif |
23666 |
++ ata_port_dbg(ap, "abnormal Status 0x%X\n", status); |
23667 |
+ |
23668 |
+ return status; |
23669 |
+ } |
23670 |
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
23671 |
+index 3b97438afe3e2..3a75d644a1204 100644 |
23672 |
+--- a/include/linux/netdevice.h |
23673 |
++++ b/include/linux/netdevice.h |
23674 |
+@@ -167,31 +167,38 @@ static inline bool dev_xmit_complete(int rc) |
23675 |
+ * (unsigned long) so they can be read and written atomically. |
23676 |
+ */ |
23677 |
+ |
23678 |
++#define NET_DEV_STAT(FIELD) \ |
23679 |
++ union { \ |
23680 |
++ unsigned long FIELD; \ |
23681 |
++ atomic_long_t __##FIELD; \ |
23682 |
++ } |
23683 |
++ |
23684 |
+ struct net_device_stats { |
23685 |
+- unsigned long rx_packets; |
23686 |
+- unsigned long tx_packets; |
23687 |
+- unsigned long rx_bytes; |
23688 |
+- unsigned long tx_bytes; |
23689 |
+- unsigned long rx_errors; |
23690 |
+- unsigned long tx_errors; |
23691 |
+- unsigned long rx_dropped; |
23692 |
+- unsigned long tx_dropped; |
23693 |
+- unsigned long multicast; |
23694 |
+- unsigned long collisions; |
23695 |
+- unsigned long rx_length_errors; |
23696 |
+- unsigned long rx_over_errors; |
23697 |
+- unsigned long rx_crc_errors; |
23698 |
+- unsigned long rx_frame_errors; |
23699 |
+- unsigned long rx_fifo_errors; |
23700 |
+- unsigned long rx_missed_errors; |
23701 |
+- unsigned long tx_aborted_errors; |
23702 |
+- unsigned long tx_carrier_errors; |
23703 |
+- unsigned long tx_fifo_errors; |
23704 |
+- unsigned long tx_heartbeat_errors; |
23705 |
+- unsigned long tx_window_errors; |
23706 |
+- unsigned long rx_compressed; |
23707 |
+- unsigned long tx_compressed; |
23708 |
++ NET_DEV_STAT(rx_packets); |
23709 |
++ NET_DEV_STAT(tx_packets); |
23710 |
++ NET_DEV_STAT(rx_bytes); |
23711 |
++ NET_DEV_STAT(tx_bytes); |
23712 |
++ NET_DEV_STAT(rx_errors); |
23713 |
++ NET_DEV_STAT(tx_errors); |
23714 |
++ NET_DEV_STAT(rx_dropped); |
23715 |
++ NET_DEV_STAT(tx_dropped); |
23716 |
++ NET_DEV_STAT(multicast); |
23717 |
++ NET_DEV_STAT(collisions); |
23718 |
++ NET_DEV_STAT(rx_length_errors); |
23719 |
++ NET_DEV_STAT(rx_over_errors); |
23720 |
++ NET_DEV_STAT(rx_crc_errors); |
23721 |
++ NET_DEV_STAT(rx_frame_errors); |
23722 |
++ NET_DEV_STAT(rx_fifo_errors); |
23723 |
++ NET_DEV_STAT(rx_missed_errors); |
23724 |
++ NET_DEV_STAT(tx_aborted_errors); |
23725 |
++ NET_DEV_STAT(tx_carrier_errors); |
23726 |
++ NET_DEV_STAT(tx_fifo_errors); |
23727 |
++ NET_DEV_STAT(tx_heartbeat_errors); |
23728 |
++ NET_DEV_STAT(tx_window_errors); |
23729 |
++ NET_DEV_STAT(rx_compressed); |
23730 |
++ NET_DEV_STAT(tx_compressed); |
23731 |
+ }; |
23732 |
++#undef NET_DEV_STAT |
23733 |
+ |
23734 |
+ |
23735 |
+ #include <linux/cache.h> |
23736 |
+@@ -5477,4 +5484,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
23737 |
+ |
23738 |
+ extern struct net_device *blackhole_netdev; |
23739 |
+ |
23740 |
++/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ |
23741 |
++#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) |
23742 |
++#define DEV_STATS_ADD(DEV, FIELD, VAL) \ |
23743 |
++ atomic_long_add((VAL), &(DEV)->stats.__##FIELD) |
23744 |
++ |
23745 |
+ #endif /* _LINUX_NETDEVICE_H */ |
23746 |
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h |
23747 |
+index 4669632bd72bc..59d7228104d02 100644 |
23748 |
+--- a/include/linux/overflow.h |
23749 |
++++ b/include/linux/overflow.h |
23750 |
+@@ -118,81 +118,94 @@ static inline bool __must_check __must_check_overflow(bool overflow) |
23751 |
+ })) |
23752 |
+ |
23753 |
+ /** |
23754 |
+- * array_size() - Calculate size of 2-dimensional array. |
23755 |
+- * |
23756 |
+- * @a: dimension one |
23757 |
+- * @b: dimension two |
23758 |
++ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX |
23759 |
+ * |
23760 |
+- * Calculates size of 2-dimensional array: @a * @b. |
23761 |
++ * @factor1: first factor |
23762 |
++ * @factor2: second factor |
23763 |
+ * |
23764 |
+- * Returns: number of bytes needed to represent the array or SIZE_MAX on |
23765 |
+- * overflow. |
23766 |
++ * Returns: calculate @factor1 * @factor2, both promoted to size_t, |
23767 |
++ * with any overflow causing the return value to be SIZE_MAX. The |
23768 |
++ * lvalue must be size_t to avoid implicit type conversion. |
23769 |
+ */ |
23770 |
+-static inline __must_check size_t array_size(size_t a, size_t b) |
23771 |
++static inline size_t __must_check size_mul(size_t factor1, size_t factor2) |
23772 |
+ { |
23773 |
+ size_t bytes; |
23774 |
+ |
23775 |
+- if (check_mul_overflow(a, b, &bytes)) |
23776 |
++ if (check_mul_overflow(factor1, factor2, &bytes)) |
23777 |
+ return SIZE_MAX; |
23778 |
+ |
23779 |
+ return bytes; |
23780 |
+ } |
23781 |
+ |
23782 |
+ /** |
23783 |
+- * array3_size() - Calculate size of 3-dimensional array. |
23784 |
++ * size_add() - Calculate size_t addition with saturation at SIZE_MAX |
23785 |
+ * |
23786 |
+- * @a: dimension one |
23787 |
+- * @b: dimension two |
23788 |
+- * @c: dimension three |
23789 |
+- * |
23790 |
+- * Calculates size of 3-dimensional array: @a * @b * @c. |
23791 |
++ * @addend1: first addend |
23792 |
++ * @addend2: second addend |
23793 |
+ * |
23794 |
+- * Returns: number of bytes needed to represent the array or SIZE_MAX on |
23795 |
+- * overflow. |
23796 |
++ * Returns: calculate @addend1 + @addend2, both promoted to size_t, |
23797 |
++ * with any overflow causing the return value to be SIZE_MAX. The |
23798 |
++ * lvalue must be size_t to avoid implicit type conversion. |
23799 |
+ */ |
23800 |
+-static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) |
23801 |
++static inline size_t __must_check size_add(size_t addend1, size_t addend2) |
23802 |
+ { |
23803 |
+ size_t bytes; |
23804 |
+ |
23805 |
+- if (check_mul_overflow(a, b, &bytes)) |
23806 |
+- return SIZE_MAX; |
23807 |
+- if (check_mul_overflow(bytes, c, &bytes)) |
23808 |
++ if (check_add_overflow(addend1, addend2, &bytes)) |
23809 |
+ return SIZE_MAX; |
23810 |
+ |
23811 |
+ return bytes; |
23812 |
+ } |
23813 |
+ |
23814 |
+-/* |
23815 |
+- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for |
23816 |
+- * struct_size() below. |
23817 |
++/** |
23818 |
++ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX |
23819 |
++ * |
23820 |
++ * @minuend: value to subtract from |
23821 |
++ * @subtrahend: value to subtract from @minuend |
23822 |
++ * |
23823 |
++ * Returns: calculate @minuend - @subtrahend, both promoted to size_t, |
23824 |
++ * with any overflow causing the return value to be SIZE_MAX. For |
23825 |
++ * composition with the size_add() and size_mul() helpers, neither |
23826 |
++ * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX). |
23827 |
++ * The lvalue must be size_t to avoid implicit type conversion. |
23828 |
+ */ |
23829 |
+-static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) |
23830 |
++static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) |
23831 |
+ { |
23832 |
+ size_t bytes; |
23833 |
+ |
23834 |
+- if (check_mul_overflow(a, b, &bytes)) |
23835 |
+- return SIZE_MAX; |
23836 |
+- if (check_add_overflow(bytes, c, &bytes)) |
23837 |
++ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || |
23838 |
++ check_sub_overflow(minuend, subtrahend, &bytes)) |
23839 |
+ return SIZE_MAX; |
23840 |
+ |
23841 |
+ return bytes; |
23842 |
+ } |
23843 |
+ |
23844 |
+ /** |
23845 |
+- * struct_size() - Calculate size of structure with trailing array. |
23846 |
+- * @p: Pointer to the structure. |
23847 |
+- * @member: Name of the array member. |
23848 |
+- * @count: Number of elements in the array. |
23849 |
++ * array_size() - Calculate size of 2-dimensional array. |
23850 |
+ * |
23851 |
+- * Calculates size of memory needed for structure @p followed by an |
23852 |
+- * array of @count number of @member elements. |
23853 |
++ * @a: dimension one |
23854 |
++ * @b: dimension two |
23855 |
+ * |
23856 |
+- * Return: number of bytes needed or SIZE_MAX on overflow. |
23857 |
++ * Calculates size of 2-dimensional array: @a * @b. |
23858 |
++ * |
23859 |
++ * Returns: number of bytes needed to represent the array or SIZE_MAX on |
23860 |
++ * overflow. |
23861 |
+ */ |
23862 |
+-#define struct_size(p, member, count) \ |
23863 |
+- __ab_c_size(count, \ |
23864 |
+- sizeof(*(p)->member) + __must_be_array((p)->member),\ |
23865 |
+- sizeof(*(p))) |
23866 |
++#define array_size(a, b) size_mul(a, b) |
23867 |
++ |
23868 |
++/** |
23869 |
++ * array3_size() - Calculate size of 3-dimensional array. |
23870 |
++ * |
23871 |
++ * @a: dimension one |
23872 |
++ * @b: dimension two |
23873 |
++ * @c: dimension three |
23874 |
++ * |
23875 |
++ * Calculates size of 3-dimensional array: @a * @b * @c. |
23876 |
++ * |
23877 |
++ * Returns: number of bytes needed to represent the array or SIZE_MAX on |
23878 |
++ * overflow. |
23879 |
++ */ |
23880 |
++#define array3_size(a, b, c) size_mul(size_mul(a, b), c) |
23881 |
+ |
23882 |
+ /** |
23883 |
+ * flex_array_size() - Calculate size of a flexible array member |
23884 |
+@@ -208,7 +221,22 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) |
23885 |
+ * Return: number of bytes needed or SIZE_MAX on overflow. |
23886 |
+ */ |
23887 |
+ #define flex_array_size(p, member, count) \ |
23888 |
+- array_size(count, \ |
23889 |
+- sizeof(*(p)->member) + __must_be_array((p)->member)) |
23890 |
++ size_mul(count, \ |
23891 |
++ sizeof(*(p)->member) + __must_be_array((p)->member)) |
23892 |
++ |
23893 |
++/** |
23894 |
++ * struct_size() - Calculate size of structure with trailing flexible array. |
23895 |
++ * |
23896 |
++ * @p: Pointer to the structure. |
23897 |
++ * @member: Name of the array member. |
23898 |
++ * @count: Number of elements in the array. |
23899 |
++ * |
23900 |
++ * Calculates size of memory needed for structure @p followed by an |
23901 |
++ * array of @count number of @member elements. |
23902 |
++ * |
23903 |
++ * Return: number of bytes needed or SIZE_MAX on overflow. |
23904 |
++ */ |
23905 |
++#define struct_size(p, member, count) \ |
23906 |
++ size_add(sizeof(*(p)), flex_array_size(p, member, count)) |
23907 |
+ |
23908 |
+ #endif /* __LINUX_OVERFLOW_H */ |
23909 |
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h |
23910 |
+index 069c7fd953961..a2f25b26ae1ec 100644 |
23911 |
+--- a/include/linux/proc_fs.h |
23912 |
++++ b/include/linux/proc_fs.h |
23913 |
+@@ -191,8 +191,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {} |
23914 |
+ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } |
23915 |
+ |
23916 |
+ #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) |
23917 |
++#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;}) |
23918 |
+ #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) |
23919 |
+ #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) |
23920 |
++#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;}) |
23921 |
+ |
23922 |
+ static inline struct pid *tgid_pidfd_to_pid(const struct file *file) |
23923 |
+ { |
23924 |
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h |
23925 |
+index ba015a77238aa..6e18ca234f812 100644 |
23926 |
+--- a/include/linux/skmsg.h |
23927 |
++++ b/include/linux/skmsg.h |
23928 |
+@@ -83,6 +83,7 @@ struct sk_psock { |
23929 |
+ u32 apply_bytes; |
23930 |
+ u32 cork_bytes; |
23931 |
+ u32 eval; |
23932 |
++ bool redir_ingress; /* undefined if sk_redir is null */ |
23933 |
+ struct sk_msg *cork; |
23934 |
+ struct sk_psock_progs progs; |
23935 |
+ #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) |
23936 |
+diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h |
23937 |
+index 137f9f2ac4c3c..7bca213a3f838 100644 |
23938 |
+--- a/include/linux/soc/qcom/apr.h |
23939 |
++++ b/include/linux/soc/qcom/apr.h |
23940 |
+@@ -79,6 +79,15 @@ struct apr_resp_pkt { |
23941 |
+ #define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) |
23942 |
+ #define APR_SVC_MINOR_VERSION(v) (v & 0xFF) |
23943 |
+ |
23944 |
++struct packet_router; |
23945 |
++struct pkt_router_svc { |
23946 |
++ struct device *dev; |
23947 |
++ struct packet_router *pr; |
23948 |
++ spinlock_t lock; |
23949 |
++ int id; |
23950 |
++ void *priv; |
23951 |
++}; |
23952 |
++ |
23953 |
+ struct apr_device { |
23954 |
+ struct device dev; |
23955 |
+ uint16_t svc_id; |
23956 |
+@@ -86,11 +95,12 @@ struct apr_device { |
23957 |
+ uint32_t version; |
23958 |
+ char name[APR_NAME_SIZE]; |
23959 |
+ const char *service_path; |
23960 |
+- spinlock_t lock; |
23961 |
++ struct pkt_router_svc svc; |
23962 |
+ struct list_head node; |
23963 |
+ }; |
23964 |
+ |
23965 |
+ #define to_apr_device(d) container_of(d, struct apr_device, dev) |
23966 |
++#define svc_to_apr_device(d) container_of(d, struct apr_device, svc) |
23967 |
+ |
23968 |
+ struct apr_driver { |
23969 |
+ int (*probe)(struct apr_device *sl); |
23970 |
+diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h |
23971 |
+index 93884086f3924..adc80e29168ea 100644 |
23972 |
+--- a/include/linux/timerqueue.h |
23973 |
++++ b/include/linux/timerqueue.h |
23974 |
+@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) |
23975 |
+ { |
23976 |
+ struct rb_node *leftmost = rb_first_cached(&head->rb_root); |
23977 |
+ |
23978 |
+- return rb_entry(leftmost, struct timerqueue_node, node); |
23979 |
++ return rb_entry_safe(leftmost, struct timerqueue_node, node); |
23980 |
+ } |
23981 |
+ |
23982 |
+ static inline void timerqueue_init(struct timerqueue_node *node) |
23983 |
+diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h |
23984 |
+index e2e44bb1dad85..c1e5910809add 100644 |
23985 |
+--- a/include/linux/usb/typec.h |
23986 |
++++ b/include/linux/usb/typec.h |
23987 |
+@@ -295,6 +295,9 @@ int typec_set_mode(struct typec_port *port, int mode); |
23988 |
+ |
23989 |
+ void *typec_get_drvdata(struct typec_port *port); |
23990 |
+ |
23991 |
++int typec_get_fw_cap(struct typec_capability *cap, |
23992 |
++ struct fwnode_handle *fwnode); |
23993 |
++ |
23994 |
+ int typec_find_pwr_opmode(const char *name); |
23995 |
+ int typec_find_orientation(const char *name); |
23996 |
+ int typec_find_port_power_role(const char *name); |
23997 |
+diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h |
23998 |
+index 2f6b0861322ae..ac60c9fcfe9a6 100644 |
23999 |
+--- a/include/media/dvbdev.h |
24000 |
++++ b/include/media/dvbdev.h |
24001 |
+@@ -126,6 +126,7 @@ struct dvb_adapter { |
24002 |
+ * struct dvb_device - represents a DVB device node |
24003 |
+ * |
24004 |
+ * @list_head: List head with all DVB devices |
24005 |
++ * @ref: reference counter |
24006 |
+ * @fops: pointer to struct file_operations |
24007 |
+ * @adapter: pointer to the adapter that holds this device node |
24008 |
+ * @type: type of the device, as defined by &enum dvb_device_type. |
24009 |
+@@ -156,6 +157,7 @@ struct dvb_adapter { |
24010 |
+ */ |
24011 |
+ struct dvb_device { |
24012 |
+ struct list_head list_head; |
24013 |
++ struct kref ref; |
24014 |
+ const struct file_operations *fops; |
24015 |
+ struct dvb_adapter *adapter; |
24016 |
+ enum dvb_device_type type; |
24017 |
+@@ -187,6 +189,20 @@ struct dvb_device { |
24018 |
+ void *priv; |
24019 |
+ }; |
24020 |
+ |
24021 |
++/** |
24022 |
++ * dvb_device_get - Increase dvb_device reference |
24023 |
++ * |
24024 |
++ * @dvbdev: pointer to struct dvb_device |
24025 |
++ */ |
24026 |
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev); |
24027 |
++ |
24028 |
++/** |
24029 |
++ * dvb_device_put - Decrease dvb_device reference |
24030 |
++ * |
24031 |
++ * @dvbdev: pointer to struct dvb_device |
24032 |
++ */ |
24033 |
++void dvb_device_put(struct dvb_device *dvbdev); |
24034 |
++ |
24035 |
+ /** |
24036 |
+ * dvb_register_adapter - Registers a new DVB adapter |
24037 |
+ * |
24038 |
+@@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap, |
24039 |
+ /** |
24040 |
+ * dvb_remove_device - Remove a registered DVB device |
24041 |
+ * |
24042 |
+- * This does not free memory. To do that, call dvb_free_device(). |
24043 |
++ * This does not free memory. dvb_free_device() will do that when |
24044 |
++ * reference counter is empty |
24045 |
+ * |
24046 |
+ * @dvbdev: pointer to struct dvb_device |
24047 |
+ */ |
24048 |
+ void dvb_remove_device(struct dvb_device *dvbdev); |
24049 |
+ |
24050 |
+-/** |
24051 |
+- * dvb_free_device - Free memory occupied by a DVB device. |
24052 |
+- * |
24053 |
+- * Call dvb_unregister_device() before calling this function. |
24054 |
+- * |
24055 |
+- * @dvbdev: pointer to struct dvb_device |
24056 |
+- */ |
24057 |
+-void dvb_free_device(struct dvb_device *dvbdev); |
24058 |
+ |
24059 |
+ /** |
24060 |
+ * dvb_unregister_device - Unregisters a DVB device |
24061 |
+ * |
24062 |
+- * This is a combination of dvb_remove_device() and dvb_free_device(). |
24063 |
+- * Using this function is usually a mistake, and is often an indicator |
24064 |
+- * for a use-after-free bug (when a userspace process keeps a file |
24065 |
+- * handle to a detached device). |
24066 |
+- * |
24067 |
+ * @dvbdev: pointer to struct dvb_device |
24068 |
+ */ |
24069 |
+ void dvb_unregister_device(struct dvb_device *dvbdev); |
24070 |
+diff --git a/include/net/dst.h b/include/net/dst.h |
24071 |
+index a057319aabefa..17697ec79949f 100644 |
24072 |
+--- a/include/net/dst.h |
24073 |
++++ b/include/net/dst.h |
24074 |
+@@ -361,9 +361,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, |
24075 |
+ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, |
24076 |
+ struct net *net) |
24077 |
+ { |
24078 |
+- /* TODO : stats should be SMP safe */ |
24079 |
+- dev->stats.rx_packets++; |
24080 |
+- dev->stats.rx_bytes += skb->len; |
24081 |
++ DEV_STATS_INC(dev, rx_packets); |
24082 |
++ DEV_STATS_ADD(dev, rx_bytes, skb->len); |
24083 |
+ __skb_tunnel_rx(skb, dev, net); |
24084 |
+ } |
24085 |
+ |
24086 |
+diff --git a/include/net/mrp.h b/include/net/mrp.h |
24087 |
+index 1c308c034e1a6..a8102661fd613 100644 |
24088 |
+--- a/include/net/mrp.h |
24089 |
++++ b/include/net/mrp.h |
24090 |
+@@ -120,6 +120,7 @@ struct mrp_applicant { |
24091 |
+ struct sk_buff *pdu; |
24092 |
+ struct rb_root mad; |
24093 |
+ struct rcu_head rcu; |
24094 |
++ bool active; |
24095 |
+ }; |
24096 |
+ |
24097 |
+ struct mrp_port { |
24098 |
+diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h |
24099 |
+index efc9085c68927..6ec140b0a61bf 100644 |
24100 |
+--- a/include/net/sock_reuseport.h |
24101 |
++++ b/include/net/sock_reuseport.h |
24102 |
+@@ -16,6 +16,7 @@ struct sock_reuseport { |
24103 |
+ u16 max_socks; /* length of socks */ |
24104 |
+ u16 num_socks; /* elements in socks */ |
24105 |
+ u16 num_closed_socks; /* closed elements in socks */ |
24106 |
++ u16 incoming_cpu; |
24107 |
+ /* The last synq overflow event timestamp of this |
24108 |
+ * reuse->socks[] group. |
24109 |
+ */ |
24110 |
+@@ -58,5 +59,6 @@ static inline bool reuseport_has_conns(struct sock *sk) |
24111 |
+ } |
24112 |
+ |
24113 |
+ void reuseport_has_conns_set(struct sock *sk); |
24114 |
++void reuseport_update_incoming_cpu(struct sock *sk, int val); |
24115 |
+ |
24116 |
+ #endif /* _SOCK_REUSEPORT_H */ |
24117 |
+diff --git a/include/net/tcp.h b/include/net/tcp.h |
24118 |
+index 81ef95dc27ba5..fdac6913b6c8f 100644 |
24119 |
+--- a/include/net/tcp.h |
24120 |
++++ b/include/net/tcp.h |
24121 |
+@@ -2243,8 +2243,8 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); |
24122 |
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); |
24123 |
+ #endif /* CONFIG_BPF_SYSCALL */ |
24124 |
+ |
24125 |
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, |
24126 |
+- int flags); |
24127 |
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, |
24128 |
++ struct sk_msg *msg, u32 bytes, int flags); |
24129 |
+ #endif /* CONFIG_NET_SOCK_MSG */ |
24130 |
+ |
24131 |
+ #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG) |
24132 |
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h |
24133 |
+index 22af68b014262..658fccdc8660f 100644 |
24134 |
+--- a/include/sound/hdaudio.h |
24135 |
++++ b/include/sound/hdaudio.h |
24136 |
+@@ -558,6 +558,8 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, |
24137 |
+ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start); |
24138 |
+ void snd_hdac_stream_clear(struct hdac_stream *azx_dev); |
24139 |
+ void snd_hdac_stream_stop(struct hdac_stream *azx_dev); |
24140 |
++void snd_hdac_stop_streams(struct hdac_bus *bus); |
24141 |
++void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus); |
24142 |
+ void snd_hdac_stream_reset(struct hdac_stream *azx_dev); |
24143 |
+ void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set, |
24144 |
+ unsigned int streams, unsigned int reg); |
24145 |
+diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h |
24146 |
+index d4e31ea16aba3..56ea5cde5e63a 100644 |
24147 |
+--- a/include/sound/hdaudio_ext.h |
24148 |
++++ b/include/sound/hdaudio_ext.h |
24149 |
+@@ -92,7 +92,6 @@ void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus, |
24150 |
+ struct hdac_ext_stream *azx_dev, bool decouple); |
24151 |
+ void snd_hdac_ext_stream_decouple(struct hdac_bus *bus, |
24152 |
+ struct hdac_ext_stream *azx_dev, bool decouple); |
24153 |
+-void snd_hdac_ext_stop_streams(struct hdac_bus *bus); |
24154 |
+ |
24155 |
+ int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus, |
24156 |
+ struct hdac_ext_stream *stream, u32 value); |
24157 |
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h |
24158 |
+index e08bf475d02d4..181df0452ae2e 100644 |
24159 |
+--- a/include/sound/pcm.h |
24160 |
++++ b/include/sound/pcm.h |
24161 |
+@@ -106,24 +106,24 @@ struct snd_pcm_ops { |
24162 |
+ #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) |
24163 |
+ |
24164 |
+ /* If you change this don't forget to change rates[] table in pcm_native.c */ |
24165 |
+-#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */ |
24166 |
+-#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */ |
24167 |
+-#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */ |
24168 |
+-#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */ |
24169 |
+-#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */ |
24170 |
+-#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */ |
24171 |
+-#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */ |
24172 |
+-#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */ |
24173 |
+-#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */ |
24174 |
+-#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */ |
24175 |
+-#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */ |
24176 |
+-#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */ |
24177 |
+-#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */ |
24178 |
+-#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */ |
24179 |
+-#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */ |
24180 |
+- |
24181 |
+-#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */ |
24182 |
+-#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */ |
24183 |
++#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */ |
24184 |
++#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */ |
24185 |
++#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */ |
24186 |
++#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */ |
24187 |
++#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */ |
24188 |
++#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */ |
24189 |
++#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */ |
24190 |
++#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */ |
24191 |
++#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */ |
24192 |
++#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */ |
24193 |
++#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */ |
24194 |
++#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */ |
24195 |
++#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ |
24196 |
++#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ |
24197 |
++#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ |
24198 |
++ |
24199 |
++#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */ |
24200 |
++#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */ |
24201 |
+ |
24202 |
+ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ |
24203 |
+ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ |
24204 |
+diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h |
24205 |
+index 59363a083ecb9..d92691c78cff6 100644 |
24206 |
+--- a/include/trace/events/ib_mad.h |
24207 |
++++ b/include/trace/events/ib_mad.h |
24208 |
+@@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, |
24209 |
+ __field(int, retries_left) |
24210 |
+ __field(int, max_retries) |
24211 |
+ __field(int, retry) |
24212 |
+- __field(u16, pkey) |
24213 |
+ ), |
24214 |
+ |
24215 |
+ TP_fast_assign( |
24216 |
+@@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, |
24217 |
+ "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \ |
24218 |
+ "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \ |
24219 |
+ "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\ |
24220 |
+- "pkey 0x%x rpqn 0x%x rqpkey 0x%x", |
24221 |
++ "rpqn 0x%x rqpkey 0x%x", |
24222 |
+ __entry->dev_index, __entry->port_num, __entry->qp_num, |
24223 |
+ __entry->agent_priv, be64_to_cpu(__entry->wrtid), |
24224 |
+ __entry->retries_left, __entry->max_retries, |
24225 |
+@@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, |
24226 |
+ be16_to_cpu(__entry->class_specific), |
24227 |
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), |
24228 |
+ be32_to_cpu(__entry->attr_mod), |
24229 |
+- be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey, |
24230 |
++ be32_to_cpu(__entry->dlid), __entry->sl, |
24231 |
+ __entry->rqpn, __entry->rqkey |
24232 |
+ ) |
24233 |
+ ); |
24234 |
+@@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, |
24235 |
+ __field(u16, wc_status) |
24236 |
+ __field(u32, slid) |
24237 |
+ __field(u32, dev_index) |
24238 |
+- __field(u16, pkey) |
24239 |
+ ), |
24240 |
+ |
24241 |
+ TP_fast_assign( |
24242 |
+@@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, |
24243 |
+ __entry->slid = wc->slid; |
24244 |
+ __entry->src_qp = wc->src_qp; |
24245 |
+ __entry->sl = wc->sl; |
24246 |
+- ib_query_pkey(qp_info->port_priv->device, |
24247 |
+- qp_info->port_priv->port_num, |
24248 |
+- wc->pkey_index, &__entry->pkey); |
24249 |
+ __entry->wc_status = wc->status; |
24250 |
+ ), |
24251 |
+ |
24252 |
+@@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, |
24253 |
+ "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \ |
24254 |
+ "method 0x%02x status 0x%04x class_specific 0x%04x " \ |
24255 |
+ "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \ |
24256 |
+- "slid 0x%08x src QP%d, sl %d pkey 0x%04x", |
24257 |
++ "slid 0x%08x src QP%d, sl %d", |
24258 |
+ __entry->dev_index, __entry->port_num, __entry->qp_num, |
24259 |
+ __entry->wc_status, |
24260 |
+ __entry->length, |
24261 |
+@@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, |
24262 |
+ be16_to_cpu(__entry->class_specific), |
24263 |
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), |
24264 |
+ be32_to_cpu(__entry->attr_mod), |
24265 |
+- __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey |
24266 |
++ __entry->slid, __entry->src_qp, __entry->sl |
24267 |
+ ) |
24268 |
+ ); |
24269 |
+ |
24270 |
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h |
24271 |
+index 808c73c52820f..a50e4646bd6de 100644 |
24272 |
+--- a/include/uapi/drm/drm_fourcc.h |
24273 |
++++ b/include/uapi/drm/drm_fourcc.h |
24274 |
+@@ -308,6 +308,13 @@ extern "C" { |
24275 |
+ */ |
24276 |
+ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ |
24277 |
+ |
24278 |
++/* 2 plane YCbCr420. |
24279 |
++ * 3 10 bit components and 2 padding bits packed into 4 bytes. |
24280 |
++ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian |
24281 |
++ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian |
24282 |
++ */ |
24283 |
++#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ |
24284 |
++ |
24285 |
+ /* 3 plane non-subsampled (444) YCbCr |
24286 |
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded |
24287 |
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian |
24288 |
+@@ -842,6 +849,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) |
24289 |
+ * and UV. Some SAND-using hardware stores UV in a separate tiled |
24290 |
+ * image from Y to reduce the column height, which is not supported |
24291 |
+ * with these modifiers. |
24292 |
++ * |
24293 |
++ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also |
24294 |
++ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes |
24295 |
++ * wide, but as this is a 10 bpp format that translates to 96 pixels. |
24296 |
+ */ |
24297 |
+ |
24298 |
+ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ |
24299 |
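The new DRM_FORMAT_P030 comment above describes three 10-bit samples plus 2 padding bits packed into each 32-bit little-endian word. A standalone sketch, independent of the DRM headers, that rebuilds the usual fourcc_code() packing and packs/unpacks one luma word the way the comment describes:

#include <stdio.h>
#include <stdint.h>

/* Same construction as the kernel's fourcc_code() macro: four ASCII
 * characters packed little-endian into a 32-bit value. */
static uint32_t fourcc(char a, char b, char c, char d)
{
	return (uint32_t)a | ((uint32_t)b << 8) |
	       ((uint32_t)c << 16) | ((uint32_t)d << 24);
}

/* Pack three 10-bit luma samples as described for DRM_FORMAT_P030:
 * [31:0] x:Y2:Y1:Y0, 2:10:10:10, little endian. */
static uint32_t p030_pack_y(uint16_t y0, uint16_t y1, uint16_t y2)
{
	return (uint32_t)(y0 & 0x3ff) |
	       ((uint32_t)(y1 & 0x3ff) << 10) |
	       ((uint32_t)(y2 & 0x3ff) << 20);
}

static void p030_unpack_y(uint32_t w, uint16_t y[3])
{
	y[0] = w & 0x3ff;
	y[1] = (w >> 10) & 0x3ff;
	y[2] = (w >> 20) & 0x3ff;
}

int main(void)
{
	uint16_t y[3];
	uint32_t w = p030_pack_y(64, 512, 940);	/* typical 10-bit video levels */

	p030_unpack_y(w, y);
	printf("DRM_FORMAT_P030 = 0x%08x\n", fourcc('P', '0', '3', '0'));
	printf("packed word 0x%08x -> Y0=%u Y1=%u Y2=%u\n", w, y[0], y[1], y[2]);
	return 0;
}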
+diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h |
24300 |
+index c750eac09fc9c..f7c01709cb0ff 100644 |
24301 |
+--- a/include/uapi/linux/idxd.h |
24302 |
++++ b/include/uapi/linux/idxd.h |
24303 |
+@@ -272,7 +272,7 @@ struct dsa_completion_record { |
24304 |
+ }; |
24305 |
+ |
24306 |
+ uint32_t delta_rec_size; |
24307 |
+- uint32_t crc_val; |
24308 |
++ uint64_t crc_val; |
24309 |
+ |
24310 |
+ /* DIF check & strip */ |
24311 |
+ struct { |
24312 |
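Widening a field, as the crc_val change above does, generally moves whatever follows it and grows the structure, which offsetof()/sizeof() make visible. A reduced, hypothetical pair of structs (field names borrowed from the UAPI header, but this is not the real dsa_completion_record layout) showing that effect:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Reduced stand-ins for a completion record before and after widening
 * crc_val; only illustrative, not the real UAPI layout. */
struct rec_old {
	uint32_t delta_rec_size;
	uint32_t crc_val;
	uint64_t dif_state;	/* stands in for the DIF check & strip block */
};

struct rec_new {
	uint32_t delta_rec_size;
	uint64_t crc_val;	/* widened to hold a 64-bit CRC result */
	uint64_t dif_state;
};

int main(void)
{
	printf("old: sizeof=%zu offsetof(dif)=%zu\n",
	       sizeof(struct rec_old), offsetof(struct rec_old, dif_state));
	printf("new: sizeof=%zu offsetof(dif)=%zu\n",
	       sizeof(struct rec_new), offsetof(struct rec_new, dif_state));
	return 0;
}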
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h |
24313 |
+index 7272f85d6d6ab..3736f2fe15418 100644 |
24314 |
+--- a/include/uapi/linux/swab.h |
24315 |
++++ b/include/uapi/linux/swab.h |
24316 |
+@@ -3,7 +3,7 @@ |
24317 |
+ #define _UAPI_LINUX_SWAB_H |
24318 |
+ |
24319 |
+ #include <linux/types.h> |
24320 |
+-#include <linux/compiler.h> |
24321 |
++#include <linux/stddef.h> |
24322 |
+ #include <asm/bitsperlong.h> |
24323 |
+ #include <asm/swab.h> |
24324 |
+ |
24325 |
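The swab.h hunk only swaps which header the UAPI file pulls in (linux/stddef.h instead of linux/compiler.h); the byte-swapping helpers it declares are unchanged. For context, a userspace sketch of the arithmetic behind a constant 32-bit swab, mirroring the kernel's ___constant_swab32():

#include <stdio.h>
#include <stdint.h>

/* Same shuffling as ___constant_swab32(): move each byte of a 32-bit
 * value to the mirrored position. */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

int main(void)
{
	uint32_t v = 0x12345678;

	printf("0x%08x -> 0x%08x\n", v, swab32(v));	/* prints 0x78563412 */
	return 0;
}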
+diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h |
24326 |
+index a75e14edc957e..dbd60f48b4b01 100644 |
24327 |
+--- a/include/uapi/sound/asequencer.h |
24328 |
++++ b/include/uapi/sound/asequencer.h |
24329 |
+@@ -344,10 +344,10 @@ typedef int __bitwise snd_seq_client_type_t; |
24330 |
+ #define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2) |
24331 |
+ |
24332 |
+ /* event filter flags */ |
24333 |
+-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */ |
24334 |
+-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */ |
24335 |
+-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */ |
24336 |
+-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */ |
24337 |
++#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */ |
24338 |
++#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */ |
24339 |
++#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */ |
24340 |
++#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */ |
24341 |
+ |
24342 |
+ struct snd_seq_client_info { |
24343 |
+ int client; /* client number to inquire */ |
24344 |
+diff --git a/kernel/Makefile b/kernel/Makefile |
24345 |
+index 0e119c52a2cd6..599cb926449a6 100644 |
24346 |
+--- a/kernel/Makefile |
24347 |
++++ b/kernel/Makefile |
24348 |
+@@ -59,7 +59,7 @@ obj-$(CONFIG_FREEZER) += freezer.o |
24349 |
+ obj-$(CONFIG_PROFILING) += profile.o |
24350 |
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o |
24351 |
+ obj-y += time/ |
24352 |
+-obj-$(CONFIG_FUTEX) += futex.o |
24353 |
++obj-$(CONFIG_FUTEX) += futex/ |
24354 |
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
24355 |
+ obj-$(CONFIG_SMP) += smp.o |
24356 |
+ ifneq ($(CONFIG_SMP),y) |
24357 |
+diff --git a/kernel/acct.c b/kernel/acct.c |
24358 |
+index 23a7ab8e6cbc8..2b5cc63eb295b 100644 |
24359 |
+--- a/kernel/acct.c |
24360 |
++++ b/kernel/acct.c |
24361 |
+@@ -331,6 +331,8 @@ static comp_t encode_comp_t(unsigned long value) |
24362 |
+ exp++; |
24363 |
+ } |
24364 |
+ |
24365 |
++ if (exp > (((comp_t) ~0U) >> MANTSIZE)) |
24366 |
++ return (comp_t) ~0U; |
24367 |
+ /* |
24368 |
+ * Clean it up and polish it off. |
24369 |
+ */ |
24370 |
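encode_comp_t() squeezes a value into a 13-bit mantissa and a 3-bit base-8 exponent; the two added lines saturate to the all-ones comp_t once the exponent no longer fits, instead of letting it overflow into the mantissa bits. A userspace re-implementation with the same clamp (the rounding loop is reconstructed from kernel/acct.c rather than from this hunk, and comp_t is assumed to be 16 bits):

#include <stdio.h>
#include <stdint.h>

typedef uint16_t comp_t;		/* 3-bit exponent, 13-bit mantissa */

#define MANTSIZE 13
#define EXPSIZE  3
#define MAXFRACT ((1 << MANTSIZE) - 1)	/* largest mantissa value */

static comp_t encode_comp_t(unsigned long value)
{
	int exp = 0, rnd = 0;

	while (value > MAXFRACT) {
		rnd = value & (1 << (EXPSIZE - 1));	/* round bit */
		value >>= EXPSIZE;			/* base-8 exponent */
		exp++;
	}

	/* Rounding up may push the mantissa over MAXFRACT again. */
	if (rnd && (++value > MAXFRACT)) {
		value >>= EXPSIZE;
		exp++;
	}

	/* The added check: if the exponent no longer fits in its 3 bits,
	 * return the largest representable value instead of letting it
	 * spill into the mantissa field. */
	if (exp > (((comp_t) ~0U) >> MANTSIZE))
		return (comp_t) ~0U;

	return (comp_t)((exp << MANTSIZE) + value);
}

int main(void)
{
	printf("encode(1000)    = 0x%04x\n", encode_comp_t(1000));
	printf("encode(1 << 20) = 0x%04x\n", encode_comp_t(1UL << 20));
	printf("encode(~0UL)    = 0x%04x (saturated)\n", encode_comp_t(~0UL));
	return 0;
}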
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c |
24371 |
+index 7cb13b9f69a66..0c2fa93bd8d27 100644 |
24372 |
+--- a/kernel/bpf/btf.c |
24373 |
++++ b/kernel/bpf/btf.c |
24374 |
+@@ -3864,6 +3864,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env, |
24375 |
+ break; |
24376 |
+ } |
24377 |
+ |
24378 |
++ if (btf_type_is_resolve_source_only(arg_type)) { |
24379 |
++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
24380 |
++ return -EINVAL; |
24381 |
++ } |
24382 |
++ |
24383 |
+ if (args[i].name_off && |
24384 |
+ (!btf_name_offset_valid(btf, args[i].name_off) || |
24385 |
+ !btf_name_valid_identifier(btf, args[i].name_off))) { |
24386 |
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
24387 |
+index cceb29b0585f0..488225bb42f63 100644 |
24388 |
+--- a/kernel/bpf/verifier.c |
24389 |
++++ b/kernel/bpf/verifier.c |
24390 |
+@@ -606,6 +606,14 @@ static const char *kernel_type_name(const struct btf* btf, u32 id) |
24391 |
+ return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); |
24392 |
+ } |
24393 |
+ |
24394 |
++/* The reg state of a pointer or a bounded scalar was saved when |
24395 |
++ * it was spilled to the stack. |
24396 |
++ */ |
24397 |
++static bool is_spilled_reg(const struct bpf_stack_state *stack) |
24398 |
++{ |
24399 |
++ return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; |
24400 |
++} |
24401 |
++ |
24402 |
+ static void print_verifier_state(struct bpf_verifier_env *env, |
24403 |
+ const struct bpf_func_state *state) |
24404 |
+ { |
24405 |
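The new is_spilled_reg() helper looks at the last byte of the slot_type array rather than the first, presumably so that a register spilled with a store narrower than the full 8-byte slot is still recognised: the spill marker sits in the most-significant slot bytes while the untouched bytes keep their old tag. A self-contained sketch of that layout (type and slot names modelled on the verifier, with the usual 8-byte BPF_REG_SIZE assumed):

#include <stdio.h>
#include <string.h>

#define BPF_REG_SIZE 8

enum slot_type { STACK_INVALID, STACK_MISC, STACK_ZERO, STACK_SPILL };

struct stack_slot {
	unsigned char slot_type[BPF_REG_SIZE];	/* one tag per stack byte */
};

/* Mirrors the helper added above: a slot holds a spilled register iff
 * its highest byte carries the STACK_SPILL tag. */
static int is_spilled_reg(const struct stack_slot *s)
{
	return s->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

/* Mark a spill that only wrote 'size' bytes of the 8-byte slot: the
 * written bytes (counted from the top of the slot) become STACK_SPILL,
 * the remaining bytes stay STACK_MISC. */
static void mark_narrow_spill(struct stack_slot *s, int size)
{
	int i;

	memset(s->slot_type, STACK_MISC, BPF_REG_SIZE);
	for (i = 0; i < size; i++)
		s->slot_type[BPF_REG_SIZE - 1 - i] = STACK_SPILL;
}

int main(void)
{
	struct stack_slot s;

	mark_narrow_spill(&s, 4);	/* e.g. a 32-bit register spill */
	printf("slot_type[0] == STACK_SPILL? %d\n", s.slot_type[0] == STACK_SPILL);
	printf("is_spilled_reg()           ? %d\n", is_spilled_reg(&s));
	return 0;
}

Checking slot_type[0], as the replaced code did, would report 0 for the narrow spill above even though the slot does hold a spilled register.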
+@@ -709,7 +717,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, |
24406 |
+ continue; |
24407 |
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); |
24408 |
+ print_liveness(env, state->stack[i].spilled_ptr.live); |
24409 |
+- if (state->stack[i].slot_type[0] == STACK_SPILL) { |
24410 |
++ if (is_spilled_reg(&state->stack[i])) { |
24411 |
+ reg = &state->stack[i].spilled_ptr; |
24412 |
+ t = reg->type; |
24413 |
+ verbose(env, "=%s", reg_type_str(env, t)); |
24414 |
+@@ -2351,7 +2359,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, |
24415 |
+ reg->precise = true; |
24416 |
+ } |
24417 |
+ for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { |
24418 |
+- if (func->stack[j].slot_type[0] != STACK_SPILL) |
24419 |
++ if (!is_spilled_reg(&func->stack[j])) |
24420 |
+ continue; |
24421 |
+ reg = &func->stack[j].spilled_ptr; |
24422 |
+ if (reg->type != SCALAR_VALUE) |
24423 |
+@@ -2361,7 +2369,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, |
24424 |
+ } |
24425 |
+ } |
24426 |
+ |
24427 |
+-static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24428 |
++static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, |
24429 |
+ int spi) |
24430 |
+ { |
24431 |
+ struct bpf_verifier_state *st = env->cur_state; |
24432 |
+@@ -2378,7 +2386,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24433 |
+ if (!env->bpf_capable) |
24434 |
+ return 0; |
24435 |
+ |
24436 |
+- func = st->frame[st->curframe]; |
24437 |
++ func = st->frame[frame]; |
24438 |
+ if (regno >= 0) { |
24439 |
+ reg = &func->regs[regno]; |
24440 |
+ if (reg->type != SCALAR_VALUE) { |
24441 |
+@@ -2393,7 +2401,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24442 |
+ } |
24443 |
+ |
24444 |
+ while (spi >= 0) { |
24445 |
+- if (func->stack[spi].slot_type[0] != STACK_SPILL) { |
24446 |
++ if (!is_spilled_reg(&func->stack[spi])) { |
24447 |
+ stack_mask = 0; |
24448 |
+ break; |
24449 |
+ } |
24450 |
+@@ -2459,7 +2467,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24451 |
+ break; |
24452 |
+ |
24453 |
+ new_marks = false; |
24454 |
+- func = st->frame[st->curframe]; |
24455 |
++ func = st->frame[frame]; |
24456 |
+ bitmap_from_u64(mask, reg_mask); |
24457 |
+ for_each_set_bit(i, mask, 32) { |
24458 |
+ reg = &func->regs[i]; |
24459 |
+@@ -2492,7 +2500,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24460 |
+ return 0; |
24461 |
+ } |
24462 |
+ |
24463 |
+- if (func->stack[i].slot_type[0] != STACK_SPILL) { |
24464 |
++ if (!is_spilled_reg(&func->stack[i])) { |
24465 |
+ stack_mask &= ~(1ull << i); |
24466 |
+ continue; |
24467 |
+ } |
24468 |
+@@ -2525,12 +2533,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, |
24469 |
+ |
24470 |
+ static int mark_chain_precision(struct bpf_verifier_env *env, int regno) |
24471 |
+ { |
24472 |
+- return __mark_chain_precision(env, regno, -1); |
24473 |
++ return __mark_chain_precision(env, env->cur_state->curframe, regno, -1); |
24474 |
+ } |
24475 |
+ |
24476 |
+-static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) |
24477 |
++static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno) |
24478 |
+ { |
24479 |
+- return __mark_chain_precision(env, -1, spi); |
24480 |
++ return __mark_chain_precision(env, frame, regno, -1); |
24481 |
++} |
24482 |
++ |
24483 |
++static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi) |
24484 |
++{ |
24485 |
++ return __mark_chain_precision(env, frame, -1, spi); |
24486 |
+ } |
24487 |
+ |
24488 |
+ static bool is_spillable_regtype(enum bpf_reg_type type) |
24489 |
+@@ -2682,7 +2695,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, |
24490 |
+ /* regular write of data into stack destroys any spilled ptr */ |
24491 |
+ state->stack[spi].spilled_ptr.type = NOT_INIT; |
24492 |
+ /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ |
24493 |
+- if (state->stack[spi].slot_type[0] == STACK_SPILL) |
24494 |
++ if (is_spilled_reg(&state->stack[spi])) |
24495 |
+ for (i = 0; i < BPF_REG_SIZE; i++) |
24496 |
+ state->stack[spi].slot_type[i] = STACK_MISC; |
24497 |
+ |
24498 |
+@@ -2772,14 +2785,17 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, |
24499 |
+ spi = slot / BPF_REG_SIZE; |
24500 |
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; |
24501 |
+ |
24502 |
+- if (!env->allow_ptr_leaks |
24503 |
+- && *stype != NOT_INIT |
24504 |
+- && *stype != SCALAR_VALUE) { |
24505 |
+- /* Reject the write if there's are spilled pointers in |
24506 |
+- * range. If we didn't reject here, the ptr status |
24507 |
+- * would be erased below (even though not all slots are |
24508 |
+- * actually overwritten), possibly opening the door to |
24509 |
+- * leaks. |
24510 |
++ if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { |
24511 |
++ /* Reject the write if range we may write to has not |
24512 |
++ * been initialized beforehand. If we didn't reject |
24513 |
++ * here, the ptr status would be erased below (even |
24514 |
++ * though not all slots are actually overwritten), |
24515 |
++ * possibly opening the door to leaks. |
24516 |
++ * |
24517 |
++ * We do however catch STACK_INVALID case below, and |
24518 |
++ * only allow reading possibly uninitialized memory |
24519 |
++ * later for CAP_PERFMON, as the write may not happen to |
24520 |
++ * that slot. |
24521 |
+ */ |
24522 |
+ verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", |
24523 |
+ insn_idx, i); |
24524 |
+@@ -2892,7 +2908,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, |
24525 |
+ stype = reg_state->stack[spi].slot_type; |
24526 |
+ reg = ®_state->stack[spi].spilled_ptr; |
24527 |
+ |
24528 |
+- if (stype[0] == STACK_SPILL) { |
24529 |
++ if (is_spilled_reg(®_state->stack[spi])) { |
24530 |
+ if (size != BPF_REG_SIZE) { |
24531 |
+ if (reg->type != SCALAR_VALUE) { |
24532 |
+ verbose_linfo(env, env->insn_idx, "; "); |
24533 |
+@@ -4531,11 +4547,11 @@ static int check_stack_range_initialized( |
24534 |
+ goto mark; |
24535 |
+ } |
24536 |
+ |
24537 |
+- if (state->stack[spi].slot_type[0] == STACK_SPILL && |
24538 |
++ if (is_spilled_reg(&state->stack[spi]) && |
24539 |
+ state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) |
24540 |
+ goto mark; |
24541 |
+ |
24542 |
+- if (state->stack[spi].slot_type[0] == STACK_SPILL && |
24543 |
++ if (is_spilled_reg(&state->stack[spi]) && |
24544 |
+ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || |
24545 |
+ env->allow_ptr_leaks)) { |
24546 |
+ if (clobber) { |
24547 |
+@@ -7995,6 +8011,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, |
24548 |
+ return err; |
24549 |
+ return adjust_ptr_min_max_vals(env, insn, |
24550 |
+ dst_reg, src_reg); |
24551 |
++ } else if (dst_reg->precise) { |
24552 |
++ /* if dst_reg is precise, src_reg should be precise as well */ |
24553 |
++ err = mark_chain_precision(env, insn->src_reg); |
24554 |
++ if (err) |
24555 |
++ return err; |
24556 |
+ } |
24557 |
+ } else { |
24558 |
+ /* Pretend the src is a reg with a known value, since we only |
24559 |
+@@ -10334,9 +10355,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, |
24560 |
+ * return false to continue verification of this path |
24561 |
+ */ |
24562 |
+ return false; |
24563 |
+- if (i % BPF_REG_SIZE) |
24564 |
++ if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) |
24565 |
+ continue; |
24566 |
+- if (old->stack[spi].slot_type[0] != STACK_SPILL) |
24567 |
++ if (!is_spilled_reg(&old->stack[spi])) |
24568 |
+ continue; |
24569 |
+ if (!regsafe(env, &old->stack[spi].spilled_ptr, |
24570 |
+ &cur->stack[spi].spilled_ptr, idmap)) |
24571 |
+@@ -10527,34 +10548,36 @@ static int propagate_precision(struct bpf_verifier_env *env, |
24572 |
+ { |
24573 |
+ struct bpf_reg_state *state_reg; |
24574 |
+ struct bpf_func_state *state; |
24575 |
+- int i, err = 0; |
24576 |
++ int i, err = 0, fr; |
24577 |
+ |
24578 |
+- state = old->frame[old->curframe]; |
24579 |
+- state_reg = state->regs; |
24580 |
+- for (i = 0; i < BPF_REG_FP; i++, state_reg++) { |
24581 |
+- if (state_reg->type != SCALAR_VALUE || |
24582 |
+- !state_reg->precise) |
24583 |
+- continue; |
24584 |
+- if (env->log.level & BPF_LOG_LEVEL2) |
24585 |
+- verbose(env, "propagating r%d\n", i); |
24586 |
+- err = mark_chain_precision(env, i); |
24587 |
+- if (err < 0) |
24588 |
+- return err; |
24589 |
+- } |
24590 |
++ for (fr = old->curframe; fr >= 0; fr--) { |
24591 |
++ state = old->frame[fr]; |
24592 |
++ state_reg = state->regs; |
24593 |
++ for (i = 0; i < BPF_REG_FP; i++, state_reg++) { |
24594 |
++ if (state_reg->type != SCALAR_VALUE || |
24595 |
++ !state_reg->precise) |
24596 |
++ continue; |
24597 |
++ if (env->log.level & BPF_LOG_LEVEL2) |
24598 |
++ verbose(env, "frame %d: propagating r%d\n", i, fr); |
24599 |
++ err = mark_chain_precision_frame(env, fr, i); |
24600 |
++ if (err < 0) |
24601 |
++ return err; |
24602 |
++ } |
24603 |
+ |
24604 |
+- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { |
24605 |
+- if (state->stack[i].slot_type[0] != STACK_SPILL) |
24606 |
+- continue; |
24607 |
+- state_reg = &state->stack[i].spilled_ptr; |
24608 |
+- if (state_reg->type != SCALAR_VALUE || |
24609 |
+- !state_reg->precise) |
24610 |
+- continue; |
24611 |
+- if (env->log.level & BPF_LOG_LEVEL2) |
24612 |
+- verbose(env, "propagating fp%d\n", |
24613 |
+- (-i - 1) * BPF_REG_SIZE); |
24614 |
+- err = mark_chain_precision_stack(env, i); |
24615 |
+- if (err < 0) |
24616 |
+- return err; |
24617 |
++ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { |
24618 |
++ if (!is_spilled_reg(&state->stack[i])) |
24619 |
++ continue; |
24620 |
++ state_reg = &state->stack[i].spilled_ptr; |
24621 |
++ if (state_reg->type != SCALAR_VALUE || |
24622 |
++ !state_reg->precise) |
24623 |
++ continue; |
24624 |
++ if (env->log.level & BPF_LOG_LEVEL2) |
24625 |
++ verbose(env, "frame %d: propagating fp%d\n", |
24626 |
++ (-i - 1) * BPF_REG_SIZE, fr); |
24627 |
++ err = mark_chain_precision_stack_frame(env, fr, i); |
24628 |
++ if (err < 0) |
24629 |
++ return err; |
24630 |
++ } |
24631 |
+ } |
24632 |
+ return 0; |
24633 |
+ } |
24634 |
+@@ -12109,6 +12132,10 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, |
24635 |
+ if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) |
24636 |
+ continue; |
24637 |
+ |
24638 |
++ /* Zero-extension is done by the caller. */ |
24639 |
++ if (bpf_pseudo_kfunc_call(&insn)) |
24640 |
++ continue; |
24641 |
++ |
24642 |
+ if (WARN_ON(load_reg == -1)) { |
24643 |
+ verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); |
24644 |
+ return -EFAULT; |
24645 |
+diff --git a/kernel/cpu.c b/kernel/cpu.c |
24646 |
+index da871eb075662..393114c10c285 100644 |
24647 |
+--- a/kernel/cpu.c |
24648 |
++++ b/kernel/cpu.c |
24649 |
+@@ -662,21 +662,51 @@ static bool cpuhp_next_state(bool bringup, |
24650 |
+ return true; |
24651 |
+ } |
24652 |
+ |
24653 |
+-static int cpuhp_invoke_callback_range(bool bringup, |
24654 |
+- unsigned int cpu, |
24655 |
+- struct cpuhp_cpu_state *st, |
24656 |
+- enum cpuhp_state target) |
24657 |
++static int __cpuhp_invoke_callback_range(bool bringup, |
24658 |
++ unsigned int cpu, |
24659 |
++ struct cpuhp_cpu_state *st, |
24660 |
++ enum cpuhp_state target, |
24661 |
++ bool nofail) |
24662 |
+ { |
24663 |
+ enum cpuhp_state state; |
24664 |
+- int err = 0; |
24665 |
++ int ret = 0; |
24666 |
+ |
24667 |
+ while (cpuhp_next_state(bringup, &state, st, target)) { |
24668 |
++ int err; |
24669 |
++ |
24670 |
+ err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL); |
24671 |
+- if (err) |
24672 |
++ if (!err) |
24673 |
++ continue; |
24674 |
++ |
24675 |
++ if (nofail) { |
24676 |
++ pr_warn("CPU %u %s state %s (%d) failed (%d)\n", |
24677 |
++ cpu, bringup ? "UP" : "DOWN", |
24678 |
++ cpuhp_get_step(st->state)->name, |
24679 |
++ st->state, err); |
24680 |
++ ret = -1; |
24681 |
++ } else { |
24682 |
++ ret = err; |
24683 |
+ break; |
24684 |
++ } |
24685 |
+ } |
24686 |
+ |
24687 |
+- return err; |
24688 |
++ return ret; |
24689 |
++} |
24690 |
++ |
24691 |
++static inline int cpuhp_invoke_callback_range(bool bringup, |
24692 |
++ unsigned int cpu, |
24693 |
++ struct cpuhp_cpu_state *st, |
24694 |
++ enum cpuhp_state target) |
24695 |
++{ |
24696 |
++ return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false); |
24697 |
++} |
24698 |
++ |
24699 |
++static inline void cpuhp_invoke_callback_range_nofail(bool bringup, |
24700 |
++ unsigned int cpu, |
24701 |
++ struct cpuhp_cpu_state *st, |
24702 |
++ enum cpuhp_state target) |
24703 |
++{ |
24704 |
++ __cpuhp_invoke_callback_range(bringup, cpu, st, target, true); |
24705 |
+ } |
24706 |
+ |
24707 |
+ static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st) |
24708 |
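The cpuhp change above folds the "must not fail" callers into a single worker that takes a nofail flag: on an error it either stops and returns it (normal path) or warns, remembers that something failed, and keeps invoking the remaining callbacks (the DYING/STARTING paths). A generic userspace sketch of the same shape, with hypothetical step names:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical step table standing in for the cpuhp state machine. */
static int run_step(int step)
{
	return step == 2 ? -5 : 0;	/* pretend step 2 fails */
}

static int __invoke_range(int first, int last, bool nofail)
{
	int step, ret = 0;

	for (step = first; step <= last; step++) {
		int err = run_step(step);

		if (!err)
			continue;

		if (nofail) {
			/* Keep going, but remember that something broke. */
			fprintf(stderr, "step %d failed (%d), continuing\n",
				step, err);
			ret = -1;
		} else {
			ret = err;	/* stop at the first failure */
			break;
		}
	}
	return ret;
}

static int invoke_range(int first, int last)
{
	return __invoke_range(first, last, false);
}

static void invoke_range_nofail(int first, int last)
{
	__invoke_range(first, last, true);	/* callers cannot roll back */
}

int main(void)
{
	printf("normal path returned %d\n", invoke_range(0, 4));
	invoke_range_nofail(0, 4);
	return 0;
}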
+@@ -994,7 +1024,6 @@ static int take_cpu_down(void *_param) |
24709 |
+ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
24710 |
+ enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); |
24711 |
+ int err, cpu = smp_processor_id(); |
24712 |
+- int ret; |
24713 |
+ |
24714 |
+ /* Ensure this CPU doesn't handle any more interrupts. */ |
24715 |
+ err = __cpu_disable(); |
24716 |
+@@ -1007,13 +1036,10 @@ static int take_cpu_down(void *_param) |
24717 |
+ */ |
24718 |
+ WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); |
24719 |
+ |
24720 |
+- /* Invoke the former CPU_DYING callbacks */ |
24721 |
+- ret = cpuhp_invoke_callback_range(false, cpu, st, target); |
24722 |
+- |
24723 |
+ /* |
24724 |
+- * DYING must not fail! |
24725 |
++ * Invoke the former CPU_DYING callbacks. DYING must not fail! |
24726 |
+ */ |
24727 |
+- WARN_ON_ONCE(ret); |
24728 |
++ cpuhp_invoke_callback_range_nofail(false, cpu, st, target); |
24729 |
+ |
24730 |
+ /* Give up timekeeping duties */ |
24731 |
+ tick_handover_do_timer(); |
24732 |
+@@ -1285,16 +1311,14 @@ void notify_cpu_starting(unsigned int cpu) |
24733 |
+ { |
24734 |
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
24735 |
+ enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); |
24736 |
+- int ret; |
24737 |
+ |
24738 |
+ rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ |
24739 |
+ cpumask_set_cpu(cpu, &cpus_booted_once_mask); |
24740 |
+- ret = cpuhp_invoke_callback_range(true, cpu, st, target); |
24741 |
+ |
24742 |
+ /* |
24743 |
+ * STARTING must not fail! |
24744 |
+ */ |
24745 |
+- WARN_ON_ONCE(ret); |
24746 |
++ cpuhp_invoke_callback_range_nofail(true, cpu, st, target); |
24747 |
+ } |
24748 |
+ |
24749 |
+ /* |
24750 |
+@@ -2315,8 +2339,10 @@ static ssize_t target_store(struct device *dev, struct device_attribute *attr, |
24751 |
+ |
24752 |
+ if (st->state < target) |
24753 |
+ ret = cpu_up(dev->id, target); |
24754 |
+- else |
24755 |
++ else if (st->state > target) |
24756 |
+ ret = cpu_down(dev->id, target); |
24757 |
++ else if (WARN_ON(st->target != target)) |
24758 |
++ st->target = target; |
24759 |
+ out: |
24760 |
+ unlock_device_hotplug(); |
24761 |
+ return ret ? ret : count; |
24762 |
+diff --git a/kernel/events/core.c b/kernel/events/core.c |
24763 |
+index 5422bd77c7d42..e950881444715 100644 |
24764 |
+--- a/kernel/events/core.c |
24765 |
++++ b/kernel/events/core.c |
24766 |
+@@ -11175,13 +11175,15 @@ static int pmu_dev_alloc(struct pmu *pmu) |
24767 |
+ |
24768 |
+ pmu->dev->groups = pmu->attr_groups; |
24769 |
+ device_initialize(pmu->dev); |
24770 |
+- ret = dev_set_name(pmu->dev, "%s", pmu->name); |
24771 |
+- if (ret) |
24772 |
+- goto free_dev; |
24773 |
+ |
24774 |
+ dev_set_drvdata(pmu->dev, pmu); |
24775 |
+ pmu->dev->bus = &pmu_bus; |
24776 |
+ pmu->dev->release = pmu_dev_release; |
24777 |
++ |
24778 |
++ ret = dev_set_name(pmu->dev, "%s", pmu->name); |
24779 |
++ if (ret) |
24780 |
++ goto free_dev; |
24781 |
++ |
24782 |
+ ret = device_add(pmu->dev); |
24783 |
+ if (ret) |
24784 |
+ goto free_dev; |
24785 |
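The hunk above moves dev_set_name() after the release callback and bus assignment, so that when it fails the free_dev error path can drop the device through a fully set-up release method rather than one that was never installed. The same "install the destructor before the first fallible step" shape in a toy C sketch, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical refcounted object: the last put runs ->release(). */
struct obj {
	int refs;
	char *name;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	free(o->name);		/* frees the name set below, if any */
	free(o);
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0 && o->release)
		o->release(o);
}

static int obj_set_name(struct obj *o, const char *name)
{
	o->name = strdup(name);
	return o->name ? 0 : -1;
}

static struct obj *obj_create(const char *name)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->refs = 1;

	/* Install the release callback *before* the first step that can
	 * fail, so the error path can simply drop the reference. */
	o->release = obj_release;

	if (obj_set_name(o, name)) {
		obj_put(o);	/* release() cleans up whatever was set */
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_create("pmu0");

	if (o) {
		printf("created %s\n", o->name);
		obj_put(o);
	}
	return 0;
}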
+diff --git a/kernel/fork.c b/kernel/fork.c |
24786 |
+index 908ba3c93893f..3fb7e9e6a7b97 100644 |
24787 |
+--- a/kernel/fork.c |
24788 |
++++ b/kernel/fork.c |
24789 |
+@@ -446,6 +446,9 @@ void put_task_stack(struct task_struct *tsk) |
24790 |
+ |
24791 |
+ void free_task(struct task_struct *tsk) |
24792 |
+ { |
24793 |
++#ifdef CONFIG_SECCOMP |
24794 |
++ WARN_ON_ONCE(tsk->seccomp.filter); |
24795 |
++#endif |
24796 |
+ release_user_cpus_ptr(tsk); |
24797 |
+ scs_release(tsk); |
24798 |
+ |
24799 |
+@@ -2345,12 +2348,6 @@ static __latent_entropy struct task_struct *copy_process( |
24800 |
+ |
24801 |
+ spin_lock(¤t->sighand->siglock); |
24802 |
+ |
24803 |
+- /* |
24804 |
+- * Copy seccomp details explicitly here, in case they were changed |
24805 |
+- * before holding sighand lock. |
24806 |
+- */ |
24807 |
+- copy_seccomp(p); |
24808 |
+- |
24809 |
+ rseq_fork(p, clone_flags); |
24810 |
+ |
24811 |
+ /* Don't start children in a dying pid namespace */ |
24812 |
+@@ -2365,6 +2362,14 @@ static __latent_entropy struct task_struct *copy_process( |
24813 |
+ goto bad_fork_cancel_cgroup; |
24814 |
+ } |
24815 |
+ |
24816 |
++ /* No more failure paths after this point. */ |
24817 |
++ |
24818 |
++ /* |
24819 |
++ * Copy seccomp details explicitly here, in case they were changed |
24820 |
++ * before holding sighand lock. |
24821 |
++ */ |
24822 |
++ copy_seccomp(p); |
24823 |
++ |
24824 |
+ init_task_pid_links(p); |
24825 |
+ if (likely(p->pid)) { |
24826 |
+ ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
24827 |
+diff --git a/kernel/futex.c b/kernel/futex.c |
24828 |
+deleted file mode 100644 |
24829 |
+index c15ad276fd157..0000000000000 |
24830 |
+--- a/kernel/futex.c |
24831 |
++++ /dev/null |
24832 |
+@@ -1,4272 +0,0 @@ |
24833 |
+-// SPDX-License-Identifier: GPL-2.0-or-later |
24834 |
+-/* |
24835 |
+- * Fast Userspace Mutexes (which I call "Futexes!"). |
24836 |
+- * (C) Rusty Russell, IBM 2002 |
24837 |
+- * |
24838 |
+- * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar |
24839 |
+- * (C) Copyright 2003 Red Hat Inc, All Rights Reserved |
24840 |
+- * |
24841 |
+- * Removed page pinning, fix privately mapped COW pages and other cleanups |
24842 |
+- * (C) Copyright 2003, 2004 Jamie Lokier |
24843 |
+- * |
24844 |
+- * Robust futex support started by Ingo Molnar |
24845 |
+- * (C) Copyright 2006 Red Hat Inc, All Rights Reserved |
24846 |
+- * Thanks to Thomas Gleixner for suggestions, analysis and fixes. |
24847 |
+- * |
24848 |
+- * PI-futex support started by Ingo Molnar and Thomas Gleixner |
24849 |
+- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@××××××.com> |
24850 |
+- * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@×××××××.com> |
24851 |
+- * |
24852 |
+- * PRIVATE futexes by Eric Dumazet |
24853 |
+- * Copyright (C) 2007 Eric Dumazet <dada1@×××××××××.com> |
24854 |
+- * |
24855 |
+- * Requeue-PI support by Darren Hart <dvhltc@××××××.com> |
24856 |
+- * Copyright (C) IBM Corporation, 2009 |
24857 |
+- * Thanks to Thomas Gleixner for conceptual design and careful reviews. |
24858 |
+- * |
24859 |
+- * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly |
24860 |
+- * enough at me, Linus for the original (flawed) idea, Matthew |
24861 |
+- * Kirkwood for proof-of-concept implementation. |
24862 |
+- * |
24863 |
+- * "The futexes are also cursed." |
24864 |
+- * "But they come in a choice of three flavours!" |
24865 |
+- */ |
24866 |
+-#include <linux/compat.h> |
24867 |
+-#include <linux/jhash.h> |
24868 |
+-#include <linux/pagemap.h> |
24869 |
+-#include <linux/syscalls.h> |
24870 |
+-#include <linux/freezer.h> |
24871 |
+-#include <linux/memblock.h> |
24872 |
+-#include <linux/fault-inject.h> |
24873 |
+-#include <linux/time_namespace.h> |
24874 |
+- |
24875 |
+-#include <asm/futex.h> |
24876 |
+- |
24877 |
+-#include "locking/rtmutex_common.h" |
24878 |
+- |
24879 |
+-/* |
24880 |
+- * READ this before attempting to hack on futexes! |
24881 |
+- * |
24882 |
+- * Basic futex operation and ordering guarantees |
24883 |
+- * ============================================= |
24884 |
+- * |
24885 |
+- * The waiter reads the futex value in user space and calls |
24886 |
+- * futex_wait(). This function computes the hash bucket and acquires |
24887 |
+- * the hash bucket lock. After that it reads the futex user space value |
24888 |
+- * again and verifies that the data has not changed. If it has not changed |
24889 |
+- * it enqueues itself into the hash bucket, releases the hash bucket lock |
24890 |
+- * and schedules. |
24891 |
+- * |
24892 |
+- * The waker side modifies the user space value of the futex and calls |
24893 |
+- * futex_wake(). This function computes the hash bucket and acquires the |
24894 |
+- * hash bucket lock. Then it looks for waiters on that futex in the hash |
24895 |
+- * bucket and wakes them. |
24896 |
+- * |
24897 |
+- * In futex wake up scenarios where no tasks are blocked on a futex, taking |
24898 |
+- * the hb spinlock can be avoided and simply return. In order for this |
24899 |
+- * optimization to work, ordering guarantees must exist so that the waiter |
24900 |
+- * being added to the list is acknowledged when the list is concurrently being |
24901 |
+- * checked by the waker, avoiding scenarios like the following: |
24902 |
+- * |
24903 |
+- * CPU 0 CPU 1 |
24904 |
+- * val = *futex; |
24905 |
+- * sys_futex(WAIT, futex, val); |
24906 |
+- * futex_wait(futex, val); |
24907 |
+- * uval = *futex; |
24908 |
+- * *futex = newval; |
24909 |
+- * sys_futex(WAKE, futex); |
24910 |
+- * futex_wake(futex); |
24911 |
+- * if (queue_empty()) |
24912 |
+- * return; |
24913 |
+- * if (uval == val) |
24914 |
+- * lock(hash_bucket(futex)); |
24915 |
+- * queue(); |
24916 |
+- * unlock(hash_bucket(futex)); |
24917 |
+- * schedule(); |
24918 |
+- * |
24919 |
+- * This would cause the waiter on CPU 0 to wait forever because it |
24920 |
+- * missed the transition of the user space value from val to newval |
24921 |
+- * and the waker did not find the waiter in the hash bucket queue. |
24922 |
+- * |
24923 |
+- * The correct serialization ensures that a waiter either observes |
24924 |
+- * the changed user space value before blocking or is woken by a |
24925 |
+- * concurrent waker: |
24926 |
+- * |
24927 |
+- * CPU 0 CPU 1 |
24928 |
+- * val = *futex; |
24929 |
+- * sys_futex(WAIT, futex, val); |
24930 |
+- * futex_wait(futex, val); |
24931 |
+- * |
24932 |
+- * waiters++; (a) |
24933 |
+- * smp_mb(); (A) <-- paired with -. |
24934 |
+- * | |
24935 |
+- * lock(hash_bucket(futex)); | |
24936 |
+- * | |
24937 |
+- * uval = *futex; | |
24938 |
+- * | *futex = newval; |
24939 |
+- * | sys_futex(WAKE, futex); |
24940 |
+- * | futex_wake(futex); |
24941 |
+- * | |
24942 |
+- * `--------> smp_mb(); (B) |
24943 |
+- * if (uval == val) |
24944 |
+- * queue(); |
24945 |
+- * unlock(hash_bucket(futex)); |
24946 |
+- * schedule(); if (waiters) |
24947 |
+- * lock(hash_bucket(futex)); |
24948 |
+- * else wake_waiters(futex); |
24949 |
+- * waiters--; (b) unlock(hash_bucket(futex)); |
24950 |
+- * |
24951 |
+- * Where (A) orders the waiters increment and the futex value read through |
24952 |
+- * atomic operations (see hb_waiters_inc) and where (B) orders the write |
24953 |
+- * to futex and the waiters read (see hb_waiters_pending()). |
24954 |
+- * |
24955 |
+- * This yields the following case (where X:=waiters, Y:=futex): |
24956 |
+- * |
24957 |
+- * X = Y = 0 |
24958 |
+- * |
24959 |
+- * w[X]=1 w[Y]=1 |
24960 |
+- * MB MB |
24961 |
+- * r[Y]=y r[X]=x |
24962 |
+- * |
24963 |
+- * Which guarantees that x==0 && y==0 is impossible; which translates back into |
24964 |
+- * the guarantee that we cannot both miss the futex variable change and the |
24965 |
+- * enqueue. |
24966 |
+- * |
24967 |
+- * Note that a new waiter is accounted for in (a) even when it is possible that |
24968 |
+- * the wait call can return error, in which case we backtrack from it in (b). |
24969 |
+- * Refer to the comment in queue_lock(). |
24970 |
+- * |
24971 |
+- * Similarly, in order to account for waiters being requeued on another |
24972 |
+- * address we always increment the waiters for the destination bucket before |
24973 |
+- * acquiring the lock. It then decrements them again after releasing it - |
24974 |
+- * the code that actually moves the futex(es) between hash buckets (requeue_futex) |
24975 |
+- * will do the additional required waiter count housekeeping. This is done for |
24976 |
+- * double_lock_hb() and double_unlock_hb(), respectively. |
24977 |
+- */ |
24978 |
+- |
24979 |
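The (relocated) comment above pins down the ordering contract: the waiter increments a waiters count and then re-reads the futex word, the waker writes the futex word and then reads the waiters count, and the full barriers on both sides forbid the outcome where both sides see stale values. A userspace C11-atomics sketch of that store-buffering pattern, using hypothetical names and no real futex syscalls:

#include <stdio.h>
#include <stdatomic.h>
#include <threads.h>

static atomic_int futex_word = 0;	/* Y in the comment's litmus test */
static atomic_int waiters    = 0;	/* X in the comment's litmus test */

static int would_block;	/* set if the waiter decided to sleep */
static int saw_waiter;	/* set if the waker saw a pending waiter */

static int waiter(void *arg)
{
	(void)arg;
	atomic_fetch_add(&waiters, 1);			/* w[X]=1 */
	atomic_thread_fence(memory_order_seq_cst);	/* MB (A) */
	if (atomic_load_explicit(&futex_word, memory_order_relaxed) == 0)
		would_block = 1;			/* r[Y]=0 */
	return 0;
}

static int waker(void *arg)
{
	(void)arg;
	atomic_store_explicit(&futex_word, 1, memory_order_relaxed);	/* w[Y]=1 */
	atomic_thread_fence(memory_order_seq_cst);			/* MB (B) */
	if (atomic_load_explicit(&waiters, memory_order_relaxed) > 0)
		saw_waiter = 1;						/* r[X] */
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, waiter, NULL);
	thrd_create(&b, waker, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);

	/* With both fences, at least one side must observe the other:
	 * would_block=1 together with saw_waiter=0 (a lost wakeup) is
	 * impossible. */
	printf("would_block=%d saw_waiter=%d\n", would_block, saw_waiter);
	return 0;
}

Dropping either fence re-allows the would_block=1/saw_waiter=0 outcome, which is exactly the missed-wakeup scenario the comment's first CPU 0/CPU 1 diagram warns about.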
+-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG |
24980 |
+-#define futex_cmpxchg_enabled 1 |
24981 |
+-#else |
24982 |
+-static int __read_mostly futex_cmpxchg_enabled; |
24983 |
+-#endif |
24984 |
+- |
24985 |
+-/* |
24986 |
+- * Futex flags used to encode options to functions and preserve them across |
24987 |
+- * restarts. |
24988 |
+- */ |
24989 |
+-#ifdef CONFIG_MMU |
24990 |
+-# define FLAGS_SHARED 0x01 |
24991 |
+-#else |
24992 |
+-/* |
24993 |
+- * NOMMU does not have per process address space. Let the compiler optimize |
24994 |
+- * code away. |
24995 |
+- */ |
24996 |
+-# define FLAGS_SHARED 0x00 |
24997 |
+-#endif |
24998 |
+-#define FLAGS_CLOCKRT 0x02 |
24999 |
+-#define FLAGS_HAS_TIMEOUT 0x04 |
25000 |
+- |
25001 |
+-/* |
25002 |
+- * Priority Inheritance state: |
25003 |
+- */ |
25004 |
+-struct futex_pi_state { |
25005 |
+- /* |
25006 |
+- * list of 'owned' pi_state instances - these have to be |
25007 |
+- * cleaned up in do_exit() if the task exits prematurely: |
25008 |
+- */ |
25009 |
+- struct list_head list; |
25010 |
+- |
25011 |
+- /* |
25012 |
+- * The PI object: |
25013 |
+- */ |
25014 |
+- struct rt_mutex_base pi_mutex; |
25015 |
+- |
25016 |
+- struct task_struct *owner; |
25017 |
+- refcount_t refcount; |
25018 |
+- |
25019 |
+- union futex_key key; |
25020 |
+-} __randomize_layout; |
25021 |
+- |
25022 |
+-/** |
25023 |
+- * struct futex_q - The hashed futex queue entry, one per waiting task |
25024 |
+- * @list: priority-sorted list of tasks waiting on this futex |
25025 |
+- * @task: the task waiting on the futex |
25026 |
+- * @lock_ptr: the hash bucket lock |
25027 |
+- * @key: the key the futex is hashed on |
25028 |
+- * @pi_state: optional priority inheritance state |
25029 |
+- * @rt_waiter: rt_waiter storage for use with requeue_pi |
25030 |
+- * @requeue_pi_key: the requeue_pi target futex key |
25031 |
+- * @bitset: bitset for the optional bitmasked wakeup |
25032 |
+- * @requeue_state: State field for futex_requeue_pi() |
25033 |
+- * @requeue_wait: RCU wait for futex_requeue_pi() (RT only) |
25034 |
+- * |
25035 |
+- * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so |
25036 |
+- * we can wake only the relevant ones (hashed queues may be shared). |
25037 |
+- * |
25038 |
+- * A futex_q has a woken state, just like tasks have TASK_RUNNING. |
25039 |
+- * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. |
25040 |
+- * The order of wakeup is always to make the first condition true, then |
25041 |
+- * the second. |
25042 |
+- * |
25043 |
+- * PI futexes are typically woken before they are removed from the hash list via |
25044 |
+- * the rt_mutex code. See unqueue_me_pi(). |
25045 |
+- */ |
25046 |
+-struct futex_q { |
25047 |
+- struct plist_node list; |
25048 |
+- |
25049 |
+- struct task_struct *task; |
25050 |
+- spinlock_t *lock_ptr; |
25051 |
+- union futex_key key; |
25052 |
+- struct futex_pi_state *pi_state; |
25053 |
+- struct rt_mutex_waiter *rt_waiter; |
25054 |
+- union futex_key *requeue_pi_key; |
25055 |
+- u32 bitset; |
25056 |
+- atomic_t requeue_state; |
25057 |
+-#ifdef CONFIG_PREEMPT_RT |
25058 |
+- struct rcuwait requeue_wait; |
25059 |
+-#endif |
25060 |
+-} __randomize_layout; |
25061 |
+- |
25062 |
+-/* |
25063 |
+- * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an |
25064 |
+- * underlying rtmutex. The task which is about to be requeued could have |
25065 |
+- * just woken up (timeout, signal). After the wake up the task has to |
25066 |
+- * acquire hash bucket lock, which is held by the requeue code. As a task |
25067 |
+- * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking |
25068 |
+- * and the hash bucket lock blocking would collide and corrupt state. |
25069 |
+- * |
25070 |
+- * On !PREEMPT_RT this is not a problem and everything could be serialized |
25071 |
+- * on hash bucket lock, but aside of having the benefit of common code, |
25072 |
+- * this allows to avoid doing the requeue when the task is already on the |
25073 |
+- * way out and taking the hash bucket lock of the original uaddr1 when the |
25074 |
+- * requeue has been completed. |
25075 |
+- * |
25076 |
+- * The following state transitions are valid: |
25077 |
+- * |
25078 |
+- * On the waiter side: |
25079 |
+- * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_IGNORE |
25080 |
+- * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_WAIT |
25081 |
+- * |
25082 |
+- * On the requeue side: |
25083 |
+- * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_INPROGRESS |
25084 |
+- * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_DONE/LOCKED |
25085 |
+- * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_NONE (requeue failed) |
25086 |
+- * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_DONE/LOCKED |
25087 |
+- * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_IGNORE (requeue failed) |
25088 |
+- * |
25089 |
+- * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this |
25090 |
+- * signals that the waiter is already on the way out. It also means that |
25091 |
+- * the waiter is still on the 'wait' futex, i.e. uaddr1. |
25092 |
+- * |
25093 |
+- * The waiter side signals early wakeup to the requeue side either through |
25094 |
+- * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT depending |
25095 |
+- * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately |
25096 |
+- * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT, |
25097 |
+- * which means the wakeup is interleaving with a requeue in progress it has |
25098 |
+- * to wait for the requeue side to change the state. Either to DONE/LOCKED |
25099 |
+- * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex |
25100 |
+- * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by |
25101 |
+- * the requeue side when the requeue attempt failed via deadlock detection |
25102 |
+- * and therefore the waiter q is still on the uaddr1 futex. |
25103 |
+- */ |
25104 |
+-enum { |
25105 |
+- Q_REQUEUE_PI_NONE = 0, |
25106 |
+- Q_REQUEUE_PI_IGNORE, |
25107 |
+- Q_REQUEUE_PI_IN_PROGRESS, |
25108 |
+- Q_REQUEUE_PI_WAIT, |
25109 |
+- Q_REQUEUE_PI_DONE, |
25110 |
+- Q_REQUEUE_PI_LOCKED, |
25111 |
+-}; |
25112 |
+- |
25113 |
+-static const struct futex_q futex_q_init = { |
25114 |
+- /* list gets initialized in queue_me()*/ |
25115 |
+- .key = FUTEX_KEY_INIT, |
25116 |
+- .bitset = FUTEX_BITSET_MATCH_ANY, |
25117 |
+- .requeue_state = ATOMIC_INIT(Q_REQUEUE_PI_NONE), |
25118 |
+-}; |
25119 |
+- |
25120 |
+-/* |
25121 |
+- * Hash buckets are shared by all the futex_keys that hash to the same |
25122 |
+- * location. Each key may have multiple futex_q structures, one for each task |
25123 |
+- * waiting on a futex. |
25124 |
+- */ |
25125 |
+-struct futex_hash_bucket { |
25126 |
+- atomic_t waiters; |
25127 |
+- spinlock_t lock; |
25128 |
+- struct plist_head chain; |
25129 |
+-} ____cacheline_aligned_in_smp; |
25130 |
+- |
25131 |
+-/* |
25132 |
+- * The base of the bucket array and its size are always used together |
25133 |
+- * (after initialization only in hash_futex()), so ensure that they |
25134 |
+- * reside in the same cacheline. |
25135 |
+- */ |
25136 |
+-static struct { |
25137 |
+- struct futex_hash_bucket *queues; |
25138 |
+- unsigned long hashsize; |
25139 |
+-} __futex_data __read_mostly __aligned(2*sizeof(long)); |
25140 |
+-#define futex_queues (__futex_data.queues) |
25141 |
+-#define futex_hashsize (__futex_data.hashsize) |
25142 |
+- |
25143 |
+- |
25144 |
+-/* |
25145 |
+- * Fault injections for futexes. |
25146 |
+- */ |
25147 |
+-#ifdef CONFIG_FAIL_FUTEX |
25148 |
+- |
25149 |
+-static struct { |
25150 |
+- struct fault_attr attr; |
25151 |
+- |
25152 |
+- bool ignore_private; |
25153 |
+-} fail_futex = { |
25154 |
+- .attr = FAULT_ATTR_INITIALIZER, |
25155 |
+- .ignore_private = false, |
25156 |
+-}; |
25157 |
+- |
25158 |
+-static int __init setup_fail_futex(char *str) |
25159 |
+-{ |
25160 |
+- return setup_fault_attr(&fail_futex.attr, str); |
25161 |
+-} |
25162 |
+-__setup("fail_futex=", setup_fail_futex); |
25163 |
+- |
25164 |
+-static bool should_fail_futex(bool fshared) |
25165 |
+-{ |
25166 |
+- if (fail_futex.ignore_private && !fshared) |
25167 |
+- return false; |
25168 |
+- |
25169 |
+- return should_fail(&fail_futex.attr, 1); |
25170 |
+-} |
25171 |
+- |
25172 |
+-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS |
25173 |
+- |
25174 |
+-static int __init fail_futex_debugfs(void) |
25175 |
+-{ |
25176 |
+- umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; |
25177 |
+- struct dentry *dir; |
25178 |
+- |
25179 |
+- dir = fault_create_debugfs_attr("fail_futex", NULL, |
25180 |
+- &fail_futex.attr); |
25181 |
+- if (IS_ERR(dir)) |
25182 |
+- return PTR_ERR(dir); |
25183 |
+- |
25184 |
+- debugfs_create_bool("ignore-private", mode, dir, |
25185 |
+- &fail_futex.ignore_private); |
25186 |
+- return 0; |
25187 |
+-} |
25188 |
+- |
25189 |
+-late_initcall(fail_futex_debugfs); |
25190 |
+- |
25191 |
+-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ |
25192 |
+- |
25193 |
+-#else |
25194 |
+-static inline bool should_fail_futex(bool fshared) |
25195 |
+-{ |
25196 |
+- return false; |
25197 |
+-} |
25198 |
+-#endif /* CONFIG_FAIL_FUTEX */ |
25199 |
+- |
25200 |
+-#ifdef CONFIG_COMPAT |
25201 |
+-static void compat_exit_robust_list(struct task_struct *curr); |
25202 |
+-#endif |
25203 |
+- |
25204 |
+-/* |
25205 |
+- * Reflects a new waiter being added to the waitqueue. |
25206 |
+- */ |
25207 |
+-static inline void hb_waiters_inc(struct futex_hash_bucket *hb) |
25208 |
+-{ |
25209 |
+-#ifdef CONFIG_SMP |
25210 |
+- atomic_inc(&hb->waiters); |
25211 |
+- /* |
25212 |
+- * Full barrier (A), see the ordering comment above. |
25213 |
+- */ |
25214 |
+- smp_mb__after_atomic(); |
25215 |
+-#endif |
25216 |
+-} |
25217 |
+- |
25218 |
+-/* |
25219 |
+- * Reflects a waiter being removed from the waitqueue by wakeup |
25220 |
+- * paths. |
25221 |
+- */ |
25222 |
+-static inline void hb_waiters_dec(struct futex_hash_bucket *hb) |
25223 |
+-{ |
25224 |
+-#ifdef CONFIG_SMP |
25225 |
+- atomic_dec(&hb->waiters); |
25226 |
+-#endif |
25227 |
+-} |
25228 |
+- |
25229 |
+-static inline int hb_waiters_pending(struct futex_hash_bucket *hb) |
25230 |
+-{ |
25231 |
+-#ifdef CONFIG_SMP |
25232 |
+- /* |
25233 |
+- * Full barrier (B), see the ordering comment above. |
25234 |
+- */ |
25235 |
+- smp_mb(); |
25236 |
+- return atomic_read(&hb->waiters); |
25237 |
+-#else |
25238 |
+- return 1; |
25239 |
+-#endif |
25240 |
+-} |
25241 |
+- |
25242 |
+-/** |
25243 |
+- * hash_futex - Return the hash bucket in the global hash |
25244 |
+- * @key: Pointer to the futex key for which the hash is calculated |
25245 |
+- * |
25246 |
+- * We hash on the keys returned from get_futex_key (see below) and return the |
25247 |
+- * corresponding hash bucket in the global hash. |
25248 |
+- */ |
25249 |
+-static struct futex_hash_bucket *hash_futex(union futex_key *key) |
25250 |
+-{ |
25251 |
+- u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, |
25252 |
+- key->both.offset); |
25253 |
+- |
25254 |
+- return &futex_queues[hash & (futex_hashsize - 1)]; |
25255 |
+-} |
25256 |
+- |
25257 |
+- |
25258 |
+-/** |
25259 |
+- * match_futex - Check whether two futex keys are equal |
25260 |
+- * @key1: Pointer to key1 |
25261 |
+- * @key2: Pointer to key2 |
25262 |
+- * |
25263 |
+- * Return 1 if two futex_keys are equal, 0 otherwise. |
25264 |
+- */ |
25265 |
+-static inline int match_futex(union futex_key *key1, union futex_key *key2) |
25266 |
+-{ |
25267 |
+- return (key1 && key2 |
25268 |
+- && key1->both.word == key2->both.word |
25269 |
+- && key1->both.ptr == key2->both.ptr |
25270 |
+- && key1->both.offset == key2->both.offset); |
25271 |
+-} |
25272 |
+- |
25273 |
+-enum futex_access { |
25274 |
+- FUTEX_READ, |
25275 |
+- FUTEX_WRITE |
25276 |
+-}; |
25277 |
+- |
25278 |
+-/** |
25279 |
+- * futex_setup_timer - set up the sleeping hrtimer. |
25280 |
+- * @time: ptr to the given timeout value |
25281 |
+- * @timeout: the hrtimer_sleeper structure to be set up |
25282 |
+- * @flags: futex flags |
25283 |
+- * @range_ns: optional range in ns |
25284 |
+- * |
25285 |
+- * Return: Initialized hrtimer_sleeper structure or NULL if no timeout |
25286 |
+- * value given |
25287 |
+- */ |
25288 |
+-static inline struct hrtimer_sleeper * |
25289 |
+-futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, |
25290 |
+- int flags, u64 range_ns) |
25291 |
+-{ |
25292 |
+- if (!time) |
25293 |
+- return NULL; |
25294 |
+- |
25295 |
+- hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ? |
25296 |
+- CLOCK_REALTIME : CLOCK_MONOTONIC, |
25297 |
+- HRTIMER_MODE_ABS); |
25298 |
+- /* |
25299 |
+- * If range_ns is 0, calling hrtimer_set_expires_range_ns() is |
25300 |
+- * effectively the same as calling hrtimer_set_expires(). |
25301 |
+- */ |
25302 |
+- hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); |
25303 |
+- |
25304 |
+- return timeout; |
25305 |
+-} |
25306 |
+- |
25307 |
+-/* |
25308 |
+- * Generate a machine wide unique identifier for this inode. |
25309 |
+- * |
25310 |
+- * This relies on u64 not wrapping in the life-time of the machine; which with |
25311 |
+- * 1ns resolution means almost 585 years. |
25312 |
+- * |
25313 |
+- * This further relies on the fact that a well formed program will not unmap |
25314 |
+- * the file while it has a (shared) futex waiting on it. This mapping will have |
25315 |
+- * a file reference which pins the mount and inode. |
25316 |
+- * |
25317 |
+- * If for some reason an inode gets evicted and read back in again, it will get |
25318 |
+- * a new sequence number and will _NOT_ match, even though it is the exact same |
25319 |
+- * file. |
25320 |
+- * |
25321 |
+- * It is important that match_futex() will never have a false-positive, esp. |
25322 |
+- * for PI futexes that can mess up the state. The above argues that false-negatives |
25323 |
+- * are only possible for malformed programs. |
25324 |
+- */ |
25325 |
+-static u64 get_inode_sequence_number(struct inode *inode) |
25326 |
+-{ |
25327 |
+- static atomic64_t i_seq; |
25328 |
+- u64 old; |
25329 |
+- |
25330 |
+- /* Does the inode already have a sequence number? */ |
25331 |
+- old = atomic64_read(&inode->i_sequence); |
25332 |
+- if (likely(old)) |
25333 |
+- return old; |
25334 |
+- |
25335 |
+- for (;;) { |
25336 |
+- u64 new = atomic64_add_return(1, &i_seq); |
25337 |
+- if (WARN_ON_ONCE(!new)) |
25338 |
+- continue; |
25339 |
+- |
25340 |
+- old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new); |
25341 |
+- if (old) |
25342 |
+- return old; |
25343 |
+- return new; |
25344 |
+- } |
25345 |
+-} |
25346 |
+- |
25347 |
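get_inode_sequence_number() above hands out a machine-wide unique, never-zero identifier lazily: read the cached value if it is already set, otherwise bump a global 64-bit counter and publish the candidate with a cmpxchg so that concurrent callers all agree on one winner. The same scheme with userspace C11 atomics and hypothetical names:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

/* Global allocator for sequence numbers; 0 is reserved as "unset". */
static _Atomic uint64_t i_seq;

struct fake_inode {
	_Atomic uint64_t i_sequence;	/* lazily assigned, never 0 */
};

static uint64_t get_inode_sequence_number(struct fake_inode *inode)
{
	uint64_t old = atomic_load(&inode->i_sequence);

	if (old)			/* already assigned */
		return old;

	for (;;) {
		uint64_t new = atomic_fetch_add(&i_seq, 1) + 1;
		uint64_t expected = 0;

		if (new == 0)		/* skip the reserved value on wrap */
			continue;

		/* Publish our candidate; if another thread won the race,
		 * 'expected' now holds its value and we return that one. */
		if (atomic_compare_exchange_strong(&inode->i_sequence,
						   &expected, new))
			return new;
		return expected;
	}
}

int main(void)
{
	struct fake_inode a = { 0 }, b = { 0 };

	printf("a: %llu %llu (stable across calls)\n",
	       (unsigned long long)get_inode_sequence_number(&a),
	       (unsigned long long)get_inode_sequence_number(&a));
	printf("b: %llu\n",
	       (unsigned long long)get_inode_sequence_number(&b));
	return 0;
}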
+-/** |
25348 |
+- * get_futex_key() - Get parameters which are the keys for a futex |
25349 |
+- * @uaddr: virtual address of the futex |
25350 |
+- * @fshared: false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED |
25351 |
+- * @key: address where result is stored. |
25352 |
+- * @rw: mapping needs to be read/write (values: FUTEX_READ, |
25353 |
+- * FUTEX_WRITE) |
25354 |
+- * |
25355 |
+- * Return: a negative error code or 0 |
25356 |
+- * |
25357 |
+- * The key words are stored in @key on success. |
25358 |
+- * |
25359 |
+- * For shared mappings (when @fshared), the key is: |
25360 |
+- * |
25361 |
+- * ( inode->i_sequence, page->index, offset_within_page ) |
25362 |
+- * |
25363 |
+- * [ also see get_inode_sequence_number() ] |
25364 |
+- * |
25365 |
+- * For private mappings (or when !@fshared), the key is: |
25366 |
+- * |
25367 |
+- * ( current->mm, address, 0 ) |
25368 |
+- * |
25369 |
+- * This allows (cross process, where applicable) identification of the futex |
25370 |
+- * without keeping the page pinned for the duration of the FUTEX_WAIT. |
25371 |
+- * |
25372 |
+- * lock_page() might sleep, the caller should not hold a spinlock. |
25373 |
+- */ |
25374 |
+-static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, |
25375 |
+- enum futex_access rw) |
25376 |
+-{ |
25377 |
+- unsigned long address = (unsigned long)uaddr; |
25378 |
+- struct mm_struct *mm = current->mm; |
25379 |
+- struct page *page, *tail; |
25380 |
+- struct address_space *mapping; |
25381 |
+- int err, ro = 0; |
25382 |
+- |
25383 |
+- /* |
25384 |
+- * The futex address must be "naturally" aligned. |
25385 |
+- */ |
25386 |
+- key->both.offset = address % PAGE_SIZE; |
25387 |
+- if (unlikely((address % sizeof(u32)) != 0)) |
25388 |
+- return -EINVAL; |
25389 |
+- address -= key->both.offset; |
25390 |
+- |
25391 |
+- if (unlikely(!access_ok(uaddr, sizeof(u32)))) |
25392 |
+- return -EFAULT; |
25393 |
+- |
25394 |
+- if (unlikely(should_fail_futex(fshared))) |
25395 |
+- return -EFAULT; |
25396 |
+- |
25397 |
+- /* |
25398 |
+- * PROCESS_PRIVATE futexes are fast. |
25399 |
+- * As the mm cannot disappear under us and the 'key' only needs |
25400 |
+- * virtual address, we dont even have to find the underlying vma. |
25401 |
+- * Note : We do have to check 'uaddr' is a valid user address, |
25402 |
+- * but access_ok() should be faster than find_vma() |
25403 |
+- */ |
25404 |
+- if (!fshared) { |
25405 |
+- key->private.mm = mm; |
25406 |
+- key->private.address = address; |
25407 |
+- return 0; |
25408 |
+- } |
25409 |
+- |
25410 |
+-again: |
25411 |
+- /* Ignore any VERIFY_READ mapping (futex common case) */ |
25412 |
+- if (unlikely(should_fail_futex(true))) |
25413 |
+- return -EFAULT; |
25414 |
+- |
25415 |
+- err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); |
25416 |
+- /* |
25417 |
+- * If write access is not required (eg. FUTEX_WAIT), try |
25418 |
+- * and get read-only access. |
25419 |
+- */ |
25420 |
+- if (err == -EFAULT && rw == FUTEX_READ) { |
25421 |
+- err = get_user_pages_fast(address, 1, 0, &page); |
25422 |
+- ro = 1; |
25423 |
+- } |
25424 |
+- if (err < 0) |
25425 |
+- return err; |
25426 |
+- else |
25427 |
+- err = 0; |
25428 |
+- |
25429 |
+- /* |
25430 |
+- * The treatment of mapping from this point on is critical. The page |
25431 |
+- * lock protects many things but in this context the page lock |
25432 |
+- * stabilizes mapping, prevents inode freeing in the shared |
25433 |
+- * file-backed region case and guards against movement to swap cache. |
25434 |
+- * |
25435 |
+- * Strictly speaking the page lock is not needed in all cases being |
25436 |
+- * considered here and page lock forces unnecessarily serialization |
25437 |
+- * From this point on, mapping will be re-verified if necessary and |
25438 |
+- * page lock will be acquired only if it is unavoidable |
25439 |
+- * |
25440 |
+- * Mapping checks require the head page for any compound page so the |
25441 |
+- * head page and mapping is looked up now. For anonymous pages, it |
25442 |
+- * does not matter if the page splits in the future as the key is |
25443 |
+- * based on the address. For filesystem-backed pages, the tail is |
25444 |
+- * required as the index of the page determines the key. For |
25445 |
+- * base pages, there is no tail page and tail == page. |
25446 |
+- */ |
25447 |
+- tail = page; |
25448 |
+- page = compound_head(page); |
25449 |
+- mapping = READ_ONCE(page->mapping); |
25450 |
+- |
25451 |
+- /* |
25452 |
+- * If page->mapping is NULL, then it cannot be a PageAnon |
25453 |
+- * page; but it might be the ZERO_PAGE or in the gate area or |
25454 |
+- * in a special mapping (all cases which we are happy to fail); |
25455 |
+- * or it may have been a good file page when get_user_pages_fast |
25456 |
+- * found it, but truncated or holepunched or subjected to |
25457 |
+- * invalidate_complete_page2 before we got the page lock (also |
25458 |
+- * cases which we are happy to fail). And we hold a reference, |
25459 |
+- * so refcount care in invalidate_complete_page's remove_mapping |
25460 |
+- * prevents drop_caches from setting mapping to NULL beneath us. |
25461 |
+- * |
25462 |
+- * The case we do have to guard against is when memory pressure made |
25463 |
+- * shmem_writepage move it from filecache to swapcache beneath us: |
25464 |
+- * an unlikely race, but we do need to retry for page->mapping. |
25465 |
+- */ |
25466 |
+- if (unlikely(!mapping)) { |
25467 |
+- int shmem_swizzled; |
25468 |
+- |
25469 |
+- /* |
25470 |
+- * Page lock is required to identify which special case above |
25471 |
+- * applies. If this is really a shmem page then the page lock |
25472 |
+- * will prevent unexpected transitions. |
25473 |
+- */ |
25474 |
+- lock_page(page); |
25475 |
+- shmem_swizzled = PageSwapCache(page) || page->mapping; |
25476 |
+- unlock_page(page); |
25477 |
+- put_page(page); |
25478 |
+- |
25479 |
+- if (shmem_swizzled) |
25480 |
+- goto again; |
25481 |
+- |
25482 |
+- return -EFAULT; |
25483 |
+- } |
25484 |
+- |
25485 |
+- /* |
25486 |
+- * Private mappings are handled in a simple way. |
25487 |
+- * |
25488 |
+- * If the futex key is stored on an anonymous page, then the associated |
25489 |
+- * object is the mm which is implicitly pinned by the calling process. |
25490 |
+- * |
25491 |
+- * NOTE: When userspace waits on a MAP_SHARED mapping, even if |
25492 |
+- * it's a read-only handle, it's expected that futexes attach to |
25493 |
+- * the object not the particular process. |
25494 |
+- */ |
25495 |
+- if (PageAnon(page)) { |
25496 |
+- /* |
25497 |
+- * A RO anonymous page will never change and thus doesn't make |
25498 |
+- * sense for futex operations. |
25499 |
+- */ |
25500 |
+- if (unlikely(should_fail_futex(true)) || ro) { |
25501 |
+- err = -EFAULT; |
25502 |
+- goto out; |
25503 |
+- } |
25504 |
+- |
25505 |
+- key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ |
25506 |
+- key->private.mm = mm; |
25507 |
+- key->private.address = address; |
25508 |
+- |
25509 |
+- } else { |
25510 |
+- struct inode *inode; |
25511 |
+- |
25512 |
+- /* |
25513 |
+- * The associated futex object in this case is the inode and |
25514 |
+- * the page->mapping must be traversed. Ordinarily this should |
25515 |
+- * be stabilised under page lock but it's not strictly |
25516 |
+- * necessary in this case as we just want to pin the inode, not |
25517 |
+- * update the radix tree or anything like that. |
25518 |
+- * |
25519 |
+- * The RCU read lock is taken as the inode is finally freed |
25520 |
+- * under RCU. If the mapping still matches expectations then the |
25521 |
+- * mapping->host can be safely accessed as being a valid inode. |
25522 |
+- */ |
25523 |
+- rcu_read_lock(); |
25524 |
+- |
25525 |
+- if (READ_ONCE(page->mapping) != mapping) { |
25526 |
+- rcu_read_unlock(); |
25527 |
+- put_page(page); |
25528 |
+- |
25529 |
+- goto again; |
25530 |
+- } |
25531 |
+- |
25532 |
+- inode = READ_ONCE(mapping->host); |
25533 |
+- if (!inode) { |
25534 |
+- rcu_read_unlock(); |
25535 |
+- put_page(page); |
25536 |
+- |
25537 |
+- goto again; |
25538 |
+- } |
25539 |
+- |
25540 |
+- key->both.offset |= FUT_OFF_INODE; /* inode-based key */ |
25541 |
+- key->shared.i_seq = get_inode_sequence_number(inode); |
25542 |
+- key->shared.pgoff = page_to_pgoff(tail); |
25543 |
+- rcu_read_unlock(); |
25544 |
+- } |
25545 |
+- |
25546 |
+-out: |
25547 |
+- put_page(page); |
25548 |
+- return err; |
25549 |
+-} |
25550 |
+- |
25551 |
+-/** |
25552 |
+- * fault_in_user_writeable() - Fault in user address and verify RW access |
25553 |
+- * @uaddr: pointer to faulting user space address |
25554 |
+- * |
25555 |
+- * Slow path to fixup the fault we just took in the atomic write |
25556 |
+- * access to @uaddr. |
25557 |
+- * |
25558 |
+- * We have no generic implementation of a non-destructive write to the |
25559 |
+- * user address. We know that we faulted in the atomic pagefault |
25560 |
+- * disabled section so we can as well avoid the #PF overhead by |
25561 |
+- * calling get_user_pages() right away. |
25562 |
+- */ |
25563 |
+-static int fault_in_user_writeable(u32 __user *uaddr) |
25564 |
+-{ |
25565 |
+- struct mm_struct *mm = current->mm; |
25566 |
+- int ret; |
25567 |
+- |
25568 |
+- mmap_read_lock(mm); |
25569 |
+- ret = fixup_user_fault(mm, (unsigned long)uaddr, |
25570 |
+- FAULT_FLAG_WRITE, NULL); |
25571 |
+- mmap_read_unlock(mm); |
25572 |
+- |
25573 |
+- return ret < 0 ? ret : 0; |
25574 |
+-} |
25575 |
+- |
25576 |
+-/** |
25577 |
+- * futex_top_waiter() - Return the highest priority waiter on a futex |
25578 |
+- * @hb: the hash bucket the futex_q's reside in |
25579 |
+- * @key: the futex key (to distinguish it from other futex futex_q's) |
25580 |
+- * |
25581 |
+- * Must be called with the hb lock held. |
25582 |
+- */ |
25583 |
+-static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, |
25584 |
+- union futex_key *key) |
25585 |
+-{ |
25586 |
+- struct futex_q *this; |
25587 |
+- |
25588 |
+- plist_for_each_entry(this, &hb->chain, list) { |
25589 |
+- if (match_futex(&this->key, key)) |
25590 |
+- return this; |
25591 |
+- } |
25592 |
+- return NULL; |
25593 |
+-} |
25594 |
+- |
25595 |
+-static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, |
25596 |
+- u32 uval, u32 newval) |
25597 |
+-{ |
25598 |
+- int ret; |
25599 |
+- |
25600 |
+- pagefault_disable(); |
25601 |
+- ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); |
25602 |
+- pagefault_enable(); |
25603 |
+- |
25604 |
+- return ret; |
25605 |
+-} |
25606 |
+- |
25607 |
+-static int get_futex_value_locked(u32 *dest, u32 __user *from) |
25608 |
+-{ |
25609 |
+- int ret; |
25610 |
+- |
25611 |
+- pagefault_disable(); |
25612 |
+- ret = __get_user(*dest, from); |
25613 |
+- pagefault_enable(); |
25614 |
+- |
25615 |
+- return ret ? -EFAULT : 0; |
25616 |
+-} |
25617 |
+- |
25618 |
+- |
25619 |
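For orientation, a minimal user-space sketch of what these locked cmpxchg/read helpers serve on the kernel side: the classic three-state futex mutex (0 unlocked, 1 locked, 2 locked with waiters) in the style of Drepper's "Futexes Are Tricky". This sketch is illustrative only and not part of the patch hunk above; the helper names are invented, only the futex(2) calls and the state protocol are real.

    #include <linux/futex.h>
    #include <stdatomic.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long sys_futex(atomic_int *uaddr, int op, int val)
    {
            return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    /* 0 = unlocked, 1 = locked, 2 = locked with (possible) waiters. */
    static void futex_mutex_lock(atomic_int *f)
    {
            int c = 0;

            /* Fast path: 0 -> 1 with one compare-exchange, no syscall. */
            if (atomic_compare_exchange_strong(f, &c, 1))
                    return;

            /* Slow path: advertise contention and sleep in the kernel. */
            if (c != 2)
                    c = atomic_exchange(f, 2);
            while (c != 0) {
                    sys_futex(f, FUTEX_WAIT_PRIVATE, 2);
                    c = atomic_exchange(f, 2);
            }
    }

    static void futex_mutex_unlock(atomic_int *f)
    {
            /* Only enter the kernel if someone recorded contention. */
            if (atomic_exchange(f, 0) == 2)
                    sys_futex(f, FUTEX_WAKE_PRIVATE, 1);
    }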
+-/* |
25620 |
+- * PI code: |
25621 |
+- */ |
25622 |
+-static int refill_pi_state_cache(void) |
25623 |
+-{ |
25624 |
+- struct futex_pi_state *pi_state; |
25625 |
+- |
25626 |
+- if (likely(current->pi_state_cache)) |
25627 |
+- return 0; |
25628 |
+- |
25629 |
+- pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); |
25630 |
+- |
25631 |
+- if (!pi_state) |
25632 |
+- return -ENOMEM; |
25633 |
+- |
25634 |
+- INIT_LIST_HEAD(&pi_state->list); |
25635 |
+- /* pi_mutex gets initialized later */ |
25636 |
+- pi_state->owner = NULL; |
25637 |
+- refcount_set(&pi_state->refcount, 1); |
25638 |
+- pi_state->key = FUTEX_KEY_INIT; |
25639 |
+- |
25640 |
+- current->pi_state_cache = pi_state; |
25641 |
+- |
25642 |
+- return 0; |
25643 |
+-} |
25644 |
+- |
25645 |
+-static struct futex_pi_state *alloc_pi_state(void) |
25646 |
+-{ |
25647 |
+- struct futex_pi_state *pi_state = current->pi_state_cache; |
25648 |
+- |
25649 |
+- WARN_ON(!pi_state); |
25650 |
+- current->pi_state_cache = NULL; |
25651 |
+- |
25652 |
+- return pi_state; |
25653 |
+-} |
25654 |
+- |
25655 |
+-static void pi_state_update_owner(struct futex_pi_state *pi_state, |
25656 |
+- struct task_struct *new_owner) |
25657 |
+-{ |
25658 |
+- struct task_struct *old_owner = pi_state->owner; |
25659 |
+- |
25660 |
+- lockdep_assert_held(&pi_state->pi_mutex.wait_lock); |
25661 |
+- |
25662 |
+- if (old_owner) { |
25663 |
+- raw_spin_lock(&old_owner->pi_lock); |
25664 |
+- WARN_ON(list_empty(&pi_state->list)); |
25665 |
+- list_del_init(&pi_state->list); |
25666 |
+- raw_spin_unlock(&old_owner->pi_lock); |
25667 |
+- } |
25668 |
+- |
25669 |
+- if (new_owner) { |
25670 |
+- raw_spin_lock(&new_owner->pi_lock); |
25671 |
+- WARN_ON(!list_empty(&pi_state->list)); |
25672 |
+- list_add(&pi_state->list, &new_owner->pi_state_list); |
25673 |
+- pi_state->owner = new_owner; |
25674 |
+- raw_spin_unlock(&new_owner->pi_lock); |
25675 |
+- } |
25676 |
+-} |
25677 |
+- |
25678 |
+-static void get_pi_state(struct futex_pi_state *pi_state) |
25679 |
+-{ |
25680 |
+- WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); |
25681 |
+-} |
25682 |
+- |
25683 |
+-/* |
25684 |
+- * Drops a reference to the pi_state object and frees or caches it |
25685 |
+- * when the last reference is gone. |
25686 |
+- */ |
25687 |
+-static void put_pi_state(struct futex_pi_state *pi_state) |
25688 |
+-{ |
25689 |
+- if (!pi_state) |
25690 |
+- return; |
25691 |
+- |
25692 |
+- if (!refcount_dec_and_test(&pi_state->refcount)) |
25693 |
+- return; |
25694 |
+- |
25695 |
+- /* |
25696 |
+- * If pi_state->owner is NULL, the owner is most probably dying |
25697 |
+- * and has cleaned up the pi_state already |
25698 |
+- */ |
25699 |
+- if (pi_state->owner) { |
25700 |
+- unsigned long flags; |
25701 |
+- |
25702 |
+- raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); |
25703 |
+- pi_state_update_owner(pi_state, NULL); |
25704 |
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex); |
25705 |
+- raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); |
25706 |
+- } |
25707 |
+- |
25708 |
+- if (current->pi_state_cache) { |
25709 |
+- kfree(pi_state); |
25710 |
+- } else { |
25711 |
+- /* |
25712 |
+- * pi_state->list is already empty. |
25713 |
+- * clear pi_state->owner. |
25714 |
+- * refcount is at 0 - put it back to 1. |
25715 |
+- */ |
25716 |
+- pi_state->owner = NULL; |
25717 |
+- refcount_set(&pi_state->refcount, 1); |
25718 |
+- current->pi_state_cache = pi_state; |
25719 |
+- } |
25720 |
+-} |
25721 |
+- |
25722 |
+-#ifdef CONFIG_FUTEX_PI |
25723 |
+- |
25724 |
+-/* |
25725 |
+- * This task is holding PI mutexes at exit time => bad. |
25726 |
+- * Kernel cleans up PI-state, but userspace is likely hosed. |
25727 |
+- * (Robust-futex cleanup is separate and might save the day for userspace.) |
25728 |
+- */ |
25729 |
+-static void exit_pi_state_list(struct task_struct *curr) |
25730 |
+-{ |
25731 |
+- struct list_head *next, *head = &curr->pi_state_list; |
25732 |
+- struct futex_pi_state *pi_state; |
25733 |
+- struct futex_hash_bucket *hb; |
25734 |
+- union futex_key key = FUTEX_KEY_INIT; |
25735 |
+- |
25736 |
+- if (!futex_cmpxchg_enabled) |
25737 |
+- return; |
25738 |
+- /* |
25739 |
+- * We are a ZOMBIE and nobody can enqueue itself on |
25740 |
+- * pi_state_list anymore, but we have to be careful |
25741 |
+- * versus waiters unqueueing themselves: |
25742 |
+- */ |
25743 |
+- raw_spin_lock_irq(&curr->pi_lock); |
25744 |
+- while (!list_empty(head)) { |
25745 |
+- next = head->next; |
25746 |
+- pi_state = list_entry(next, struct futex_pi_state, list); |
25747 |
+- key = pi_state->key; |
25748 |
+- hb = hash_futex(&key); |
25749 |
+- |
25750 |
+- /* |
25751 |
+- * We can race against put_pi_state() removing itself from the |
25752 |
+- * list (a waiter going away). put_pi_state() will first |
25753 |
+- * decrement the reference count and then modify the list, so |
25754 |
+- * its possible to see the list entry but fail this reference |
25755 |
+- * acquire. |
25756 |
+- * |
25757 |
+- * In that case; drop the locks to let put_pi_state() make |
25758 |
+- * progress and retry the loop. |
25759 |
+- */ |
25760 |
+- if (!refcount_inc_not_zero(&pi_state->refcount)) { |
25761 |
+- raw_spin_unlock_irq(&curr->pi_lock); |
25762 |
+- cpu_relax(); |
25763 |
+- raw_spin_lock_irq(&curr->pi_lock); |
25764 |
+- continue; |
25765 |
+- } |
25766 |
+- raw_spin_unlock_irq(&curr->pi_lock); |
25767 |
+- |
25768 |
+- spin_lock(&hb->lock); |
25769 |
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
25770 |
+- raw_spin_lock(&curr->pi_lock); |
25771 |
+- /* |
25772 |
+- * We dropped the pi-lock, so re-check whether this |
25773 |
+- * task still owns the PI-state: |
25774 |
+- */ |
25775 |
+- if (head->next != next) { |
25776 |
+- /* retain curr->pi_lock for the loop invariant */ |
25777 |
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock); |
25778 |
+- spin_unlock(&hb->lock); |
25779 |
+- put_pi_state(pi_state); |
25780 |
+- continue; |
25781 |
+- } |
25782 |
+- |
25783 |
+- WARN_ON(pi_state->owner != curr); |
25784 |
+- WARN_ON(list_empty(&pi_state->list)); |
25785 |
+- list_del_init(&pi_state->list); |
25786 |
+- pi_state->owner = NULL; |
25787 |
+- |
25788 |
+- raw_spin_unlock(&curr->pi_lock); |
25789 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
25790 |
+- spin_unlock(&hb->lock); |
25791 |
+- |
25792 |
+- rt_mutex_futex_unlock(&pi_state->pi_mutex); |
25793 |
+- put_pi_state(pi_state); |
25794 |
+- |
25795 |
+- raw_spin_lock_irq(&curr->pi_lock); |
25796 |
+- } |
25797 |
+- raw_spin_unlock_irq(&curr->pi_lock); |
25798 |
+-} |
25799 |
+-#else |
25800 |
+-static inline void exit_pi_state_list(struct task_struct *curr) { } |
25801 |
+-#endif |
25802 |
+- |
25803 |
+-/* |
25804 |
+- * We need to check the following states: |
25805 |
+- * |
25806 |
+- * Waiter | pi_state | pi->owner | uTID | uODIED | ? |
25807 |
+- * |
25808 |
+- * [1] NULL | --- | --- | 0 | 0/1 | Valid |
25809 |
+- * [2] NULL | --- | --- | >0 | 0/1 | Valid |
25810 |
+- * |
25811 |
+- * [3] Found | NULL | -- | Any | 0/1 | Invalid |
25812 |
+- * |
25813 |
+- * [4] Found | Found | NULL | 0 | 1 | Valid |
25814 |
+- * [5] Found | Found | NULL | >0 | 1 | Invalid |
25815 |
+- * |
25816 |
+- * [6] Found | Found | task | 0 | 1 | Valid |
25817 |
+- * |
25818 |
+- * [7] Found | Found | NULL | Any | 0 | Invalid |
25819 |
+- * |
25820 |
+- * [8] Found | Found | task | ==taskTID | 0/1 | Valid |
25821 |
+- * [9] Found | Found | task | 0 | 0 | Invalid |
25822 |
+- * [10] Found | Found | task | !=taskTID | 0/1 | Invalid |
25823 |
+- * |
25824 |
+- * [1] Indicates that the kernel can acquire the futex atomically. We |
25825 |
+- * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. |
25826 |
+- * |
25827 |
+- * [2] Valid, if TID does not belong to a kernel thread. If no matching |
25828 |
+- * thread is found then it indicates that the owner TID has died. |
25829 |
+- * |
25830 |
+- * [3] Invalid. The waiter is queued on a non PI futex |
25831 |
+- * |
25832 |
+- * [4] Valid state after exit_robust_list(), which sets the user space |
25833 |
+- * value to FUTEX_WAITERS | FUTEX_OWNER_DIED. |
25834 |
+- * |
25835 |
+- * [5] The user space value got manipulated between exit_robust_list() |
25836 |
+- * and exit_pi_state_list() |
25837 |
+- * |
25838 |
+- * [6] Valid state after exit_pi_state_list() which sets the new owner in |
25839 |
+- * the pi_state but cannot access the user space value. |
25840 |
+- * |
25841 |
+- * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set. |
25842 |
+- * |
25843 |
+- * [8] Owner and user space value match |
25844 |
+- * |
25845 |
+- * [9] There is no transient state which sets the user space TID to 0 |
25846 |
+- * except exit_robust_list(), but this is indicated by the |
25847 |
+- * FUTEX_OWNER_DIED bit. See [4] |
25848 |
+- * |
25849 |
+- * [10] There is no transient state which leaves owner and user space |
25850 |
+- * TID out of sync. Except one error case where the kernel is denied |
25851 |
+- * write access to the user address, see fixup_pi_state_owner(). |
25852 |
+- * |
25853 |
+- * |
25854 |
+- * Serialization and lifetime rules: |
25855 |
+- * |
25856 |
+- * hb->lock: |
25857 |
+- * |
25858 |
+- * hb -> futex_q, relation |
25859 |
+- * futex_q -> pi_state, relation |
25860 |
+- * |
25861 |
+- * (cannot be raw because hb can contain arbitrary amount |
25862 |
+- * of futex_q's) |
25863 |
+- * |
25864 |
+- * pi_mutex->wait_lock: |
25865 |
+- * |
25866 |
+- * {uval, pi_state} |
25867 |
+- * |
25868 |
+- * (and pi_mutex 'obviously') |
25869 |
+- * |
25870 |
+- * p->pi_lock: |
25871 |
+- * |
25872 |
+- * p->pi_state_list -> pi_state->list, relation |
25873 |
+- * pi_mutex->owner -> pi_state->owner, relation |
25874 |
+- * |
25875 |
+- * pi_state->refcount: |
25876 |
+- * |
25877 |
+- * pi_state lifetime |
25878 |
+- * |
25879 |
+- * |
25880 |
+- * Lock order: |
25881 |
+- * |
25882 |
+- * hb->lock |
25883 |
+- * pi_mutex->wait_lock |
25884 |
+- * p->pi_lock |
25885 |
+- * |
25886 |
+- */ |
25887 |
+- |
25888 |
+-/* |
25889 |
+- * Validate that the existing waiter has a pi_state and sanity check |
25890 |
+- * the pi_state against the user space value. If correct, attach to |
25891 |
+- * it. |
25892 |
+- */ |
25893 |
+-static int attach_to_pi_state(u32 __user *uaddr, u32 uval, |
25894 |
+- struct futex_pi_state *pi_state, |
25895 |
+- struct futex_pi_state **ps) |
25896 |
+-{ |
25897 |
+- pid_t pid = uval & FUTEX_TID_MASK; |
25898 |
+- u32 uval2; |
25899 |
+- int ret; |
25900 |
+- |
25901 |
+- /* |
25902 |
+- * Userspace might have messed up non-PI and PI futexes [3] |
25903 |
+- */ |
25904 |
+- if (unlikely(!pi_state)) |
25905 |
+- return -EINVAL; |
25906 |
+- |
25907 |
+- /* |
25908 |
+- * We get here with hb->lock held, and having found a |
25909 |
+- * futex_top_waiter(). This means that futex_lock_pi() of said futex_q |
25910 |
+- * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), |
25911 |
+- * which in turn means that futex_lock_pi() still has a reference on |
25912 |
+- * our pi_state. |
25913 |
+- * |
25914 |
+- * The waiter holding a reference on @pi_state also protects against |
25915 |
+- * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() |
25916 |
+- * and futex_wait_requeue_pi() as it cannot go to 0 and consequently |
25917 |
+- * free pi_state before we can take a reference ourselves. |
25918 |
+- */ |
25919 |
+- WARN_ON(!refcount_read(&pi_state->refcount)); |
25920 |
+- |
25921 |
+- /* |
25922 |
+- * Now that we have a pi_state, we can acquire wait_lock |
25923 |
+- * and do the state validation. |
25924 |
+- */ |
25925 |
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
25926 |
+- |
25927 |
+- /* |
25928 |
+- * Since {uval, pi_state} is serialized by wait_lock, and our current |
25929 |
+- * uval was read without holding it, it can have changed. Verify it |
25930 |
+- * still is what we expect it to be, otherwise retry the entire |
25931 |
+- * operation. |
25932 |
+- */ |
25933 |
+- if (get_futex_value_locked(&uval2, uaddr)) |
25934 |
+- goto out_efault; |
25935 |
+- |
25936 |
+- if (uval != uval2) |
25937 |
+- goto out_eagain; |
25938 |
+- |
25939 |
+- /* |
25940 |
+- * Handle the owner died case: |
25941 |
+- */ |
25942 |
+- if (uval & FUTEX_OWNER_DIED) { |
25943 |
+- /* |
25944 |
+- * exit_pi_state_list sets owner to NULL and wakes the |
25945 |
+- * topmost waiter. The task which acquires the |
25946 |
+- * pi_state->rt_mutex will fixup owner. |
25947 |
+- */ |
25948 |
+- if (!pi_state->owner) { |
25949 |
+- /* |
25950 |
+- * No pi state owner, but the user space TID |
25951 |
+- * is not 0. Inconsistent state. [5] |
25952 |
+- */ |
25953 |
+- if (pid) |
25954 |
+- goto out_einval; |
25955 |
+- /* |
25956 |
+- * Take a ref on the state and return success. [4] |
25957 |
+- */ |
25958 |
+- goto out_attach; |
25959 |
+- } |
25960 |
+- |
25961 |
+- /* |
25962 |
+- * If TID is 0, then either the dying owner has not |
25963 |
+- * yet executed exit_pi_state_list() or some waiter |
25964 |
+- * acquired the rtmutex in the pi state, but did not |
25965 |
+- * yet fixup the TID in user space. |
25966 |
+- * |
25967 |
+- * Take a ref on the state and return success. [6] |
25968 |
+- */ |
25969 |
+- if (!pid) |
25970 |
+- goto out_attach; |
25971 |
+- } else { |
25972 |
+- /* |
25973 |
+- * If the owner died bit is not set, then the pi_state |
25974 |
+- * must have an owner. [7] |
25975 |
+- */ |
25976 |
+- if (!pi_state->owner) |
25977 |
+- goto out_einval; |
25978 |
+- } |
25979 |
+- |
25980 |
+- /* |
25981 |
+- * Bail out if user space manipulated the futex value. If pi |
25982 |
+- * state exists then the owner TID must be the same as the |
25983 |
+- * user space TID. [9/10] |
25984 |
+- */ |
25985 |
+- if (pid != task_pid_vnr(pi_state->owner)) |
25986 |
+- goto out_einval; |
25987 |
+- |
25988 |
+-out_attach: |
25989 |
+- get_pi_state(pi_state); |
25990 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
25991 |
+- *ps = pi_state; |
25992 |
+- return 0; |
25993 |
+- |
25994 |
+-out_einval: |
25995 |
+- ret = -EINVAL; |
25996 |
+- goto out_error; |
25997 |
+- |
25998 |
+-out_eagain: |
25999 |
+- ret = -EAGAIN; |
26000 |
+- goto out_error; |
26001 |
+- |
26002 |
+-out_efault: |
26003 |
+- ret = -EFAULT; |
26004 |
+- goto out_error; |
26005 |
+- |
26006 |
+-out_error: |
26007 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
26008 |
+- return ret; |
26009 |
+-} |
26010 |
+- |
26011 |
+-/** |
26012 |
+- * wait_for_owner_exiting - Block until the owner has exited |
26013 |
+- * @ret: owner's current futex lock status |
26014 |
+- * @exiting: Pointer to the exiting task |
26015 |
+- * |
26016 |
+- * Caller must hold a refcount on @exiting. |
26017 |
+- */ |
26018 |
+-static void wait_for_owner_exiting(int ret, struct task_struct *exiting) |
26019 |
+-{ |
26020 |
+- if (ret != -EBUSY) { |
26021 |
+- WARN_ON_ONCE(exiting); |
26022 |
+- return; |
26023 |
+- } |
26024 |
+- |
26025 |
+- if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) |
26026 |
+- return; |
26027 |
+- |
26028 |
+- mutex_lock(&exiting->futex_exit_mutex); |
26029 |
+- /* |
26030 |
+- * No point in doing state checking here. If the waiter got here |
26031 |
+- * while the task was in exec()->exec_futex_release() then it can |
26032 |
+- * have any FUTEX_STATE_* value when the waiter has acquired the |
26033 |
+- * mutex. OK, if running, EXITING or DEAD if it reached exit() |
26034 |
+- * already. Highly unlikely and not a problem. Just one more round |
26035 |
+- * through the futex maze. |
26036 |
+- */ |
26037 |
+- mutex_unlock(&exiting->futex_exit_mutex); |
26038 |
+- |
26039 |
+- put_task_struct(exiting); |
26040 |
+-} |
26041 |
+- |
26042 |
+-static int handle_exit_race(u32 __user *uaddr, u32 uval, |
26043 |
+- struct task_struct *tsk) |
26044 |
+-{ |
26045 |
+- u32 uval2; |
26046 |
+- |
26047 |
+- /* |
26048 |
+- * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the |
26049 |
+- * caller that the alleged owner is busy. |
26050 |
+- */ |
26051 |
+- if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) |
26052 |
+- return -EBUSY; |
26053 |
+- |
26054 |
+- /* |
26055 |
+- * Reread the user space value to handle the following situation: |
26056 |
+- * |
26057 |
+- * CPU0 CPU1 |
26058 |
+- * |
26059 |
+- * sys_exit() sys_futex() |
26060 |
+- * do_exit() futex_lock_pi() |
26061 |
+- * futex_lock_pi_atomic() |
26062 |
+- * exit_signals(tsk) No waiters: |
26063 |
+- * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID |
26064 |
+- * mm_release(tsk) Set waiter bit |
26065 |
+- * exit_robust_list(tsk) { *uaddr = 0x80000PID; |
26066 |
+- * Set owner died attach_to_pi_owner() { |
26067 |
+- * *uaddr = 0xC0000000; tsk = get_task(PID); |
26068 |
+- * } if (!tsk->flags & PF_EXITING) { |
26069 |
+- * ... attach(); |
26070 |
+- * tsk->futex_state = } else { |
26071 |
+- * FUTEX_STATE_DEAD; if (tsk->futex_state != |
26072 |
+- * FUTEX_STATE_DEAD) |
26073 |
+- * return -EAGAIN; |
26074 |
+- * return -ESRCH; <--- FAIL |
26075 |
+- * } |
26076 |
+- * |
26077 |
+- * Returning ESRCH unconditionally is wrong here because the |
26078 |
+- * user space value has been changed by the exiting task. |
26079 |
+- * |
26080 |
+- * The same logic applies to the case where the exiting task is |
26081 |
+- * already gone. |
26082 |
+- */ |
26083 |
+- if (get_futex_value_locked(&uval2, uaddr)) |
26084 |
+- return -EFAULT; |
26085 |
+- |
26086 |
+- /* If the user space value has changed, try again. */ |
26087 |
+- if (uval2 != uval) |
26088 |
+- return -EAGAIN; |
26089 |
+- |
26090 |
+- /* |
26091 |
+- * The exiting task did not have a robust list, the robust list was |
26092 |
+- * corrupted or the user space value in *uaddr is simply bogus. |
26093 |
+- * Give up and tell user space. |
26094 |
+- */ |
26095 |
+- return -ESRCH; |
26096 |
+-} |
26097 |
+- |
26098 |
+-static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key, |
26099 |
+- struct futex_pi_state **ps) |
26100 |
+-{ |
26101 |
+- /* |
26102 |
+- * No existing pi state. First waiter. [2] |
26103 |
+- * |
26104 |
+- * This creates pi_state, we have hb->lock held, this means nothing can |
26105 |
+- * observe this state, wait_lock is irrelevant. |
26106 |
+- */ |
26107 |
+- struct futex_pi_state *pi_state = alloc_pi_state(); |
26108 |
+- |
26109 |
+- /* |
26110 |
+- * Initialize the pi_mutex in locked state and make @p |
26111 |
+- * the owner of it: |
26112 |
+- */ |
26113 |
+- rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); |
26114 |
+- |
26115 |
+- /* Store the key for possible exit cleanups: */ |
26116 |
+- pi_state->key = *key; |
26117 |
+- |
26118 |
+- WARN_ON(!list_empty(&pi_state->list)); |
26119 |
+- list_add(&pi_state->list, &p->pi_state_list); |
26120 |
+- /* |
26121 |
+- * Assignment without holding pi_state->pi_mutex.wait_lock is safe |
26122 |
+- * because there is no concurrency as the object is not published yet. |
26123 |
+- */ |
26124 |
+- pi_state->owner = p; |
26125 |
+- |
26126 |
+- *ps = pi_state; |
26127 |
+-} |
26128 |
+-/* |
26129 |
+- * Lookup the task for the TID provided from user space and attach to |
26130 |
+- * it after doing proper sanity checks. |
26131 |
+- */ |
26132 |
+-static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, |
26133 |
+- struct futex_pi_state **ps, |
26134 |
+- struct task_struct **exiting) |
26135 |
+-{ |
26136 |
+- pid_t pid = uval & FUTEX_TID_MASK; |
26137 |
+- struct task_struct *p; |
26138 |
+- |
26139 |
+- /* |
26140 |
+- * We are the first waiter - try to look up the real owner and attach |
26141 |
+- * the new pi_state to it, but bail out when TID = 0 [1] |
26142 |
+- * |
26143 |
+- * The !pid check is paranoid. None of the call sites should end up |
26144 |
+- * with pid == 0, but better safe than sorry. Let the caller retry |
26145 |
+- */ |
26146 |
+- if (!pid) |
26147 |
+- return -EAGAIN; |
26148 |
+- p = find_get_task_by_vpid(pid); |
26149 |
+- if (!p) |
26150 |
+- return handle_exit_race(uaddr, uval, NULL); |
26151 |
+- |
26152 |
+- if (unlikely(p->flags & PF_KTHREAD)) { |
26153 |
+- put_task_struct(p); |
26154 |
+- return -EPERM; |
26155 |
+- } |
26156 |
+- |
26157 |
+- /* |
26158 |
+- * We need to look at the task state to figure out, whether the |
26159 |
+- * task is exiting. To protect against the change of the task state |
26160 |
+- * in futex_exit_release(), we do this protected by p->pi_lock: |
26161 |
+- */ |
26162 |
+- raw_spin_lock_irq(&p->pi_lock); |
26163 |
+- if (unlikely(p->futex_state != FUTEX_STATE_OK)) { |
26164 |
+- /* |
26165 |
+- * The task is on the way out. When the futex state is |
26166 |
+- * FUTEX_STATE_DEAD, we know that the task has finished |
26167 |
+- * the cleanup: |
26168 |
+- */ |
26169 |
+- int ret = handle_exit_race(uaddr, uval, p); |
26170 |
+- |
26171 |
+- raw_spin_unlock_irq(&p->pi_lock); |
26172 |
+- /* |
26173 |
+- * If the owner task is between FUTEX_STATE_EXITING and |
26174 |
+- * FUTEX_STATE_DEAD then store the task pointer and keep |
26175 |
+- * the reference on the task struct. The calling code will |
26176 |
+- * drop all locks, wait for the task to reach |
26177 |
+- * FUTEX_STATE_DEAD and then drop the refcount. This is |
26178 |
+- * required to prevent a live lock when the current task |
26179 |
+- * preempted the exiting task between the two states. |
26180 |
+- */ |
26181 |
+- if (ret == -EBUSY) |
26182 |
+- *exiting = p; |
26183 |
+- else |
26184 |
+- put_task_struct(p); |
26185 |
+- return ret; |
26186 |
+- } |
26187 |
+- |
26188 |
+- __attach_to_pi_owner(p, key, ps); |
26189 |
+- raw_spin_unlock_irq(&p->pi_lock); |
26190 |
+- |
26191 |
+- put_task_struct(p); |
26192 |
+- |
26193 |
+- return 0; |
26194 |
+-} |
26195 |
+- |
26196 |
+-static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
26197 |
+-{ |
26198 |
+- int err; |
26199 |
+- u32 curval; |
26200 |
+- |
26201 |
+- if (unlikely(should_fail_futex(true))) |
26202 |
+- return -EFAULT; |
26203 |
+- |
26204 |
+- err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
26205 |
+- if (unlikely(err)) |
26206 |
+- return err; |
26207 |
+- |
26208 |
+- /* If user space value changed, let the caller retry */ |
26209 |
+- return curval != uval ? -EAGAIN : 0; |
26210 |
+-} |
26211 |
+- |
26212 |
+-/** |
26213 |
+- * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex |
26214 |
+- * @uaddr: the pi futex user address |
26215 |
+- * @hb: the pi futex hash bucket |
26216 |
+- * @key: the futex key associated with uaddr and hb |
26217 |
+- * @ps: the pi_state pointer where we store the result of the |
26218 |
+- * lookup |
26219 |
+- * @task: the task to perform the atomic lock work for. This will |
26220 |
+- * be "current" except in the case of requeue pi. |
26221 |
+- * @exiting: Pointer to store the task pointer of the owner task |
26222 |
+- * which is in the middle of exiting |
26223 |
+- * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
26224 |
+- * |
26225 |
+- * Return: |
26226 |
+- * - 0 - ready to wait; |
26227 |
+- * - 1 - acquired the lock; |
26228 |
+- * - <0 - error |
26229 |
+- * |
26230 |
+- * The hb->lock must be held by the caller. |
26231 |
+- * |
26232 |
+- * @exiting is only set when the return value is -EBUSY. If so, this holds |
26233 |
+- * a refcount on the exiting task on return and the caller needs to drop it |
26234 |
+- * after waiting for the exit to complete. |
26235 |
+- */ |
26236 |
+-static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, |
26237 |
+- union futex_key *key, |
26238 |
+- struct futex_pi_state **ps, |
26239 |
+- struct task_struct *task, |
26240 |
+- struct task_struct **exiting, |
26241 |
+- int set_waiters) |
26242 |
+-{ |
26243 |
+- u32 uval, newval, vpid = task_pid_vnr(task); |
26244 |
+- struct futex_q *top_waiter; |
26245 |
+- int ret; |
26246 |
+- |
26247 |
+- /* |
26248 |
+- * Read the user space value first so we can validate a few |
26249 |
+- * things before proceeding further. |
26250 |
+- */ |
26251 |
+- if (get_futex_value_locked(&uval, uaddr)) |
26252 |
+- return -EFAULT; |
26253 |
+- |
26254 |
+- if (unlikely(should_fail_futex(true))) |
26255 |
+- return -EFAULT; |
26256 |
+- |
26257 |
+- /* |
26258 |
+- * Detect deadlocks. |
26259 |
+- */ |
26260 |
+- if ((unlikely((uval & FUTEX_TID_MASK) == vpid))) |
26261 |
+- return -EDEADLK; |
26262 |
+- |
26263 |
+- if ((unlikely(should_fail_futex(true)))) |
26264 |
+- return -EDEADLK; |
26265 |
+- |
26266 |
+- /* |
26267 |
+- * Lookup existing state first. If it exists, try to attach to |
26268 |
+- * its pi_state. |
26269 |
+- */ |
26270 |
+- top_waiter = futex_top_waiter(hb, key); |
26271 |
+- if (top_waiter) |
26272 |
+- return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); |
26273 |
+- |
26274 |
+- /* |
26275 |
+- * No waiter and user TID is 0. We are here because the |
26276 |
+- * waiters or the owner died bit is set or called from |
26277 |
+- * requeue_cmp_pi or for whatever reason something took the |
26278 |
+- * syscall. |
26279 |
+- */ |
26280 |
+- if (!(uval & FUTEX_TID_MASK)) { |
26281 |
+- /* |
26282 |
+- * We take over the futex. No other waiters and the user space |
26283 |
+- * TID is 0. We preserve the owner died bit. |
26284 |
+- */ |
26285 |
+- newval = uval & FUTEX_OWNER_DIED; |
26286 |
+- newval |= vpid; |
26287 |
+- |
26288 |
+- /* The futex requeue_pi code can enforce the waiters bit */ |
26289 |
+- if (set_waiters) |
26290 |
+- newval |= FUTEX_WAITERS; |
26291 |
+- |
26292 |
+- ret = lock_pi_update_atomic(uaddr, uval, newval); |
26293 |
+- if (ret) |
26294 |
+- return ret; |
26295 |
+- |
26296 |
+- /* |
26297 |
+- * If the waiter bit was requested the caller also needs PI |
26298 |
+- * state attached to the new owner of the user space futex. |
26299 |
+- * |
26300 |
+- * @task is guaranteed to be alive and it cannot be exiting |
26301 |
+- * because it is either sleeping or waiting in |
26302 |
+- * futex_requeue_pi_wakeup_sync(). |
26303 |
+- * |
26304 |
+- * No need to do the full attach_to_pi_owner() exercise |
26305 |
+- * because @task is known and valid. |
26306 |
+- */ |
26307 |
+- if (set_waiters) { |
26308 |
+- raw_spin_lock_irq(&task->pi_lock); |
26309 |
+- __attach_to_pi_owner(task, key, ps); |
26310 |
+- raw_spin_unlock_irq(&task->pi_lock); |
26311 |
+- } |
26312 |
+- return 1; |
26313 |
+- } |
26314 |
+- |
26315 |
+- /* |
26316 |
+- * First waiter. Set the waiters bit before attaching ourself to |
26317 |
+- * the owner. If owner tries to unlock, it will be forced into |
26318 |
+- * the kernel and blocked on hb->lock. |
26319 |
+- */ |
26320 |
+- newval = uval | FUTEX_WAITERS; |
26321 |
+- ret = lock_pi_update_atomic(uaddr, uval, newval); |
26322 |
+- if (ret) |
26323 |
+- return ret; |
26324 |
+- /* |
26325 |
+- * If the update of the user space value succeeded, we try to |
26326 |
+- * attach to the owner. If that fails, no harm done, we only |
26327 |
+- * set the FUTEX_WAITERS bit in the user space variable. |
26328 |
+- */ |
26329 |
+- return attach_to_pi_owner(uaddr, newval, key, ps, exiting); |
26330 |
+-} |
26331 |
+- |
26332 |
+-/** |
26333 |
+- * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket |
26334 |
+- * @q: The futex_q to unqueue |
26335 |
+- * |
26336 |
+- * The q->lock_ptr must not be NULL and must be held by the caller. |
26337 |
+- */ |
26338 |
+-static void __unqueue_futex(struct futex_q *q) |
26339 |
+-{ |
26340 |
+- struct futex_hash_bucket *hb; |
26341 |
+- |
26342 |
+- if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) |
26343 |
+- return; |
26344 |
+- lockdep_assert_held(q->lock_ptr); |
26345 |
+- |
26346 |
+- hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); |
26347 |
+- plist_del(&q->list, &hb->chain); |
26348 |
+- hb_waiters_dec(hb); |
26349 |
+-} |
26350 |
+- |
26351 |
+-/* |
26352 |
+- * The hash bucket lock must be held when this is called. |
26353 |
+- * Afterwards, the futex_q must not be accessed. Callers |
26354 |
+- * must ensure to later call wake_up_q() for the actual |
26355 |
+- * wakeups to occur. |
26356 |
+- */ |
26357 |
+-static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) |
26358 |
+-{ |
26359 |
+- struct task_struct *p = q->task; |
26360 |
+- |
26361 |
+- if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) |
26362 |
+- return; |
26363 |
+- |
26364 |
+- get_task_struct(p); |
26365 |
+- __unqueue_futex(q); |
26366 |
+- /* |
26367 |
+- * The waiting task can free the futex_q as soon as q->lock_ptr = NULL |
26368 |
+- * is written, without taking any locks. This is possible in the event |
26369 |
+- * of a spurious wakeup, for example. A memory barrier is required here |
26370 |
+- * to prevent the following store to lock_ptr from getting ahead of the |
26371 |
+- * plist_del in __unqueue_futex(). |
26372 |
+- */ |
26373 |
+- smp_store_release(&q->lock_ptr, NULL); |
26374 |
+- |
26375 |
+- /* |
26376 |
+- * Queue the task for later wakeup for after we've released |
26377 |
+- * the hb->lock. |
26378 |
+- */ |
26379 |
+- wake_q_add_safe(wake_q, p); |
26380 |
+-} |
26381 |
+- |
26382 |
+-/* |
26383 |
+- * Caller must hold a reference on @pi_state. |
26384 |
+- */ |
26385 |
+-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) |
26386 |
+-{ |
26387 |
+- struct rt_mutex_waiter *top_waiter; |
26388 |
+- struct task_struct *new_owner; |
26389 |
+- bool postunlock = false; |
26390 |
+- DEFINE_RT_WAKE_Q(wqh); |
26391 |
+- u32 curval, newval; |
26392 |
+- int ret = 0; |
26393 |
+- |
26394 |
+- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex); |
26395 |
+- if (WARN_ON_ONCE(!top_waiter)) { |
26396 |
+- /* |
26397 |
+- * As per the comment in futex_unlock_pi() this should not happen. |
26398 |
+- * |
26399 |
+- * When this happens, give up our locks and try again, giving |
26400 |
+- * the futex_lock_pi() instance time to complete, either by |
26401 |
+- * waiting on the rtmutex or removing itself from the futex |
26402 |
+- * queue. |
26403 |
+- */ |
26404 |
+- ret = -EAGAIN; |
26405 |
+- goto out_unlock; |
26406 |
+- } |
26407 |
+- |
26408 |
+- new_owner = top_waiter->task; |
26409 |
+- |
26410 |
+- /* |
26411 |
+- * We pass it to the next owner. The WAITERS bit is always kept |
26412 |
+- * enabled while there is PI state around. We cleanup the owner |
26413 |
+- * died bit, because we are the owner. |
26414 |
+- */ |
26415 |
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner); |
26416 |
+- |
26417 |
+- if (unlikely(should_fail_futex(true))) { |
26418 |
+- ret = -EFAULT; |
26419 |
+- goto out_unlock; |
26420 |
+- } |
26421 |
+- |
26422 |
+- ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
26423 |
+- if (!ret && (curval != uval)) { |
26424 |
+- /* |
26425 |
+- * If a unconditional UNLOCK_PI operation (user space did not |
26426 |
+- * try the TID->0 transition) raced with a waiter setting the |
26427 |
+- * FUTEX_WAITERS flag between get_user() and locking the hash |
26428 |
+- * bucket lock, retry the operation. |
26429 |
+- */ |
26430 |
+- if ((FUTEX_TID_MASK & curval) == uval) |
26431 |
+- ret = -EAGAIN; |
26432 |
+- else |
26433 |
+- ret = -EINVAL; |
26434 |
+- } |
26435 |
+- |
26436 |
+- if (!ret) { |
26437 |
+- /* |
26438 |
+- * This is a point of no return; once we modified the uval |
26439 |
+- * there is no going back and subsequent operations must |
26440 |
+- * not fail. |
26441 |
+- */ |
26442 |
+- pi_state_update_owner(pi_state, new_owner); |
26443 |
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh); |
26444 |
+- } |
26445 |
+- |
26446 |
+-out_unlock: |
26447 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
26448 |
+- |
26449 |
+- if (postunlock) |
26450 |
+- rt_mutex_postunlock(&wqh); |
26451 |
+- |
26452 |
+- return ret; |
26453 |
+-} |
26454 |
+- |
26455 |
+-/* |
26456 |
+- * Express the locking dependencies for lockdep: |
26457 |
+- */ |
26458 |
+-static inline void |
26459 |
+-double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) |
26460 |
+-{ |
26461 |
+- if (hb1 <= hb2) { |
26462 |
+- spin_lock(&hb1->lock); |
26463 |
+- if (hb1 < hb2) |
26464 |
+- spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); |
26465 |
+- } else { /* hb1 > hb2 */ |
26466 |
+- spin_lock(&hb2->lock); |
26467 |
+- spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); |
26468 |
+- } |
26469 |
+-} |
26470 |
+- |
26471 |
+-static inline void |
26472 |
+-double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) |
26473 |
+-{ |
26474 |
+- spin_unlock(&hb1->lock); |
26475 |
+- if (hb1 != hb2) |
26476 |
+- spin_unlock(&hb2->lock); |
26477 |
+-} |
26478 |
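double_lock_hb() above uses the usual address-ordered trick for taking two locks of the same class without an ABBA deadlock. A generic user-space sketch of the same idea (hypothetical helper, not from this patch):

    #include <pthread.h>

    /* Always take the lower-addressed mutex first so two threads locking the
     * same pair in opposite argument order cannot deadlock. */
    static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
                    return;
            }
            if (a > b) {
                    pthread_mutex_t *tmp = a;

                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
    }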
+- |
26479 |
+-/* |
26480 |
+- * Wake up waiters matching bitset queued on this futex (uaddr). |
26481 |
+- */ |
26482 |
+-static int |
26483 |
+-futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) |
26484 |
+-{ |
26485 |
+- struct futex_hash_bucket *hb; |
26486 |
+- struct futex_q *this, *next; |
26487 |
+- union futex_key key = FUTEX_KEY_INIT; |
26488 |
+- int ret; |
26489 |
+- DEFINE_WAKE_Q(wake_q); |
26490 |
+- |
26491 |
+- if (!bitset) |
26492 |
+- return -EINVAL; |
26493 |
+- |
26494 |
+- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ); |
26495 |
+- if (unlikely(ret != 0)) |
26496 |
+- return ret; |
26497 |
+- |
26498 |
+- hb = hash_futex(&key); |
26499 |
+- |
26500 |
+- /* Make sure we really have tasks to wakeup */ |
26501 |
+- if (!hb_waiters_pending(hb)) |
26502 |
+- return ret; |
26503 |
+- |
26504 |
+- spin_lock(&hb->lock); |
26505 |
+- |
26506 |
+- plist_for_each_entry_safe(this, next, &hb->chain, list) { |
26507 |
+- if (match_futex (&this->key, &key)) { |
26508 |
+- if (this->pi_state || this->rt_waiter) { |
26509 |
+- ret = -EINVAL; |
26510 |
+- break; |
26511 |
+- } |
26512 |
+- |
26513 |
+- /* Check if one of the bits is set in both bitsets */ |
26514 |
+- if (!(this->bitset & bitset)) |
26515 |
+- continue; |
26516 |
+- |
26517 |
+- mark_wake_futex(&wake_q, this); |
26518 |
+- if (++ret >= nr_wake) |
26519 |
+- break; |
26520 |
+- } |
26521 |
+- } |
26522 |
+- |
26523 |
+- spin_unlock(&hb->lock); |
26524 |
+- wake_up_q(&wake_q); |
26525 |
+- return ret; |
26526 |
+-} |
26527 |
+- |
26528 |
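The bitset check in futex_wake() above ("one of the bits set in both bitsets") is what FUTEX_WAIT_BITSET/FUTEX_WAKE_BITSET expose to user space: a waiter registers a mask and is only woken by wakers whose mask intersects it. A hedged user-space sketch (the channel constants and helper names are invented for illustration; the argument order is the documented futex(2) one):

    #include <linux/futex.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define CHANNEL_A 0x1u
    #define CHANNEL_B 0x2u

    /* Sleep while *uaddr == expected, tagged with a wake-up mask. */
    static long wait_on_channel(uint32_t *uaddr, uint32_t expected, uint32_t mask)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET_PRIVATE,
                           expected, NULL, NULL, mask);
    }

    /* Wake up to nr waiters whose registered mask intersects this one. */
    static long wake_channel(uint32_t *uaddr, int nr, uint32_t mask)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET_PRIVATE,
                           nr, NULL, NULL, mask);
    }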
+-static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) |
26529 |
+-{ |
26530 |
+- unsigned int op = (encoded_op & 0x70000000) >> 28; |
26531 |
+- unsigned int cmp = (encoded_op & 0x0f000000) >> 24; |
26532 |
+- int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11); |
26533 |
+- int cmparg = sign_extend32(encoded_op & 0x00000fff, 11); |
26534 |
+- int oldval, ret; |
26535 |
+- |
26536 |
+- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { |
26537 |
+- if (oparg < 0 || oparg > 31) { |
26538 |
+- char comm[sizeof(current->comm)]; |
26539 |
+- /* |
26540 |
+- * kill this print and return -EINVAL when userspace |
26541 |
+- * is sane again |
26542 |
+- */ |
26543 |
+- pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n", |
26544 |
+- get_task_comm(comm, current), oparg); |
26545 |
+- oparg &= 31; |
26546 |
+- } |
26547 |
+- oparg = 1 << oparg; |
26548 |
+- } |
26549 |
+- |
26550 |
+- pagefault_disable(); |
26551 |
+- ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); |
26552 |
+- pagefault_enable(); |
26553 |
+- if (ret) |
26554 |
+- return ret; |
26555 |
+- |
26556 |
+- switch (cmp) { |
26557 |
+- case FUTEX_OP_CMP_EQ: |
26558 |
+- return oldval == cmparg; |
26559 |
+- case FUTEX_OP_CMP_NE: |
26560 |
+- return oldval != cmparg; |
26561 |
+- case FUTEX_OP_CMP_LT: |
26562 |
+- return oldval < cmparg; |
26563 |
+- case FUTEX_OP_CMP_GE: |
26564 |
+- return oldval >= cmparg; |
26565 |
+- case FUTEX_OP_CMP_LE: |
26566 |
+- return oldval <= cmparg; |
26567 |
+- case FUTEX_OP_CMP_GT: |
26568 |
+- return oldval > cmparg; |
26569 |
+- default: |
26570 |
+- return -ENOSYS; |
26571 |
+- } |
26572 |
+-} |
26573 |
+- |
26574 |
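futex_atomic_op_inuser() above decodes the 32-bit operation word that user space hands to FUTEX_WAKE_OP: 4 bits of operation (including the OPARG_SHIFT flag), 4 bits of comparison, then a 12-bit operand and a 12-bit comparison argument. A sketch of the matching encoder, mirroring the shifts used in the decoder (the real FUTEX_OP() macro in the uapi header does the same packing):

    #include <stdint.h>

    /* Pack op/cmp/oparg/cmparg the way the decoder above unpacks them. */
    static uint32_t encode_wake_op(unsigned int op, unsigned int cmp,
                                   int oparg, int cmparg)
    {
            return ((op & 0xfu) << 28) | ((cmp & 0xfu) << 24) |
                   (((uint32_t)oparg & 0xfffu) << 12) |
                   ((uint32_t)cmparg & 0xfffu);
    }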
+-/* |
26575 |
+- * Wake up all waiters hashed on the physical page that is mapped |
26576 |
+- * to this virtual address: |
26577 |
+- */ |
26578 |
+-static int |
26579 |
+-futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, |
26580 |
+- int nr_wake, int nr_wake2, int op) |
26581 |
+-{ |
26582 |
+- union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
26583 |
+- struct futex_hash_bucket *hb1, *hb2; |
26584 |
+- struct futex_q *this, *next; |
26585 |
+- int ret, op_ret; |
26586 |
+- DEFINE_WAKE_Q(wake_q); |
26587 |
+- |
26588 |
+-retry: |
26589 |
+- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
26590 |
+- if (unlikely(ret != 0)) |
26591 |
+- return ret; |
26592 |
+- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
26593 |
+- if (unlikely(ret != 0)) |
26594 |
+- return ret; |
26595 |
+- |
26596 |
+- hb1 = hash_futex(&key1); |
26597 |
+- hb2 = hash_futex(&key2); |
26598 |
+- |
26599 |
+-retry_private: |
26600 |
+- double_lock_hb(hb1, hb2); |
26601 |
+- op_ret = futex_atomic_op_inuser(op, uaddr2); |
26602 |
+- if (unlikely(op_ret < 0)) { |
26603 |
+- double_unlock_hb(hb1, hb2); |
26604 |
+- |
26605 |
+- if (!IS_ENABLED(CONFIG_MMU) || |
26606 |
+- unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { |
26607 |
+- /* |
26608 |
+- * we don't get EFAULT from MMU faults if we don't have |
26609 |
+- * an MMU, but we might get them from range checking |
26610 |
+- */ |
26611 |
+- ret = op_ret; |
26612 |
+- return ret; |
26613 |
+- } |
26614 |
+- |
26615 |
+- if (op_ret == -EFAULT) { |
26616 |
+- ret = fault_in_user_writeable(uaddr2); |
26617 |
+- if (ret) |
26618 |
+- return ret; |
26619 |
+- } |
26620 |
+- |
26621 |
+- cond_resched(); |
26622 |
+- if (!(flags & FLAGS_SHARED)) |
26623 |
+- goto retry_private; |
26624 |
+- goto retry; |
26625 |
+- } |
26626 |
+- |
26627 |
+- plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
26628 |
+- if (match_futex (&this->key, &key1)) { |
26629 |
+- if (this->pi_state || this->rt_waiter) { |
26630 |
+- ret = -EINVAL; |
26631 |
+- goto out_unlock; |
26632 |
+- } |
26633 |
+- mark_wake_futex(&wake_q, this); |
26634 |
+- if (++ret >= nr_wake) |
26635 |
+- break; |
26636 |
+- } |
26637 |
+- } |
26638 |
+- |
26639 |
+- if (op_ret > 0) { |
26640 |
+- op_ret = 0; |
26641 |
+- plist_for_each_entry_safe(this, next, &hb2->chain, list) { |
26642 |
+- if (match_futex (&this->key, &key2)) { |
26643 |
+- if (this->pi_state || this->rt_waiter) { |
26644 |
+- ret = -EINVAL; |
26645 |
+- goto out_unlock; |
26646 |
+- } |
26647 |
+- mark_wake_futex(&wake_q, this); |
26648 |
+- if (++op_ret >= nr_wake2) |
26649 |
+- break; |
26650 |
+- } |
26651 |
+- } |
26652 |
+- ret += op_ret; |
26653 |
+- } |
26654 |
+- |
26655 |
+-out_unlock: |
26656 |
+- double_unlock_hb(hb1, hb2); |
26657 |
+- wake_up_q(&wake_q); |
26658 |
+- return ret; |
26659 |
+-} |
26660 |
+- |
26661 |
+-/** |
26662 |
+- * requeue_futex() - Requeue a futex_q from one hb to another |
26663 |
+- * @q: the futex_q to requeue |
26664 |
+- * @hb1: the source hash_bucket |
26665 |
+- * @hb2: the target hash_bucket |
26666 |
+- * @key2: the new key for the requeued futex_q |
26667 |
+- */ |
26668 |
+-static inline |
26669 |
+-void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, |
26670 |
+- struct futex_hash_bucket *hb2, union futex_key *key2) |
26671 |
+-{ |
26672 |
+- |
26673 |
+- /* |
26674 |
+- * If key1 and key2 hash to the same bucket, no need to |
26675 |
+- * requeue. |
26676 |
+- */ |
26677 |
+- if (likely(&hb1->chain != &hb2->chain)) { |
26678 |
+- plist_del(&q->list, &hb1->chain); |
26679 |
+- hb_waiters_dec(hb1); |
26680 |
+- hb_waiters_inc(hb2); |
26681 |
+- plist_add(&q->list, &hb2->chain); |
26682 |
+- q->lock_ptr = &hb2->lock; |
26683 |
+- } |
26684 |
+- q->key = *key2; |
26685 |
+-} |
26686 |
+- |
26687 |
+-static inline bool futex_requeue_pi_prepare(struct futex_q *q, |
26688 |
+- struct futex_pi_state *pi_state) |
26689 |
+-{ |
26690 |
+- int old, new; |
26691 |
+- |
26692 |
+- /* |
26693 |
+- * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has |
26694 |
+- * already set Q_REQUEUE_PI_IGNORE to signal that requeue should |
26695 |
+- * ignore the waiter. |
26696 |
+- */ |
26697 |
+- old = atomic_read_acquire(&q->requeue_state); |
26698 |
+- do { |
26699 |
+- if (old == Q_REQUEUE_PI_IGNORE) |
26700 |
+- return false; |
26701 |
+- |
26702 |
+- /* |
26703 |
+- * futex_proxy_trylock_atomic() might have set it to |
26704 |
+- * IN_PROGRESS and a interleaved early wake to WAIT. |
26705 |
+- * |
26706 |
+- * It was considered to have an extra state for that |
26707 |
+- * trylock, but that would just add more conditionals |
26708 |
+- * all over the place for a dubious value. |
26709 |
+- */ |
26710 |
+- if (old != Q_REQUEUE_PI_NONE) |
26711 |
+- break; |
26712 |
+- |
26713 |
+- new = Q_REQUEUE_PI_IN_PROGRESS; |
26714 |
+- } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
26715 |
+- |
26716 |
+- q->pi_state = pi_state; |
26717 |
+- return true; |
26718 |
+-} |
26719 |
+- |
26720 |
+-static inline void futex_requeue_pi_complete(struct futex_q *q, int locked) |
26721 |
+-{ |
26722 |
+- int old, new; |
26723 |
+- |
26724 |
+- old = atomic_read_acquire(&q->requeue_state); |
26725 |
+- do { |
26726 |
+- if (old == Q_REQUEUE_PI_IGNORE) |
26727 |
+- return; |
26728 |
+- |
26729 |
+- if (locked >= 0) { |
26730 |
+- /* Requeue succeeded. Set DONE or LOCKED */ |
26731 |
+- WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS && |
26732 |
+- old != Q_REQUEUE_PI_WAIT); |
26733 |
+- new = Q_REQUEUE_PI_DONE + locked; |
26734 |
+- } else if (old == Q_REQUEUE_PI_IN_PROGRESS) { |
26735 |
+- /* Deadlock, no early wakeup interleave */ |
26736 |
+- new = Q_REQUEUE_PI_NONE; |
26737 |
+- } else { |
26738 |
+- /* Deadlock, early wakeup interleave. */ |
26739 |
+- WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT); |
26740 |
+- new = Q_REQUEUE_PI_IGNORE; |
26741 |
+- } |
26742 |
+- } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
26743 |
+- |
26744 |
+-#ifdef CONFIG_PREEMPT_RT |
26745 |
+- /* If the waiter interleaved with the requeue let it know */ |
26746 |
+- if (unlikely(old == Q_REQUEUE_PI_WAIT)) |
26747 |
+- rcuwait_wake_up(&q->requeue_wait); |
26748 |
+-#endif |
26749 |
+-} |
26750 |
+- |
26751 |
+-static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q) |
26752 |
+-{ |
26753 |
+- int old, new; |
26754 |
+- |
26755 |
+- old = atomic_read_acquire(&q->requeue_state); |
26756 |
+- do { |
26757 |
+- /* Is requeue done already? */ |
26758 |
+- if (old >= Q_REQUEUE_PI_DONE) |
26759 |
+- return old; |
26760 |
+- |
26761 |
+- /* |
26762 |
+- * If not done, then tell the requeue code to either ignore |
26763 |
+- * the waiter or to wake it up once the requeue is done. |
26764 |
+- */ |
26765 |
+- new = Q_REQUEUE_PI_WAIT; |
26766 |
+- if (old == Q_REQUEUE_PI_NONE) |
26767 |
+- new = Q_REQUEUE_PI_IGNORE; |
26768 |
+- } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
26769 |
+- |
26770 |
+- /* If the requeue was in progress, wait for it to complete */ |
26771 |
+- if (old == Q_REQUEUE_PI_IN_PROGRESS) { |
26772 |
+-#ifdef CONFIG_PREEMPT_RT |
26773 |
+- rcuwait_wait_event(&q->requeue_wait, |
26774 |
+- atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT, |
26775 |
+- TASK_UNINTERRUPTIBLE); |
26776 |
+-#else |
26777 |
+- (void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT); |
26778 |
+-#endif |
26779 |
+- } |
26780 |
+- |
26781 |
+- /* |
26782 |
+- * Requeue is now either prohibited or complete. Reread state |
26783 |
+- * because during the wait above it might have changed. Nothing |
26784 |
+- * will modify q->requeue_state after this point. |
26785 |
+- */ |
26786 |
+- return atomic_read(&q->requeue_state); |
26787 |
+-} |
26788 |
+- |
26789 |
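futex_requeue_pi_prepare/complete/wakeup_sync() above all share one lock-free shape: load the state with acquire ordering, decide the next state (or bail out), and retry the compare-exchange until the transition sticks. A stripped-down C11 sketch of that shape (illustrative, with invented state names, not the kernel atomic API):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { STATE_NONE, STATE_IN_PROGRESS, STATE_DONE, STATE_IGNORE };

    /* Move NONE -> IN_PROGRESS; back off if the peer already set IGNORE,
     * and accept any other already-advanced state as-is. */
    static bool begin_transition(atomic_int *state)
    {
            int old = atomic_load_explicit(state, memory_order_acquire);

            do {
                    if (old == STATE_IGNORE)
                            return false;
                    if (old != STATE_NONE)
                            break;
            } while (!atomic_compare_exchange_weak(state, &old, STATE_IN_PROGRESS));

            return true;
    }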
+-/** |
26790 |
+- * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue |
26791 |
+- * @q: the futex_q |
26792 |
+- * @key: the key of the requeue target futex |
26793 |
+- * @hb: the hash_bucket of the requeue target futex |
26794 |
+- * |
26795 |
+- * During futex_requeue, with requeue_pi=1, it is possible to acquire the |
26796 |
+- * target futex if it is uncontended or via a lock steal. |
26797 |
+- * |
26798 |
+- * 1) Set @q::key to the requeue target futex key so the waiter can detect |
26799 |
+- * the wakeup on the right futex. |
26800 |
+- * |
26801 |
+- * 2) Dequeue @q from the hash bucket. |
26802 |
+- * |
26803 |
+- * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock |
26804 |
+- * acquisition. |
26805 |
+- * |
26806 |
+- * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that |
26807 |
+- * the waiter has to fixup the pi state. |
26808 |
+- * |
26809 |
+- * 5) Complete the requeue state so the waiter can make progress. After |
26810 |
+- * this point the waiter task can return from the syscall immediately in |
26811 |
+- * case that the pi state does not have to be fixed up. |
26812 |
+- * |
26813 |
+- * 6) Wake the waiter task. |
26814 |
+- * |
26815 |
+- * Must be called with both q->lock_ptr and hb->lock held. |
26816 |
+- */ |
26817 |
+-static inline |
26818 |
+-void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
26819 |
+- struct futex_hash_bucket *hb) |
26820 |
+-{ |
26821 |
+- q->key = *key; |
26822 |
+- |
26823 |
+- __unqueue_futex(q); |
26824 |
+- |
26825 |
+- WARN_ON(!q->rt_waiter); |
26826 |
+- q->rt_waiter = NULL; |
26827 |
+- |
26828 |
+- q->lock_ptr = &hb->lock; |
26829 |
+- |
26830 |
+- /* Signal locked state to the waiter */ |
26831 |
+- futex_requeue_pi_complete(q, 1); |
26832 |
+- wake_up_state(q->task, TASK_NORMAL); |
26833 |
+-} |
26834 |
+- |
26835 |
+-/** |
26836 |
+- * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter |
26837 |
+- * @pifutex: the user address of the to futex |
26838 |
+- * @hb1: the from futex hash bucket, must be locked by the caller |
26839 |
+- * @hb2: the to futex hash bucket, must be locked by the caller |
26840 |
+- * @key1: the from futex key |
26841 |
+- * @key2: the to futex key |
26842 |
+- * @ps: address to store the pi_state pointer |
26843 |
+- * @exiting: Pointer to store the task pointer of the owner task |
26844 |
+- * which is in the middle of exiting |
26845 |
+- * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
26846 |
+- * |
26847 |
+- * Try and get the lock on behalf of the top waiter if we can do it atomically. |
26848 |
+- * Wake the top waiter if we succeed. If the caller specified set_waiters, |
26849 |
+- * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. |
26850 |
+- * hb1 and hb2 must be held by the caller. |
26851 |
+- * |
26852 |
+- * @exiting is only set when the return value is -EBUSY. If so, this holds |
26853 |
+- * a refcount on the exiting task on return and the caller needs to drop it |
26854 |
+- * after waiting for the exit to complete. |
26855 |
+- * |
26856 |
+- * Return: |
26857 |
+- * - 0 - failed to acquire the lock atomically; |
26858 |
+- * - >0 - acquired the lock, return value is vpid of the top_waiter |
26859 |
+- * - <0 - error |
26860 |
+- */ |
26861 |
+-static int |
26862 |
+-futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, |
26863 |
+- struct futex_hash_bucket *hb2, union futex_key *key1, |
26864 |
+- union futex_key *key2, struct futex_pi_state **ps, |
26865 |
+- struct task_struct **exiting, int set_waiters) |
26866 |
+-{ |
26867 |
+- struct futex_q *top_waiter = NULL; |
26868 |
+- u32 curval; |
26869 |
+- int ret; |
26870 |
+- |
26871 |
+- if (get_futex_value_locked(&curval, pifutex)) |
26872 |
+- return -EFAULT; |
26873 |
+- |
26874 |
+- if (unlikely(should_fail_futex(true))) |
26875 |
+- return -EFAULT; |
26876 |
+- |
26877 |
+- /* |
26878 |
+- * Find the top_waiter and determine if there are additional waiters. |
26879 |
+- * If the caller intends to requeue more than 1 waiter to pifutex, |
26880 |
+- * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, |
26881 |
+- * as we have means to handle the possible fault. If not, don't set |
26882 |
+- * the bit unnecessarily as it will force the subsequent unlock to enter |
26883 |
+- * the kernel. |
26884 |
+- */ |
26885 |
+- top_waiter = futex_top_waiter(hb1, key1); |
26886 |
+- |
26887 |
+- /* There are no waiters, nothing for us to do. */ |
26888 |
+- if (!top_waiter) |
26889 |
+- return 0; |
26890 |
+- |
26891 |
+- /* |
26892 |
+- * Ensure that this is a waiter sitting in futex_wait_requeue_pi() |
26893 |
+- * and waiting on the 'waitqueue' futex which is always !PI. |
26894 |
+- */ |
26895 |
+- if (!top_waiter->rt_waiter || top_waiter->pi_state) |
26896 |
+- return -EINVAL; |
26897 |
+- |
26898 |
+- /* Ensure we requeue to the expected futex. */ |
26899 |
+- if (!match_futex(top_waiter->requeue_pi_key, key2)) |
26900 |
+- return -EINVAL; |
26901 |
+- |
26902 |
+- /* Ensure that this does not race against an early wakeup */ |
26903 |
+- if (!futex_requeue_pi_prepare(top_waiter, NULL)) |
26904 |
+- return -EAGAIN; |
26905 |
+- |
26906 |
+- /* |
26907 |
+- * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit |
26908 |
+- * in the contended case or if @set_waiters is true. |
26909 |
+- * |
26910 |
+- * In the contended case PI state is attached to the lock owner. If |
26911 |
+- * the user space lock can be acquired then PI state is attached to |
26912 |
+- * the new owner (@top_waiter->task) when @set_waiters is true. |
26913 |
+- */ |
26914 |
+- ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, |
26915 |
+- exiting, set_waiters); |
26916 |
+- if (ret == 1) { |
26917 |
+- /* |
26918 |
+- * Lock was acquired in user space and PI state was |
26919 |
+- * attached to @top_waiter->task. That means state is fully |
26920 |
+- * consistent and the waiter can return to user space |
26921 |
+- * immediately after the wakeup. |
26922 |
+- */ |
26923 |
+- requeue_pi_wake_futex(top_waiter, key2, hb2); |
26924 |
+- } else if (ret < 0) { |
26925 |
+- /* Rewind top_waiter::requeue_state */ |
26926 |
+- futex_requeue_pi_complete(top_waiter, ret); |
26927 |
+- } else { |
26928 |
+- /* |
26929 |
+- * futex_lock_pi_atomic() did not acquire the user space |
26930 |
+- * futex, but managed to establish the proxy lock and pi |
26931 |
+- * state. top_waiter::requeue_state cannot be fixed up here |
26932 |
+- * because the waiter is not enqueued on the rtmutex |
26933 |
+- * yet. This is handled at the callsite depending on the |
26934 |
+- * result of rt_mutex_start_proxy_lock() which is |
26935 |
+- * guaranteed to be reached with this function returning 0. |
26936 |
+- */ |
26937 |
+- } |
26938 |
+- return ret; |
26939 |
+-} |
26940 |
+- |
26941 |
+-/** |
26942 |
+- * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 |
26943 |
+- * @uaddr1: source futex user address |
26944 |
+- * @flags: futex flags (FLAGS_SHARED, etc.) |
26945 |
+- * @uaddr2: target futex user address |
26946 |
+- * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) |
26947 |
+- * @nr_requeue: number of waiters to requeue (0-INT_MAX) |
26948 |
+- * @cmpval: @uaddr1 expected value (or %NULL) |
26949 |
+- * @requeue_pi: if we are attempting to requeue from a non-pi futex to a |
26950 |
+- * pi futex (pi to pi requeue is not supported) |
26951 |
+- * |
26952 |
+- * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire |
26953 |
+- * uaddr2 atomically on behalf of the top waiter. |
26954 |
+- * |
26955 |
+- * Return: |
26956 |
+- * - >=0 - on success, the number of tasks requeued or woken; |
26957 |
+- * - <0 - on error |
26958 |
+- */ |
26959 |
+-static int futex_requeue(u32 __user *uaddr1, unsigned int flags, |
26960 |
+- u32 __user *uaddr2, int nr_wake, int nr_requeue, |
26961 |
+- u32 *cmpval, int requeue_pi) |
26962 |
+-{ |
26963 |
+- union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
26964 |
+- int task_count = 0, ret; |
26965 |
+- struct futex_pi_state *pi_state = NULL; |
26966 |
+- struct futex_hash_bucket *hb1, *hb2; |
26967 |
+- struct futex_q *this, *next; |
26968 |
+- DEFINE_WAKE_Q(wake_q); |
26969 |
+- |
26970 |
+- if (nr_wake < 0 || nr_requeue < 0) |
26971 |
+- return -EINVAL; |
26972 |
+- |
26973 |
+- /* |
26974 |
+- * When PI not supported: return -ENOSYS if requeue_pi is true, |
26975 |
+- * consequently the compiler knows requeue_pi is always false past |
26976 |
+- * this point which will optimize away all the conditional code |
26977 |
+- * further down. |
26978 |
+- */ |
26979 |
+- if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi) |
26980 |
+- return -ENOSYS; |
26981 |
+- |
26982 |
+- if (requeue_pi) { |
26983 |
+- /* |
26984 |
+- * Requeue PI only works on two distinct uaddrs. This |
26985 |
+- * check is only valid for private futexes. See below. |
26986 |
+- */ |
26987 |
+- if (uaddr1 == uaddr2) |
26988 |
+- return -EINVAL; |
26989 |
+- |
26990 |
+- /* |
26991 |
+- * futex_requeue() allows the caller to define the number |
26992 |
+- * of waiters to wake up via the @nr_wake argument. With |
26993 |
+- * REQUEUE_PI, waking up more than one waiter is creating |
26994 |
+- * more problems than it solves. Waking up a waiter makes |
26995 |
+- * only sense if the PI futex @uaddr2 is uncontended as |
26996 |
+- * this allows the requeue code to acquire the futex |
26997 |
+- * @uaddr2 before waking the waiter. The waiter can then |
26998 |
+- * return to user space without further action. A secondary |
26999 |
+- * wakeup would just make the futex_wait_requeue_pi() |
27000 |
+- * handling more complex, because that code would have to |
27001 |
+- * look up pi_state and do more or less all the handling |
27002 |
+- * which the requeue code has to do for the to be requeued |
27003 |
+- * waiters. So restrict the number of waiters to wake to |
27004 |
+- * one, and only wake it up when the PI futex is |
27005 |
+- * uncontended. Otherwise requeue it and let the unlock of |
27006 |
+- * the PI futex handle the wakeup. |
27007 |
+- * |
27008 |
+- * All REQUEUE_PI users, e.g. pthread_cond_signal() and |
27009 |
+- * pthread_cond_broadcast() must use nr_wake=1. |
27010 |
+- */ |
27011 |
+- if (nr_wake != 1) |
27012 |
+- return -EINVAL; |
27013 |
+- |
27014 |
+- /* |
27015 |
+- * requeue_pi requires a pi_state, try to allocate it now |
27016 |
+- * without any locks in case it fails. |
27017 |
+- */ |
27018 |
+- if (refill_pi_state_cache()) |
27019 |
+- return -ENOMEM; |
27020 |
+- } |
27021 |
+- |
27022 |
+-retry: |
27023 |
+- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
27024 |
+- if (unlikely(ret != 0)) |
27025 |
+- return ret; |
27026 |
+- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
27027 |
+- requeue_pi ? FUTEX_WRITE : FUTEX_READ); |
27028 |
+- if (unlikely(ret != 0)) |
27029 |
+- return ret; |
27030 |
+- |
27031 |
+- /* |
27032 |
+- * The check above which compares uaddrs is not sufficient for |
27033 |
+- * shared futexes. We need to compare the keys: |
27034 |
+- */ |
27035 |
+- if (requeue_pi && match_futex(&key1, &key2)) |
27036 |
+- return -EINVAL; |
27037 |
+- |
27038 |
+- hb1 = hash_futex(&key1); |
27039 |
+- hb2 = hash_futex(&key2); |
27040 |
+- |
27041 |
+-retry_private: |
27042 |
+- hb_waiters_inc(hb2); |
27043 |
+- double_lock_hb(hb1, hb2); |
27044 |
+- |
27045 |
+- if (likely(cmpval != NULL)) { |
27046 |
+- u32 curval; |
27047 |
+- |
27048 |
+- ret = get_futex_value_locked(&curval, uaddr1); |
27049 |
+- |
27050 |
+- if (unlikely(ret)) { |
27051 |
+- double_unlock_hb(hb1, hb2); |
27052 |
+- hb_waiters_dec(hb2); |
27053 |
+- |
27054 |
+- ret = get_user(curval, uaddr1); |
27055 |
+- if (ret) |
27056 |
+- return ret; |
27057 |
+- |
27058 |
+- if (!(flags & FLAGS_SHARED)) |
27059 |
+- goto retry_private; |
27060 |
+- |
27061 |
+- goto retry; |
27062 |
+- } |
27063 |
+- if (curval != *cmpval) { |
27064 |
+- ret = -EAGAIN; |
27065 |
+- goto out_unlock; |
27066 |
+- } |
27067 |
+- } |
27068 |
+- |
27069 |
+- if (requeue_pi) { |
27070 |
+- struct task_struct *exiting = NULL; |
27071 |
+- |
27072 |
+- /* |
27073 |
+- * Attempt to acquire uaddr2 and wake the top waiter. If we |
27074 |
+- * intend to requeue waiters, force setting the FUTEX_WAITERS |
27075 |
+- * bit. We force this here where we are able to easily handle |
27076 |
+- * faults rather than in the requeue loop below. |
27077 |
+- * |
27078 |
+- * Updates topwaiter::requeue_state if a top waiter exists. |
27079 |
+- */ |
27080 |
+- ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, |
27081 |
+- &key2, &pi_state, |
27082 |
+- &exiting, nr_requeue); |
27083 |
+- |
27084 |
+- /* |
27085 |
+- * At this point the top_waiter has either taken uaddr2 or |
27086 |
+- * is waiting on it. In both cases pi_state has been |
27087 |
+- * established with an initial refcount on it. In case of an |
27088 |
+- * error there's nothing. |
27089 |
+- * |
27090 |
+- * The top waiter's requeue_state is up to date: |
27091 |
+- * |
27092 |
+- * - If the lock was acquired atomically (ret == 1), then |
27093 |
+- * the state is Q_REQUEUE_PI_LOCKED. |
27094 |
+- * |
27095 |
+- * The top waiter has been dequeued and woken up and can |
27096 |
+- * return to user space immediately. The kernel/user |
27097 |
+- * space state is consistent. In case that there must be |
27098 |
+- * more waiters requeued the WAITERS bit in the user |
27099 |
+- * space futex is set so the top waiter task has to go |
27100 |
+- * into the syscall slowpath to unlock the futex. This |
27101 |
+- * will block until this requeue operation has been |
27102 |
+- * completed and the hash bucket locks have been |
27103 |
+- * dropped. |
27104 |
+- * |
27105 |
+- * - If the trylock failed with an error (ret < 0) then |
27106 |
+- * the state is either Q_REQUEUE_PI_NONE, i.e. "nothing |
27107 |
+- * happened", or Q_REQUEUE_PI_IGNORE when there was an |
27108 |
+- * interleaved early wakeup. |
27109 |
+- * |
27110 |
+- * - If the trylock did not succeed (ret == 0) then the |
27111 |
+- * state is either Q_REQUEUE_PI_IN_PROGRESS or |
27112 |
+- * Q_REQUEUE_PI_WAIT if an early wakeup interleaved. |
27113 |
+- * This will be cleaned up in the loop below, which |
27114 |
+- * cannot fail because futex_proxy_trylock_atomic() did |
27115 |
+- * the same sanity checks for requeue_pi as the loop |
27116 |
+- * below does. |
27117 |
+- */ |
27118 |
+- switch (ret) { |
27119 |
+- case 0: |
27120 |
+- /* We hold a reference on the pi state. */ |
27121 |
+- break; |
27122 |
+- |
27123 |
+- case 1: |
27124 |
+- /* |
27125 |
+- * futex_proxy_trylock_atomic() acquired the user space |
27126 |
+- * futex. Adjust task_count. |
27127 |
+- */ |
27128 |
+- task_count++; |
27129 |
+- ret = 0; |
27130 |
+- break; |
27131 |
+- |
27132 |
+- /* |
27133 |
+- * If the above failed, then pi_state is NULL and |
27134 |
+- * waiter::requeue_state is correct. |
27135 |
+- */ |
27136 |
+- case -EFAULT: |
27137 |
+- double_unlock_hb(hb1, hb2); |
27138 |
+- hb_waiters_dec(hb2); |
27139 |
+- ret = fault_in_user_writeable(uaddr2); |
27140 |
+- if (!ret) |
27141 |
+- goto retry; |
27142 |
+- return ret; |
27143 |
+- case -EBUSY: |
27144 |
+- case -EAGAIN: |
27145 |
+- /* |
27146 |
+- * Two reasons for this: |
27147 |
+- * - EBUSY: Owner is exiting and we just wait for the |
27148 |
+- * exit to complete. |
27149 |
+- * - EAGAIN: The user space value changed. |
27150 |
+- */ |
27151 |
+- double_unlock_hb(hb1, hb2); |
27152 |
+- hb_waiters_dec(hb2); |
27153 |
+- /* |
27154 |
+- * Handle the case where the owner is in the middle of |
27155 |
+- * exiting. Wait for the exit to complete otherwise |
27156 |
+- * this task might loop forever, aka. live lock. |
27157 |
+- */ |
27158 |
+- wait_for_owner_exiting(ret, exiting); |
27159 |
+- cond_resched(); |
27160 |
+- goto retry; |
27161 |
+- default: |
27162 |
+- goto out_unlock; |
27163 |
+- } |
27164 |
+- } |
27165 |
+- |
27166 |
+- plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
27167 |
+- if (task_count - nr_wake >= nr_requeue) |
27168 |
+- break; |
27169 |
+- |
27170 |
+- if (!match_futex(&this->key, &key1)) |
27171 |
+- continue; |
27172 |
+- |
27173 |
+- /* |
27174 |
+- * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always |
27175 |
+- * be paired with each other and no other futex ops. |
27176 |
+- * |
27177 |
+- * We should never be requeueing a futex_q with a pi_state, |
27178 |
+- * which is awaiting a futex_unlock_pi(). |
27179 |
+- */ |
27180 |
+- if ((requeue_pi && !this->rt_waiter) || |
27181 |
+- (!requeue_pi && this->rt_waiter) || |
27182 |
+- this->pi_state) { |
27183 |
+- ret = -EINVAL; |
27184 |
+- break; |
27185 |
+- } |
27186 |
+- |
27187 |
+- /* Plain futexes just wake or requeue and are done */ |
27188 |
+- if (!requeue_pi) { |
27189 |
+- if (++task_count <= nr_wake) |
27190 |
+- mark_wake_futex(&wake_q, this); |
27191 |
+- else |
27192 |
+- requeue_futex(this, hb1, hb2, &key2); |
27193 |
+- continue; |
27194 |
+- } |
27195 |
+- |
27196 |
+- /* Ensure we requeue to the expected futex for requeue_pi. */ |
27197 |
+- if (!match_futex(this->requeue_pi_key, &key2)) { |
27198 |
+- ret = -EINVAL; |
27199 |
+- break; |
27200 |
+- } |
27201 |
+- |
27202 |
+- /* |
27203 |
+- * Requeue nr_requeue waiters and possibly one more in the case |
27204 |
+- * of requeue_pi if we couldn't acquire the lock atomically. |
27205 |
+- * |
27206 |
+- * Prepare the waiter to take the rt_mutex. Take a refcount |
27207 |
+- * on the pi_state and store the pointer in the futex_q |
27208 |
+- * object of the waiter. |
27209 |
+- */ |
27210 |
+- get_pi_state(pi_state); |
27211 |
+- |
27212 |
+- /* Don't requeue when the waiter is already on the way out. */ |
27213 |
+- if (!futex_requeue_pi_prepare(this, pi_state)) { |
27214 |
+- /* |
27215 |
+- * Early woken waiter signaled that it is on the |
27216 |
+- * way out. Drop the pi_state reference and try the |
27217 |
+- * next waiter. @this->pi_state is still NULL. |
27218 |
+- */ |
27219 |
+- put_pi_state(pi_state); |
27220 |
+- continue; |
27221 |
+- } |
27222 |
+- |
27223 |
+- ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, |
27224 |
+- this->rt_waiter, |
27225 |
+- this->task); |
27226 |
+- |
27227 |
+- if (ret == 1) { |
27228 |
+- /* |
27229 |
+- * We got the lock. We do neither drop the refcount |
27230 |
+- * on pi_state nor clear this->pi_state because the |
27231 |
+- * waiter needs the pi_state for cleaning up the |
27232 |
+- * user space value. It will drop the refcount |
27233 |
+- * after doing so. this::requeue_state is updated |
27234 |
+- * in the wakeup as well. |
27235 |
+- */ |
27236 |
+- requeue_pi_wake_futex(this, &key2, hb2); |
27237 |
+- task_count++; |
27238 |
+- } else if (!ret) { |
27239 |
+- /* Waiter is queued, move it to hb2 */ |
27240 |
+- requeue_futex(this, hb1, hb2, &key2); |
27241 |
+- futex_requeue_pi_complete(this, 0); |
27242 |
+- task_count++; |
27243 |
+- } else { |
27244 |
+- /* |
27245 |
+- * rt_mutex_start_proxy_lock() detected a potential |
27246 |
+- * deadlock when we tried to queue that waiter. |
27247 |
+- * Drop the pi_state reference which we took above |
27248 |
+- * and remove the pointer to the state from the |
27249 |
+- * waiters futex_q object. |
27250 |
+- */ |
27251 |
+- this->pi_state = NULL; |
27252 |
+- put_pi_state(pi_state); |
27253 |
+- futex_requeue_pi_complete(this, ret); |
27254 |
+- /* |
27255 |
+- * We stop queueing more waiters and let user space |
27256 |
+- * deal with the mess. |
27257 |
+- */ |
27258 |
+- break; |
27259 |
+- } |
27260 |
+- } |
27261 |
+- |
27262 |
+- /* |
27263 |
+- * We took an extra initial reference to the pi_state in |
27264 |
+- * futex_proxy_trylock_atomic(). We need to drop it here again. |
27265 |
+- */ |
27266 |
+- put_pi_state(pi_state); |
27267 |
+- |
27268 |
+-out_unlock: |
27269 |
+- double_unlock_hb(hb1, hb2); |
27270 |
+- wake_up_q(&wake_q); |
27271 |
+- hb_waiters_dec(hb2); |
27272 |
+- return ret ? ret : task_count; |
27273 |
+-} |
27274 |
+- |
27275 |
+-/* The key must be already stored in q->key. */ |
27276 |
+-static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) |
27277 |
+- __acquires(&hb->lock) |
27278 |
+-{ |
27279 |
+- struct futex_hash_bucket *hb; |
27280 |
+- |
27281 |
+- hb = hash_futex(&q->key); |
27282 |
+- |
27283 |
+- /* |
27284 |
+- * Increment the counter before taking the lock so that |
27285 |
+- * a potential waker won't miss a to-be-slept task that is |
27286 |
+- * waiting for the spinlock. This is safe as all queue_lock() |
27287 |
+- * users end up calling queue_me(). Similarly, for housekeeping, |
27288 |
+- * decrement the counter at queue_unlock() when some error has |
27289 |
+- * occurred and we don't end up adding the task to the list. |
27290 |
+- */ |
27291 |
+- hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
27292 |
+- |
27293 |
+- q->lock_ptr = &hb->lock; |
27294 |
+- |
27295 |
+- spin_lock(&hb->lock); |
27296 |
+- return hb; |
27297 |
+-} |
27298 |
+- |
27299 |
+-static inline void |
27300 |
+-queue_unlock(struct futex_hash_bucket *hb) |
27301 |
+- __releases(&hb->lock) |
27302 |
+-{ |
27303 |
+- spin_unlock(&hb->lock); |
27304 |
+- hb_waiters_dec(hb); |
27305 |
+-} |
27306 |
+- |
27307 |
+-static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
27308 |
+-{ |
27309 |
+- int prio; |
27310 |
+- |
27311 |
+- /* |
27312 |
+- * The priority used to register this element is |
27313 |
+- * - either the real thread-priority for the real-time threads |
27314 |
+- * (i.e. threads with a priority lower than MAX_RT_PRIO) |
27315 |
+- * - or MAX_RT_PRIO for non-RT threads. |
27316 |
+- * Thus, all RT-threads are woken first in priority order, and |
27317 |
+- * the others are woken last, in FIFO order. |
27318 |
+- */ |
27319 |
+- prio = min(current->normal_prio, MAX_RT_PRIO); |
27320 |
+- |
27321 |
+- plist_node_init(&q->list, prio); |
27322 |
+- plist_add(&q->list, &hb->chain); |
27323 |
+- q->task = current; |
27324 |
+-} |
27325 |
+- |
27326 |
+-/** |
27327 |
+- * queue_me() - Enqueue the futex_q on the futex_hash_bucket |
27328 |
+- * @q: The futex_q to enqueue |
27329 |
+- * @hb: The destination hash bucket |
27330 |
+- * |
27331 |
+- * The hb->lock must be held by the caller, and is released here. A call to |
27332 |
+- * queue_me() is typically paired with exactly one call to unqueue_me(). The |
27333 |
+- * exceptions involve the PI related operations, which may use unqueue_me_pi() |
27334 |
+- * or nothing if the unqueue is done as part of the wake process and the unqueue |
27335 |
+- * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for |
27336 |
+- * an example). |
27337 |
+- */ |
27338 |
+-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
27339 |
+- __releases(&hb->lock) |
27340 |
+-{ |
27341 |
+- __queue_me(q, hb); |
27342 |
+- spin_unlock(&hb->lock); |
27343 |
+-} |
27344 |
+- |
27345 |
+-/** |
27346 |
+- * unqueue_me() - Remove the futex_q from its futex_hash_bucket |
27347 |
+- * @q: The futex_q to unqueue |
27348 |
+- * |
27349 |
+- * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must |
27350 |
+- * be paired with exactly one earlier call to queue_me(). |
27351 |
+- * |
27352 |
+- * Return: |
27353 |
+- * - 1 - if the futex_q was still queued (and we unqueued it); |
27354 |
+- * - 0 - if the futex_q was already removed by the waking thread |
27355 |
+- */ |
27356 |
+-static int unqueue_me(struct futex_q *q) |
27357 |
+-{ |
27358 |
+- spinlock_t *lock_ptr; |
27359 |
+- int ret = 0; |
27360 |
+- |
27361 |
+- /* In the common case we don't take the spinlock, which is nice. */ |
27362 |
+-retry: |
27363 |
+- /* |
27364 |
+- * q->lock_ptr can change between this read and the following spin_lock. |
27365 |
+- * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and |
27366 |
+- * optimizing lock_ptr out of the logic below. |
27367 |
+- */ |
27368 |
+- lock_ptr = READ_ONCE(q->lock_ptr); |
27369 |
+- if (lock_ptr != NULL) { |
27370 |
+- spin_lock(lock_ptr); |
27371 |
+- /* |
27372 |
+- * q->lock_ptr can change between reading it and |
27373 |
+- * spin_lock(), causing us to take the wrong lock. This |
27374 |
+- * corrects the race condition. |
27375 |
+- * |
27376 |
+- * Reasoning goes like this: if we have the wrong lock, |
27377 |
+- * q->lock_ptr must have changed (maybe several times) |
27378 |
+- * between reading it and the spin_lock(). It can |
27379 |
+- * change again after the spin_lock() but only if it was |
27380 |
+- * already changed before the spin_lock(). It cannot, |
27381 |
+- * however, change back to the original value. Therefore |
27382 |
+- * we can detect whether we acquired the correct lock. |
27383 |
+- */ |
27384 |
+- if (unlikely(lock_ptr != q->lock_ptr)) { |
27385 |
+- spin_unlock(lock_ptr); |
27386 |
+- goto retry; |
27387 |
+- } |
27388 |
+- __unqueue_futex(q); |
27389 |
+- |
27390 |
+- BUG_ON(q->pi_state); |
27391 |
+- |
27392 |
+- spin_unlock(lock_ptr); |
27393 |
+- ret = 1; |
27394 |
+- } |
27395 |
+- |
27396 |
+- return ret; |
27397 |
+-} |
27398 |
+- |
27399 |
+-/* |
27400 |
+- * PI futexes can not be requeued and must remove themselves from the |
27401 |
+- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held. |
27402 |
+- */ |
27403 |
+-static void unqueue_me_pi(struct futex_q *q) |
27404 |
+-{ |
27405 |
+- __unqueue_futex(q); |
27406 |
+- |
27407 |
+- BUG_ON(!q->pi_state); |
27408 |
+- put_pi_state(q->pi_state); |
27409 |
+- q->pi_state = NULL; |
27410 |
+-} |
27411 |
+- |
27412 |
+-static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
27413 |
+- struct task_struct *argowner) |
27414 |
+-{ |
27415 |
+- struct futex_pi_state *pi_state = q->pi_state; |
27416 |
+- struct task_struct *oldowner, *newowner; |
27417 |
+- u32 uval, curval, newval, newtid; |
27418 |
+- int err = 0; |
27419 |
+- |
27420 |
+- oldowner = pi_state->owner; |
27421 |
+- |
27422 |
+- /* |
27423 |
+- * We are here because either: |
27424 |
+- * |
27425 |
+- * - we stole the lock and pi_state->owner needs updating to reflect |
27426 |
+- * that (@argowner == current), |
27427 |
+- * |
27428 |
+- * or: |
27429 |
+- * |
27430 |
+- * - someone stole our lock and we need to fix things to point to the |
27431 |
+- * new owner (@argowner == NULL). |
27432 |
+- * |
27433 |
+- * Either way, we have to replace the TID in the user space variable. |
27434 |
+- * This must be atomic as we have to preserve the owner died bit here. |
27435 |
+- * |
27436 |
+- * Note: We write the user space value _before_ changing the pi_state |
27437 |
+- * because we can fault here. Imagine swapped out pages or a fork |
27438 |
+- * that marked all the anonymous memory readonly for cow. |
27439 |
+- * |
27440 |
+- * Modifying pi_state _before_ the user space value would leave the |
27441 |
+- * pi_state in an inconsistent state when we fault here, because we |
27442 |
+- * need to drop the locks to handle the fault. This might be observed |
27443 |
+- * in the PID checks when attaching to PI state. |
27444 |
+- */ |
27445 |
+-retry: |
27446 |
+- if (!argowner) { |
27447 |
+- if (oldowner != current) { |
27448 |
+- /* |
27449 |
+- * We raced against a concurrent self; things are |
27450 |
+- * already fixed up. Nothing to do. |
27451 |
+- */ |
27452 |
+- return 0; |
27453 |
+- } |
27454 |
+- |
27455 |
+- if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { |
27456 |
+- /* We got the lock. pi_state is correct. Tell caller. */ |
27457 |
+- return 1; |
27458 |
+- } |
27459 |
+- |
27460 |
+- /* |
27461 |
+- * The trylock just failed, so either there is an owner or |
27462 |
+- * there is a higher priority waiter than this one. |
27463 |
+- */ |
27464 |
+- newowner = rt_mutex_owner(&pi_state->pi_mutex); |
27465 |
+- /* |
27466 |
+- * If the higher priority waiter has not yet taken over the |
27467 |
+- * rtmutex then newowner is NULL. We can't return here with |
27468 |
+- * that state because it's inconsistent vs. the user space |
27469 |
+- * state. So drop the locks and try again. It's a valid |
27470 |
+- * situation and not any different from the other retry |
27471 |
+- * conditions. |
27472 |
+- */ |
27473 |
+- if (unlikely(!newowner)) { |
27474 |
+- err = -EAGAIN; |
27475 |
+- goto handle_err; |
27476 |
+- } |
27477 |
+- } else { |
27478 |
+- WARN_ON_ONCE(argowner != current); |
27479 |
+- if (oldowner == current) { |
27480 |
+- /* |
27481 |
+- * We raced against a concurrent self; things are |
27482 |
+- * already fixed up. Nothing to do. |
27483 |
+- */ |
27484 |
+- return 1; |
27485 |
+- } |
27486 |
+- newowner = argowner; |
27487 |
+- } |
27488 |
+- |
27489 |
+- newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
27490 |
+- /* Owner died? */ |
27491 |
+- if (!pi_state->owner) |
27492 |
+- newtid |= FUTEX_OWNER_DIED; |
27493 |
+- |
27494 |
+- err = get_futex_value_locked(&uval, uaddr); |
27495 |
+- if (err) |
27496 |
+- goto handle_err; |
27497 |
+- |
27498 |
+- for (;;) { |
27499 |
+- newval = (uval & FUTEX_OWNER_DIED) | newtid; |
27500 |
+- |
27501 |
+- err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
27502 |
+- if (err) |
27503 |
+- goto handle_err; |
27504 |
+- |
27505 |
+- if (curval == uval) |
27506 |
+- break; |
27507 |
+- uval = curval; |
27508 |
+- } |
27509 |
+- |
27510 |
+- /* |
27511 |
+- * We fixed up user space. Now we need to fix the pi_state |
27512 |
+- * itself. |
27513 |
+- */ |
27514 |
+- pi_state_update_owner(pi_state, newowner); |
27515 |
+- |
27516 |
+- return argowner == current; |
27517 |
+- |
27518 |
+- /* |
27519 |
+- * In order to reschedule or handle a page fault, we need to drop the |
27520 |
+- * locks here. In the case of a fault, this gives the other task |
27521 |
+- * (either the highest priority waiter itself or the task which stole |
27522 |
+- * the rtmutex) the chance to try the fixup of the pi_state. So once we |
27523 |
+- * are back from handling the fault we need to check the pi_state after |
27524 |
+- * reacquiring the locks and before trying to do another fixup. When |
27525 |
+- * the fixup has been done already we simply return. |
27526 |
+- * |
27527 |
+- * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely |
27528 |
+- * drop hb->lock since the caller owns the hb -> futex_q relation. |
27529 |
+- * Dropping the pi_mutex->wait_lock requires the state revalidate. |
27530 |
+- */ |
27531 |
+-handle_err: |
27532 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
27533 |
+- spin_unlock(q->lock_ptr); |
27534 |
+- |
27535 |
+- switch (err) { |
27536 |
+- case -EFAULT: |
27537 |
+- err = fault_in_user_writeable(uaddr); |
27538 |
+- break; |
27539 |
+- |
27540 |
+- case -EAGAIN: |
27541 |
+- cond_resched(); |
27542 |
+- err = 0; |
27543 |
+- break; |
27544 |
+- |
27545 |
+- default: |
27546 |
+- WARN_ON_ONCE(1); |
27547 |
+- break; |
27548 |
+- } |
27549 |
+- |
27550 |
+- spin_lock(q->lock_ptr); |
27551 |
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
27552 |
+- |
27553 |
+- /* |
27554 |
+- * Check if someone else fixed it for us: |
27555 |
+- */ |
27556 |
+- if (pi_state->owner != oldowner) |
27557 |
+- return argowner == current; |
27558 |
+- |
27559 |
+- /* Retry if err was -EAGAIN or the fault-in succeeded */ |
27560 |
+- if (!err) |
27561 |
+- goto retry; |
27562 |
+- |
27563 |
+- /* |
27564 |
+- * fault_in_user_writeable() failed so user state is immutable. At |
27565 |
+- * best we can make the kernel state consistent but user state will |
27566 |
+- * be most likely hosed and any subsequent unlock operation will be |
27567 |
+- * rejected due to PI futex rule [10]. |
27568 |
+- * |
27569 |
+- * Ensure that the rtmutex owner is also the pi_state owner despite |
27570 |
+- * the user space value claiming something different. There is no |
27571 |
+- * point in unlocking the rtmutex if current is the owner as it |
27572 |
+- * would need to wait until the next waiter has taken the rtmutex |
27573 |
+- * for this wrecked state. |
27574 |
+- * for this wreckaged state. |
27575 |
+- * |
27576 |
+- * The rtmutex has an owner - either current or some other |
27577 |
+- * task. See the EAGAIN loop above. |
27578 |
+- */ |
27579 |
+- pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); |
27580 |
+- |
27581 |
+- return err; |
27582 |
+-} |
27583 |
+- |
27584 |
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
27585 |
+- struct task_struct *argowner) |
27586 |
+-{ |
27587 |
+- struct futex_pi_state *pi_state = q->pi_state; |
27588 |
+- int ret; |
27589 |
+- |
27590 |
+- lockdep_assert_held(q->lock_ptr); |
27591 |
+- |
27592 |
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
27593 |
+- ret = __fixup_pi_state_owner(uaddr, q, argowner); |
27594 |
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
27595 |
+- return ret; |
27596 |
+-} |
27597 |
+- |
27598 |
+-static long futex_wait_restart(struct restart_block *restart); |
27599 |
+- |
27600 |
+-/** |
27601 |
+- * fixup_owner() - Post lock pi_state and corner case management |
27602 |
+- * @uaddr: user address of the futex |
27603 |
+- * @q: futex_q (contains pi_state and access to the rt_mutex) |
27604 |
+- * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) |
27605 |
+- * |
27606 |
+- * After attempting to lock an rt_mutex, this function is called to cleanup |
27607 |
+- * the pi_state owner as well as handle race conditions that may allow us to |
27608 |
+- * acquire the lock. Must be called with the hb lock held. |
27609 |
+- * |
27610 |
+- * Return: |
27611 |
+- * - 1 - success, lock taken; |
27612 |
+- * - 0 - success, lock not taken; |
27613 |
+- * - <0 - on error (-EFAULT) |
27614 |
+- */ |
27615 |
+-static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
27616 |
+-{ |
27617 |
+- if (locked) { |
27618 |
+- /* |
27619 |
+- * Got the lock. We might not be the anticipated owner if we |
27620 |
+- * did a lock-steal - fix up the PI-state in that case: |
27621 |
+- * |
27622 |
+- * Speculative pi_state->owner read (we don't hold wait_lock); |
27623 |
+- * since we own the lock pi_state->owner == current is the |
27624 |
+- * stable state, anything else needs more attention. |
27625 |
+- */ |
27626 |
+- if (q->pi_state->owner != current) |
27627 |
+- return fixup_pi_state_owner(uaddr, q, current); |
27628 |
+- return 1; |
27629 |
+- } |
27630 |
+- |
27631 |
+- /* |
27632 |
+- * If we didn't get the lock; check if anybody stole it from us. In |
27633 |
+- * that case, we need to fix up the uval to point to them instead of |
27634 |
+- * us, otherwise bad things happen. [10] |
27635 |
+- * |
27636 |
+- * Another speculative read; pi_state->owner == current is unstable |
27637 |
+- * but needs our attention. |
27638 |
+- */ |
27639 |
+- if (q->pi_state->owner == current) |
27640 |
+- return fixup_pi_state_owner(uaddr, q, NULL); |
27641 |
+- |
27642 |
+- /* |
27643 |
+- * Paranoia check. If we did not take the lock, then we should not be |
27644 |
+- * the owner of the rt_mutex. Warn and establish consistent state. |
27645 |
+- */ |
27646 |
+- if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) |
27647 |
+- return fixup_pi_state_owner(uaddr, q, current); |
27648 |
+- |
27649 |
+- return 0; |
27650 |
+-} |
27651 |
+- |
27652 |
+-/** |
27653 |
+- * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal |
27654 |
+- * @hb: the futex hash bucket, must be locked by the caller |
27655 |
+- * @q: the futex_q to queue up on |
27656 |
+- * @timeout: the prepared hrtimer_sleeper, or null for no timeout |
27657 |
+- */ |
27658 |
+-static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, |
27659 |
+- struct hrtimer_sleeper *timeout) |
27660 |
+-{ |
27661 |
+- /* |
27662 |
+- * The task state is guaranteed to be set before another task can |
27663 |
+- * wake it. set_current_state() is implemented using smp_store_mb() and |
27664 |
+- * queue_me() calls spin_unlock() upon completion, both serializing |
27665 |
+- * access to the hash list and forcing another memory barrier. |
27666 |
+- */ |
27667 |
+- set_current_state(TASK_INTERRUPTIBLE); |
27668 |
+- queue_me(q, hb); |
27669 |
+- |
27670 |
+- /* Arm the timer */ |
27671 |
+- if (timeout) |
27672 |
+- hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS); |
27673 |
+- |
27674 |
+- /* |
27675 |
+- * If we have been removed from the hash list, then another task |
27676 |
+- * has tried to wake us, and we can skip the call to schedule(). |
27677 |
+- */ |
27678 |
+- if (likely(!plist_node_empty(&q->list))) { |
27679 |
+- /* |
27680 |
+- * If the timer has already expired, current will already be |
27681 |
+- * flagged for rescheduling. Only call schedule if there |
27682 |
+- * is no timeout, or if it has yet to expire. |
27683 |
+- */ |
27684 |
+- if (!timeout || timeout->task) |
27685 |
+- freezable_schedule(); |
27686 |
+- } |
27687 |
+- __set_current_state(TASK_RUNNING); |
27688 |
+-} |
27689 |
+- |
27690 |
+-/** |
27691 |
+- * futex_wait_setup() - Prepare to wait on a futex |
27692 |
+- * @uaddr: the futex userspace address |
27693 |
+- * @val: the expected value |
27694 |
+- * @flags: futex flags (FLAGS_SHARED, etc.) |
27695 |
+- * @q: the associated futex_q |
27696 |
+- * @hb: storage for hash_bucket pointer to be returned to caller |
27697 |
+- * |
27698 |
+- * Setup the futex_q and locate the hash_bucket. Get the futex value and |
27699 |
+- * compare it with the expected value. Handle atomic faults internally. |
27700 |
+- * Return with the hb lock held on success, and unlocked on failure. |
27701 |
+- * |
27702 |
+- * Return: |
27703 |
+- * - 0 - uaddr contains val and hb has been locked; |
27704 |
+- * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked |
27705 |
+- */ |
27706 |
+-static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
27707 |
+- struct futex_q *q, struct futex_hash_bucket **hb) |
27708 |
+-{ |
27709 |
+- u32 uval; |
27710 |
+- int ret; |
27711 |
+- |
27712 |
+- /* |
27713 |
+- * Access the page AFTER the hash-bucket is locked. |
27714 |
+- * Order is important: |
27715 |
+- * |
27716 |
+- * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); |
27717 |
+- * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } |
27718 |
+- * |
27719 |
+- * The basic logical guarantee of a futex is that it blocks ONLY |
27720 |
+- * if cond(var) is known to be true at the time of blocking, for |
27721 |
+- * any cond. If we locked the hash-bucket after testing *uaddr, that |
27722 |
+- * would open a race condition where we could block indefinitely with |
27723 |
+- * cond(var) false, which would violate the guarantee. |
27724 |
+- * |
27725 |
+- * On the other hand, we insert q and release the hash-bucket only |
27726 |
+- * after testing *uaddr. This guarantees that futex_wait() will NOT |
27727 |
+- * absorb a wakeup if *uaddr does not match the desired values |
27728 |
+- * while the syscall executes. |
27729 |
+- */ |
27730 |
+-retry: |
27731 |
+- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); |
27732 |
+- if (unlikely(ret != 0)) |
27733 |
+- return ret; |
27734 |
+- |
27735 |
+-retry_private: |
27736 |
+- *hb = queue_lock(q); |
27737 |
+- |
27738 |
+- ret = get_futex_value_locked(&uval, uaddr); |
27739 |
+- |
27740 |
+- if (ret) { |
27741 |
+- queue_unlock(*hb); |
27742 |
+- |
27743 |
+- ret = get_user(uval, uaddr); |
27744 |
+- if (ret) |
27745 |
+- return ret; |
27746 |
+- |
27747 |
+- if (!(flags & FLAGS_SHARED)) |
27748 |
+- goto retry_private; |
27749 |
+- |
27750 |
+- goto retry; |
27751 |
+- } |
27752 |
+- |
27753 |
+- if (uval != val) { |
27754 |
+- queue_unlock(*hb); |
27755 |
+- ret = -EWOULDBLOCK; |
27756 |
+- } |
27757 |
+- |
27758 |
+- return ret; |
27759 |
+-} |
27760 |
+- |
27761 |
+-static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, |
27762 |
+- ktime_t *abs_time, u32 bitset) |
27763 |
+-{ |
27764 |
+- struct hrtimer_sleeper timeout, *to; |
27765 |
+- struct restart_block *restart; |
27766 |
+- struct futex_hash_bucket *hb; |
27767 |
+- struct futex_q q = futex_q_init; |
27768 |
+- int ret; |
27769 |
+- |
27770 |
+- if (!bitset) |
27771 |
+- return -EINVAL; |
27772 |
+- q.bitset = bitset; |
27773 |
+- |
27774 |
+- to = futex_setup_timer(abs_time, &timeout, flags, |
27775 |
+- current->timer_slack_ns); |
27776 |
+-retry: |
27777 |
+- /* |
27778 |
+- * Prepare to wait on uaddr. On success, it holds hb->lock and q |
27779 |
+- * is initialized. |
27780 |
+- */ |
27781 |
+- ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
27782 |
+- if (ret) |
27783 |
+- goto out; |
27784 |
+- |
27785 |
+- /* queue_me and wait for wakeup, timeout, or a signal. */ |
27786 |
+- futex_wait_queue_me(hb, &q, to); |
27787 |
+- |
27788 |
+- /* If we were woken (and unqueued), we succeeded, whatever. */ |
27789 |
+- ret = 0; |
27790 |
+- if (!unqueue_me(&q)) |
27791 |
+- goto out; |
27792 |
+- ret = -ETIMEDOUT; |
27793 |
+- if (to && !to->task) |
27794 |
+- goto out; |
27795 |
+- |
27796 |
+- /* |
27797 |
+- * We expect signal_pending(current), but we might be the |
27798 |
+- * victim of a spurious wakeup as well. |
27799 |
+- */ |
27800 |
+- if (!signal_pending(current)) |
27801 |
+- goto retry; |
27802 |
+- |
27803 |
+- ret = -ERESTARTSYS; |
27804 |
+- if (!abs_time) |
27805 |
+- goto out; |
27806 |
+- |
27807 |
+- restart = &current->restart_block; |
27808 |
+- restart->futex.uaddr = uaddr; |
27809 |
+- restart->futex.val = val; |
27810 |
+- restart->futex.time = *abs_time; |
27811 |
+- restart->futex.bitset = bitset; |
27812 |
+- restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
27813 |
+- |
27814 |
+- ret = set_restart_fn(restart, futex_wait_restart); |
27815 |
+- |
27816 |
+-out: |
27817 |
+- if (to) { |
27818 |
+- hrtimer_cancel(&to->timer); |
27819 |
+- destroy_hrtimer_on_stack(&to->timer); |
27820 |
+- } |
27821 |
+- return ret; |
27822 |
+-} |
27823 |
+- |
27824 |
+- |
27825 |
+-static long futex_wait_restart(struct restart_block *restart) |
27826 |
+-{ |
27827 |
+- u32 __user *uaddr = restart->futex.uaddr; |
27828 |
+- ktime_t t, *tp = NULL; |
27829 |
+- |
27830 |
+- if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
27831 |
+- t = restart->futex.time; |
27832 |
+- tp = &t; |
27833 |
+- } |
27834 |
+- restart->fn = do_no_restart_syscall; |
27835 |
+- |
27836 |
+- return (long)futex_wait(uaddr, restart->futex.flags, |
27837 |
+- restart->futex.val, tp, restart->futex.bitset); |
27838 |
+-} |
27839 |
+- |
27840 |
+- |
27841 |
+-/* |
27842 |
+- * Userspace tried a 0 -> TID atomic transition of the futex value |
27843 |
+- * and failed. The kernel side here does the whole locking operation: |
27844 |
+- * if there are waiters then it will block as a consequence of relying |
27845 |
+- * on rt-mutexes, it does PI, etc. (Due to races the kernel might see |
27846 |
+- * a 0 value of the futex too.) |
27847 |
+- * |
27848 |
+- * Also serves as futex trylock_pi(), with the corresponding semantics. |
27849 |
+- */ |
27850 |
+-static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
27851 |
+- ktime_t *time, int trylock) |
27852 |
+-{ |
27853 |
+- struct hrtimer_sleeper timeout, *to; |
27854 |
+- struct task_struct *exiting = NULL; |
27855 |
+- struct rt_mutex_waiter rt_waiter; |
27856 |
+- struct futex_hash_bucket *hb; |
27857 |
+- struct futex_q q = futex_q_init; |
27858 |
+- int res, ret; |
27859 |
+- |
27860 |
+- if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
27861 |
+- return -ENOSYS; |
27862 |
+- |
27863 |
+- if (refill_pi_state_cache()) |
27864 |
+- return -ENOMEM; |
27865 |
+- |
27866 |
+- to = futex_setup_timer(time, &timeout, flags, 0); |
27867 |
+- |
27868 |
+-retry: |
27869 |
+- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); |
27870 |
+- if (unlikely(ret != 0)) |
27871 |
+- goto out; |
27872 |
+- |
27873 |
+-retry_private: |
27874 |
+- hb = queue_lock(&q); |
27875 |
+- |
27876 |
+- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, |
27877 |
+- &exiting, 0); |
27878 |
+- if (unlikely(ret)) { |
27879 |
+- /* |
27880 |
+- * Atomic work succeeded and we got the lock, |
27881 |
+- * or failed. Either way, we do _not_ block. |
27882 |
+- */ |
27883 |
+- switch (ret) { |
27884 |
+- case 1: |
27885 |
+- /* We got the lock. */ |
27886 |
+- ret = 0; |
27887 |
+- goto out_unlock_put_key; |
27888 |
+- case -EFAULT: |
27889 |
+- goto uaddr_faulted; |
27890 |
+- case -EBUSY: |
27891 |
+- case -EAGAIN: |
27892 |
+- /* |
27893 |
+- * Two reasons for this: |
27894 |
+- * - EBUSY: Task is exiting and we just wait for the |
27895 |
+- * exit to complete. |
27896 |
+- * - EAGAIN: The user space value changed. |
27897 |
+- */ |
27898 |
+- queue_unlock(hb); |
27899 |
+- /* |
27900 |
+- * Handle the case where the owner is in the middle of |
27901 |
+- * exiting. Wait for the exit to complete otherwise |
27902 |
+- * this task might loop forever, aka. live lock. |
27903 |
+- */ |
27904 |
+- wait_for_owner_exiting(ret, exiting); |
27905 |
+- cond_resched(); |
27906 |
+- goto retry; |
27907 |
+- default: |
27908 |
+- goto out_unlock_put_key; |
27909 |
+- } |
27910 |
+- } |
27911 |
+- |
27912 |
+- WARN_ON(!q.pi_state); |
27913 |
+- |
27914 |
+- /* |
27915 |
+- * Only actually queue now that the atomic ops are done: |
27916 |
+- */ |
27917 |
+- __queue_me(&q, hb); |
27918 |
+- |
27919 |
+- if (trylock) { |
27920 |
+- ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); |
27921 |
+- /* Fixup the trylock return value: */ |
27922 |
+- ret = ret ? 0 : -EWOULDBLOCK; |
27923 |
+- goto no_block; |
27924 |
+- } |
27925 |
+- |
27926 |
+- rt_mutex_init_waiter(&rt_waiter); |
27927 |
+- |
27928 |
+- /* |
27929 |
+- * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not |
27930 |
+- * hold it while doing rt_mutex_start_proxy(), because then it will |
27931 |
+- * include hb->lock in the blocking chain, even though we'll not in |
27932 |
+- * fact hold it while blocking. This will lead it to report -EDEADLK |
27933 |
+- * and BUG when futex_unlock_pi() interleaves with this. |
27934 |
+- * |
27935 |
+- * Therefore acquire wait_lock while holding hb->lock, but drop the |
27936 |
+- * latter before calling __rt_mutex_start_proxy_lock(). This |
27937 |
+- * interleaves with futex_unlock_pi() -- which does a similar lock |
27938 |
+- * handoff -- such that the latter can observe the futex_q::pi_state |
27939 |
+- * before __rt_mutex_start_proxy_lock() is done. |
27940 |
+- */ |
27941 |
+- raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); |
27942 |
+- spin_unlock(q.lock_ptr); |
27943 |
+- /* |
27944 |
+- * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter |
27945 |
+- * such that futex_unlock_pi() is guaranteed to observe the waiter when |
27946 |
+- * it sees the futex_q::pi_state. |
27947 |
+- */ |
27948 |
+- ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); |
27949 |
+- raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); |
27950 |
+- |
27951 |
+- if (ret) { |
27952 |
+- if (ret == 1) |
27953 |
+- ret = 0; |
27954 |
+- goto cleanup; |
27955 |
+- } |
27956 |
+- |
27957 |
+- if (unlikely(to)) |
27958 |
+- hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); |
27959 |
+- |
27960 |
+- ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
27961 |
+- |
27962 |
+-cleanup: |
27963 |
+- spin_lock(q.lock_ptr); |
27964 |
+- /* |
27965 |
+- * If we failed to acquire the lock (deadlock/signal/timeout), we must |
27966 |
+- * first acquire the hb->lock before removing the lock from the |
27967 |
+- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait |
27968 |
+- * lists consistent. |
27969 |
+- * |
27970 |
+- * In particular; it is important that futex_unlock_pi() can not |
27971 |
+- * observe this inconsistency. |
27972 |
+- */ |
27973 |
+- if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) |
27974 |
+- ret = 0; |
27975 |
+- |
27976 |
+-no_block: |
27977 |
+- /* |
27978 |
+- * Fixup the pi_state owner and possibly acquire the lock if we |
27979 |
+- * haven't already. |
27980 |
+- */ |
27981 |
+- res = fixup_owner(uaddr, &q, !ret); |
27982 |
+- /* |
27983 |
+- * If fixup_owner() returned an error, propagate that. If it acquired |
27984 |
+- * the lock, clear our -ETIMEDOUT or -EINTR. |
27985 |
+- */ |
27986 |
+- if (res) |
27987 |
+- ret = (res < 0) ? res : 0; |
27988 |
+- |
27989 |
+- unqueue_me_pi(&q); |
27990 |
+- spin_unlock(q.lock_ptr); |
27991 |
+- goto out; |
27992 |
+- |
27993 |
+-out_unlock_put_key: |
27994 |
+- queue_unlock(hb); |
27995 |
+- |
27996 |
+-out: |
27997 |
+- if (to) { |
27998 |
+- hrtimer_cancel(&to->timer); |
27999 |
+- destroy_hrtimer_on_stack(&to->timer); |
28000 |
+- } |
28001 |
+- return ret != -EINTR ? ret : -ERESTARTNOINTR; |
28002 |
+- |
28003 |
+-uaddr_faulted: |
28004 |
+- queue_unlock(hb); |
28005 |
+- |
28006 |
+- ret = fault_in_user_writeable(uaddr); |
28007 |
+- if (ret) |
28008 |
+- goto out; |
28009 |
+- |
28010 |
+- if (!(flags & FLAGS_SHARED)) |
28011 |
+- goto retry_private; |
28012 |
+- |
28013 |
+- goto retry; |
28014 |
+-} |
28015 |
+- |
28016 |
+-/* |
28017 |
+- * Userspace attempted a TID -> 0 atomic transition, and failed. |
28018 |
+- * This is the in-kernel slowpath: we look up the PI state (if any), |
28019 |
+- * and do the rt-mutex unlock. |
28020 |
+- */ |
28021 |
+-static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
28022 |
+-{ |
28023 |
+- u32 curval, uval, vpid = task_pid_vnr(current); |
28024 |
+- union futex_key key = FUTEX_KEY_INIT; |
28025 |
+- struct futex_hash_bucket *hb; |
28026 |
+- struct futex_q *top_waiter; |
28027 |
+- int ret; |
28028 |
+- |
28029 |
+- if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
28030 |
+- return -ENOSYS; |
28031 |
+- |
28032 |
+-retry: |
28033 |
+- if (get_user(uval, uaddr)) |
28034 |
+- return -EFAULT; |
28035 |
+- /* |
28036 |
+- * We release only a lock we actually own: |
28037 |
+- */ |
28038 |
+- if ((uval & FUTEX_TID_MASK) != vpid) |
28039 |
+- return -EPERM; |
28040 |
+- |
28041 |
+- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE); |
28042 |
+- if (ret) |
28043 |
+- return ret; |
28044 |
+- |
28045 |
+- hb = hash_futex(&key); |
28046 |
+- spin_lock(&hb->lock); |
28047 |
+- |
28048 |
+- /* |
28049 |
+- * Check waiters first. We do not trust user space values at |
28050 |
+- * all and we at least want to know if user space fiddled |
28051 |
+- * with the futex value instead of blindly unlocking. |
28052 |
+- */ |
28053 |
+- top_waiter = futex_top_waiter(hb, &key); |
28054 |
+- if (top_waiter) { |
28055 |
+- struct futex_pi_state *pi_state = top_waiter->pi_state; |
28056 |
+- |
28057 |
+- ret = -EINVAL; |
28058 |
+- if (!pi_state) |
28059 |
+- goto out_unlock; |
28060 |
+- |
28061 |
+- /* |
28062 |
+- * If current does not own the pi_state then the futex is |
28063 |
+- * inconsistent and user space fiddled with the futex value. |
28064 |
+- */ |
28065 |
+- if (pi_state->owner != current) |
28066 |
+- goto out_unlock; |
28067 |
+- |
28068 |
+- get_pi_state(pi_state); |
28069 |
+- /* |
28070 |
+- * By taking wait_lock while still holding hb->lock, we ensure |
28071 |
+- * there is no point where we hold neither; and therefore |
28072 |
+- * wake_futex_pi() must observe a state consistent with what we |
28073 |
+- * observed. |
28074 |
+- * |
28075 |
+- * In particular; this forces __rt_mutex_start_proxy() to |
28076 |
+- * complete such that we're guaranteed to observe the |
28077 |
+- * rt_waiter. Also see the WARN in wake_futex_pi(). |
28078 |
+- */ |
28079 |
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
28080 |
+- spin_unlock(&hb->lock); |
28081 |
+- |
28082 |
+- /* drops pi_state->pi_mutex.wait_lock */ |
28083 |
+- ret = wake_futex_pi(uaddr, uval, pi_state); |
28084 |
+- |
28085 |
+- put_pi_state(pi_state); |
28086 |
+- |
28087 |
+- /* |
28088 |
+- * Success, we're done! No tricky corner cases. |
28089 |
+- */ |
28090 |
+- if (!ret) |
28091 |
+- return ret; |
28092 |
+- /* |
28093 |
+- * The atomic access to the futex value generated a |
28094 |
+- * pagefault, so retry the user-access and the wakeup: |
28095 |
+- */ |
28096 |
+- if (ret == -EFAULT) |
28097 |
+- goto pi_faulted; |
28098 |
+- /* |
28099 |
+- * An unconditional UNLOCK_PI op raced against a waiter |
28100 |
+- * setting the FUTEX_WAITERS bit. Try again. |
28101 |
+- */ |
28102 |
+- if (ret == -EAGAIN) |
28103 |
+- goto pi_retry; |
28104 |
+- /* |
28105 |
+- * wake_futex_pi has detected invalid state. Tell user |
28106 |
+- * space. |
28107 |
+- */ |
28108 |
+- return ret; |
28109 |
+- } |
28110 |
+- |
28111 |
+- /* |
28112 |
+- * We have no kernel internal state, i.e. no waiters in the |
28113 |
+- * kernel. Waiters which are about to queue themselves are stuck |
28114 |
+- * on hb->lock. So we can safely ignore them. We do neither |
28115 |
+- * preserve the WAITERS bit nor the OWNER_DIED one. We are the |
28116 |
+- * owner. |
28117 |
+- */ |
28118 |
+- if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { |
28119 |
+- spin_unlock(&hb->lock); |
28120 |
+- switch (ret) { |
28121 |
+- case -EFAULT: |
28122 |
+- goto pi_faulted; |
28123 |
+- |
28124 |
+- case -EAGAIN: |
28125 |
+- goto pi_retry; |
28126 |
+- |
28127 |
+- default: |
28128 |
+- WARN_ON_ONCE(1); |
28129 |
+- return ret; |
28130 |
+- } |
28131 |
+- } |
28132 |
+- |
28133 |
+- /* |
28134 |
+- * If uval has changed, let user space handle it. |
28135 |
+- */ |
28136 |
+- ret = (curval == uval) ? 0 : -EAGAIN; |
28137 |
+- |
28138 |
+-out_unlock: |
28139 |
+- spin_unlock(&hb->lock); |
28140 |
+- return ret; |
28141 |
+- |
28142 |
+-pi_retry: |
28143 |
+- cond_resched(); |
28144 |
+- goto retry; |
28145 |
+- |
28146 |
+-pi_faulted: |
28147 |
+- |
28148 |
+- ret = fault_in_user_writeable(uaddr); |
28149 |
+- if (!ret) |
28150 |
+- goto retry; |
28151 |
+- |
28152 |
+- return ret; |
28153 |
+-} |
28154 |
+- |
28155 |
+-/** |
28156 |
+- * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex |
28157 |
+- * @hb: the hash_bucket futex_q was originally enqueued on |
28158 |
+- * @q: the futex_q woken while waiting to be requeued |
28159 |
+- * @timeout: the timeout associated with the wait (NULL if none) |
28160 |
+- * |
28161 |
+- * Determine the cause for the early wakeup. |
28162 |
+- * |
28163 |
+- * Return: |
28164 |
+- * -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR |
28165 |
+- */ |
28166 |
+-static inline |
28167 |
+-int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, |
28168 |
+- struct futex_q *q, |
28169 |
+- struct hrtimer_sleeper *timeout) |
28170 |
+-{ |
28171 |
+- int ret; |
28172 |
+- |
28173 |
+- /* |
28174 |
+- * With the hb lock held, we avoid races while we process the wakeup. |
28175 |
+- * We only need to hold hb (and not hb2) to ensure atomicity as the |
28176 |
+- * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. |
28177 |
+- * It can't be requeued from uaddr2 to something else since we don't |
28178 |
+- * support a PI aware source futex for requeue. |
28179 |
+- */ |
28180 |
+- WARN_ON_ONCE(&hb->lock != q->lock_ptr); |
28181 |
+- |
28182 |
+- /* |
28183 |
+- * We were woken prior to requeue by a timeout or a signal. |
28184 |
+- * Unqueue the futex_q and determine which it was. |
28185 |
+- */ |
28186 |
+- plist_del(&q->list, &hb->chain); |
28187 |
+- hb_waiters_dec(hb); |
28188 |
+- |
28189 |
+- /* Handle spurious wakeups gracefully */ |
28190 |
+- ret = -EWOULDBLOCK; |
28191 |
+- if (timeout && !timeout->task) |
28192 |
+- ret = -ETIMEDOUT; |
28193 |
+- else if (signal_pending(current)) |
28194 |
+- ret = -ERESTARTNOINTR; |
28195 |
+- return ret; |
28196 |
+-} |
28197 |
+- |
28198 |
+-/** |
28199 |
+- * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
28200 |
+- * @uaddr: the futex we initially wait on (non-pi) |
28201 |
+- * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be |
28202 |
+- * the same type, no requeueing from private to shared, etc. |
28203 |
+- * @val: the expected value of uaddr |
28204 |
+- * @abs_time: absolute timeout |
28205 |
+- * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
28206 |
+- * @uaddr2: the pi futex we will take prior to returning to user-space |
28207 |
+- * |
28208 |
+- * The caller will wait on uaddr and will be requeued by futex_requeue() to |
28209 |
+- * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake |
28210 |
+- * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to |
28211 |
+- * userspace. This ensures the rt_mutex maintains an owner when it has waiters; |
28212 |
+- * without one, the pi logic would not know which task to boost/deboost, if |
28213 |
+- * there was a need to. |
28214 |
+- * |
28215 |
+- * We call schedule in futex_wait_queue_me() when we enqueue and return there |
28216 |
+- * via the following-- |
28217 |
+- * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
28218 |
+- * 2) wakeup on uaddr2 after a requeue |
28219 |
+- * 3) signal |
28220 |
+- * 4) timeout |
28221 |
+- * |
28222 |
+- * If 3, cleanup and return -ERESTARTNOINTR. |
28223 |
+- * |
28224 |
+- * If 2, we may then block on trying to take the rt_mutex and return via: |
28225 |
+- * 5) successful lock |
28226 |
+- * 6) signal |
28227 |
+- * 7) timeout |
28228 |
+- * 8) other lock acquisition failure |
28229 |
+- * |
28230 |
+- * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). |
28231 |
+- * |
28232 |
+- * If 4 or 7, we cleanup and return with -ETIMEDOUT. |
28233 |
+- * |
28234 |
+- * Return: |
28235 |
+- * - 0 - On success; |
28236 |
+- * - <0 - On error |
28237 |
+- */ |
28238 |
+-static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
28239 |
+- u32 val, ktime_t *abs_time, u32 bitset, |
28240 |
+- u32 __user *uaddr2) |
28241 |
+-{ |
28242 |
+- struct hrtimer_sleeper timeout, *to; |
28243 |
+- struct rt_mutex_waiter rt_waiter; |
28244 |
+- struct futex_hash_bucket *hb; |
28245 |
+- union futex_key key2 = FUTEX_KEY_INIT; |
28246 |
+- struct futex_q q = futex_q_init; |
28247 |
+- struct rt_mutex_base *pi_mutex; |
28248 |
+- int res, ret; |
28249 |
+- |
28250 |
+- if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
28251 |
+- return -ENOSYS; |
28252 |
+- |
28253 |
+- if (uaddr == uaddr2) |
28254 |
+- return -EINVAL; |
28255 |
+- |
28256 |
+- if (!bitset) |
28257 |
+- return -EINVAL; |
28258 |
+- |
28259 |
+- to = futex_setup_timer(abs_time, &timeout, flags, |
28260 |
+- current->timer_slack_ns); |
28261 |
+- |
28262 |
+- /* |
28263 |
+- * The waiter is allocated on our stack, manipulated by the requeue |
28264 |
+- * code while we sleep on uaddr. |
28265 |
+- */ |
28266 |
+- rt_mutex_init_waiter(&rt_waiter); |
28267 |
+- |
28268 |
+- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
28269 |
+- if (unlikely(ret != 0)) |
28270 |
+- goto out; |
28271 |
+- |
28272 |
+- q.bitset = bitset; |
28273 |
+- q.rt_waiter = &rt_waiter; |
28274 |
+- q.requeue_pi_key = &key2; |
28275 |
+- |
28276 |
+- /* |
28277 |
+- * Prepare to wait on uaddr. On success, it holds hb->lock and q |
28278 |
+- * is initialized. |
28279 |
+- */ |
28280 |
+- ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
28281 |
+- if (ret) |
28282 |
+- goto out; |
28283 |
+- |
28284 |
+- /* |
28285 |
+- * The check above which compares uaddrs is not sufficient for |
28286 |
+- * shared futexes. We need to compare the keys: |
28287 |
+- */ |
28288 |
+- if (match_futex(&q.key, &key2)) { |
28289 |
+- queue_unlock(hb); |
28290 |
+- ret = -EINVAL; |
28291 |
+- goto out; |
28292 |
+- } |
28293 |
+- |
28294 |
+- /* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
28295 |
+- futex_wait_queue_me(hb, &q, to); |
28296 |
+- |
28297 |
+- switch (futex_requeue_pi_wakeup_sync(&q)) { |
28298 |
+- case Q_REQUEUE_PI_IGNORE: |
28299 |
+- /* The waiter is still on uaddr1 */ |
28300 |
+- spin_lock(&hb->lock); |
28301 |
+- ret = handle_early_requeue_pi_wakeup(hb, &q, to); |
28302 |
+- spin_unlock(&hb->lock); |
28303 |
+- break; |
28304 |
+- |
28305 |
+- case Q_REQUEUE_PI_LOCKED: |
28306 |
+- /* The requeue acquired the lock */ |
28307 |
+- if (q.pi_state && (q.pi_state->owner != current)) { |
28308 |
+- spin_lock(q.lock_ptr); |
28309 |
+- ret = fixup_owner(uaddr2, &q, true); |
28310 |
+- /* |
28311 |
+- * Drop the reference to the pi state which the |
28312 |
+- * requeue_pi() code acquired for us. |
28313 |
+- */ |
28314 |
+- put_pi_state(q.pi_state); |
28315 |
+- spin_unlock(q.lock_ptr); |
28316 |
+- /* |
28317 |
+- * Adjust the return value. It's either -EFAULT or |
28318 |
+- * success (1) but the caller expects 0 for success. |
28319 |
+- */ |
28320 |
+- ret = ret < 0 ? ret : 0; |
28321 |
+- } |
28322 |
+- break; |
28323 |
+- |
28324 |
+- case Q_REQUEUE_PI_DONE: |
28325 |
+- /* Requeue completed. Current is 'pi_blocked_on' the rtmutex */ |
28326 |
+- pi_mutex = &q.pi_state->pi_mutex; |
28327 |
+- ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); |
28328 |
+- |
28329 |
+- /* Current is no longer pi_blocked_on */ |
28330 |
+- spin_lock(q.lock_ptr); |
28331 |
+- if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) |
28332 |
+- ret = 0; |
28333 |
+- |
28334 |
+- debug_rt_mutex_free_waiter(&rt_waiter); |
28335 |
+- /* |
28336 |
+- * Fixup the pi_state owner and possibly acquire the lock if we |
28337 |
+- * haven't already. |
28338 |
+- */ |
28339 |
+- res = fixup_owner(uaddr2, &q, !ret); |
28340 |
+- /* |
28341 |
+- * If fixup_owner() returned an error, propagate that. If it |
28342 |
+- * acquired the lock, clear -ETIMEDOUT or -EINTR. |
28343 |
+- */ |
28344 |
+- if (res) |
28345 |
+- ret = (res < 0) ? res : 0; |
28346 |
+- |
28347 |
+- unqueue_me_pi(&q); |
28348 |
+- spin_unlock(q.lock_ptr); |
28349 |
+- |
28350 |
+- if (ret == -EINTR) { |
28351 |
+- /* |
28352 |
+- * We've already been requeued, but cannot restart |
28353 |
+- * by calling futex_lock_pi() directly. We could |
28354 |
+- * restart this syscall, but it would detect that |
28355 |
+- * the user space "val" changed and return |
28356 |
+- * -EWOULDBLOCK. Save the overhead of the restart |
28357 |
+- * and return -EWOULDBLOCK directly. |
28358 |
+- */ |
28359 |
+- ret = -EWOULDBLOCK; |
28360 |
+- } |
28361 |
+- break; |
28362 |
+- default: |
28363 |
+- BUG(); |
28364 |
+- } |
28365 |
+- |
28366 |
+-out: |
28367 |
+- if (to) { |
28368 |
+- hrtimer_cancel(&to->timer); |
28369 |
+- destroy_hrtimer_on_stack(&to->timer); |
28370 |
+- } |
28371 |
+- return ret; |
28372 |
+-} |
28373 |
+- |
28374 |
+-/* |
28375 |
+- * Support for robust futexes: the kernel cleans up held futexes at |
28376 |
+- * thread exit time. |
28377 |
+- * |
28378 |
+- * Implementation: user-space maintains a per-thread list of locks it |
28379 |
+- * is holding. Upon do_exit(), the kernel carefully walks this list, |
28380 |
+- * and marks all locks that are owned by this thread with the |
28381 |
+- * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
28382 |
+- * always manipulated with the lock held, so the list is private and |
28383 |
+- * per-thread. Userspace also maintains a per-thread 'list_op_pending' |
28384 |
+- * field, to allow the kernel to clean up if the thread dies after |
28385 |
+- * acquiring the lock, but just before it could have added itself to |
28386 |
+- * the list. There can only be one such pending lock. |
28387 |
+- */ |
28388 |
+- |
28389 |
+-/** |
28390 |
+- * sys_set_robust_list() - Set the robust-futex list head of a task |
28391 |
+- * @head: pointer to the list-head |
28392 |
+- * @len: length of the list-head, as userspace expects |
28393 |
+- */ |
28394 |
+-SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, |
28395 |
+- size_t, len) |
28396 |
+-{ |
28397 |
+- if (!futex_cmpxchg_enabled) |
28398 |
+- return -ENOSYS; |
28399 |
+- /* |
28400 |
+- * The kernel knows only one size for now: |
28401 |
+- */ |
28402 |
+- if (unlikely(len != sizeof(*head))) |
28403 |
+- return -EINVAL; |
28404 |
+- |
28405 |
+- current->robust_list = head; |
28406 |
+- |
28407 |
+- return 0; |
28408 |
+-} |
28409 |
+- |
28410 |
+-/** |
28411 |
+- * sys_get_robust_list() - Get the robust-futex list head of a task |
28412 |
+- * @pid: pid of the process [zero for current task] |
28413 |
+- * @head_ptr: pointer to a list-head pointer, the kernel fills it in |
28414 |
+- * @len_ptr: pointer to a length field, the kernel fills in the header size |
28415 |
+- */ |
28416 |
+-SYSCALL_DEFINE3(get_robust_list, int, pid, |
28417 |
+- struct robust_list_head __user * __user *, head_ptr, |
28418 |
+- size_t __user *, len_ptr) |
28419 |
+-{ |
28420 |
+- struct robust_list_head __user *head; |
28421 |
+- unsigned long ret; |
28422 |
+- struct task_struct *p; |
28423 |
+- |
28424 |
+- if (!futex_cmpxchg_enabled) |
28425 |
+- return -ENOSYS; |
28426 |
+- |
28427 |
+- rcu_read_lock(); |
28428 |
+- |
28429 |
+- ret = -ESRCH; |
28430 |
+- if (!pid) |
28431 |
+- p = current; |
28432 |
+- else { |
28433 |
+- p = find_task_by_vpid(pid); |
28434 |
+- if (!p) |
28435 |
+- goto err_unlock; |
28436 |
+- } |
28437 |
+- |
28438 |
+- ret = -EPERM; |
28439 |
+- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
28440 |
+- goto err_unlock; |
28441 |
+- |
28442 |
+- head = p->robust_list; |
28443 |
+- rcu_read_unlock(); |
28444 |
+- |
28445 |
+- if (put_user(sizeof(*head), len_ptr)) |
28446 |
+- return -EFAULT; |
28447 |
+- return put_user(head, head_ptr); |
28448 |
+- |
28449 |
+-err_unlock: |
28450 |
+- rcu_read_unlock(); |
28451 |
+- |
28452 |
+- return ret; |
28453 |
+-} |
28454 |
+- |
28455 |
+-/* Constants for the pending_op argument of handle_futex_death */ |
28456 |
+-#define HANDLE_DEATH_PENDING true |
28457 |
+-#define HANDLE_DEATH_LIST false |
28458 |
+- |
28459 |
+-/* |
28460 |
+- * Process a futex-list entry, check whether it's owned by the |
28461 |
+- * dying task, and do notification if so: |
28462 |
+- */ |
28463 |
+-static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, |
28464 |
+- bool pi, bool pending_op) |
28465 |
+-{ |
28466 |
+- u32 uval, nval, mval; |
28467 |
+- int err; |
28468 |
+- |
28469 |
+- /* Futex address must be 32bit aligned */ |
28470 |
+- if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) |
28471 |
+- return -1; |
28472 |
+- |
28473 |
+-retry: |
28474 |
+- if (get_user(uval, uaddr)) |
28475 |
+- return -1; |
28476 |
+- |
28477 |
+- /* |
28478 |
+- * Special case for regular (non PI) futexes. The unlock path in |
28479 |
+- * user space has two race scenarios: |
28480 |
+- * |
28481 |
+- * 1. The unlock path releases the user space futex value and |
28482 |
+- * before it can execute the futex() syscall to wake up |
28483 |
+- * waiters it is killed. |
28484 |
+- * |
28485 |
+- * 2. A woken up waiter is killed before it can acquire the |
28486 |
+- * futex in user space. |
28487 |
+- * |
28488 |
+- * In both cases the TID validation below prevents a wakeup of |
28489 |
+- * potential waiters which can cause these waiters to block |
28490 |
+- * forever. |
28491 |
+- * |
28492 |
+- * In both cases the following conditions are met: |
28493 |
+- * |
28494 |
+- * 1) task->robust_list->list_op_pending != NULL |
28495 |
+- * @pending_op == true |
28496 |
+- * 2) User space futex value == 0 |
28497 |
+- * 3) Regular futex: @pi == false |
28498 |
+- * |
28499 |
+- * If these conditions are met, it is safe to attempt waking up a |
28500 |
+- * potential waiter without touching the user space futex value and |
28501 |
+- * trying to set the OWNER_DIED bit. The user space futex value is |
28502 |
+- * uncontended and the rest of the user space mutex state is |
28503 |
+- * consistent, so a woken waiter will just take over the |
28504 |
+- * uncontended futex. Setting the OWNER_DIED bit would create |
28505 |
+- * inconsistent state and malfunction of the user space owner died |
28506 |
+- * handling. |
28507 |
+- */ |
28508 |
+- if (pending_op && !pi && !uval) { |
28509 |
+- futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
28510 |
+- return 0; |
28511 |
+- } |
28512 |
+- |
28513 |
+- if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) |
28514 |
+- return 0; |
28515 |
+- |
28516 |
+- /* |
28517 |
+- * Ok, this dying thread is truly holding a futex |
28518 |
+- * of interest. Set the OWNER_DIED bit atomically |
28519 |
+- * via cmpxchg, and if the value had FUTEX_WAITERS |
28520 |
+- * set, wake up a waiter (if any). (We have to do a |
28521 |
+- * futex_wake() even if OWNER_DIED is already set - |
28522 |
+- * to handle the rare but possible case of recursive |
28523 |
+- * thread-death.) The rest of the cleanup is done in |
+- * userspace. |
+- */ |
+- mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
+- |
+- /* |
+- * We are not holding a lock here, but we want to have |
+- * the pagefault_disable/enable() protection because |
+- * we want to handle the fault gracefully. If the |
+- * access fails we try to fault in the futex with R/W |
+- * verification via get_user_pages. get_user() above |
+- * does not guarantee R/W access. If that fails we |
+- * give up and leave the futex locked. |
+- */ |
+- if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) { |
+- switch (err) { |
+- case -EFAULT: |
+- if (fault_in_user_writeable(uaddr)) |
+- return -1; |
+- goto retry; |
+- |
+- case -EAGAIN: |
+- cond_resched(); |
+- goto retry; |
+- |
+- default: |
+- WARN_ON_ONCE(1); |
+- return err; |
+- } |
+- } |
+- |
+- if (nval != uval) |
+- goto retry; |
+- |
+- /* |
+- * Wake robust non-PI futexes here. The wakeup of |
+- * PI futexes happens in exit_pi_state(): |
+- */ |
+- if (!pi && (uval & FUTEX_WAITERS)) |
+- futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
+- |
+- return 0; |
+-} |
+- |
+-/* |
+- * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
+- */ |
+-static inline int fetch_robust_entry(struct robust_list __user **entry, |
+- struct robust_list __user * __user *head, |
+- unsigned int *pi) |
+-{ |
+- unsigned long uentry; |
+- |
+- if (get_user(uentry, (unsigned long __user *)head)) |
+- return -EFAULT; |
+- |
+- *entry = (void __user *)(uentry & ~1UL); |
+- *pi = uentry & 1; |
+- |
+- return 0; |
+-} |
+- |
+-/* |
+- * Walk curr->robust_list (very carefully, it's a userspace list!) |
+- * and mark any locks found there dead, and notify any waiters. |
+- * |
+- * We silently return on any sign of list-walking problem. |
+- */ |
+-static void exit_robust_list(struct task_struct *curr) |
+-{ |
+- struct robust_list_head __user *head = curr->robust_list; |
+- struct robust_list __user *entry, *next_entry, *pending; |
+- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
+- unsigned int next_pi; |
+- unsigned long futex_offset; |
+- int rc; |
+- |
+- if (!futex_cmpxchg_enabled) |
+- return; |
+- |
+- /* |
+- * Fetch the list head (which was registered earlier, via |
+- * sys_set_robust_list()): |
+- */ |
+- if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
+- return; |
+- /* |
+- * Fetch the relative futex offset: |
+- */ |
+- if (get_user(futex_offset, &head->futex_offset)) |
+- return; |
+- /* |
+- * Fetch any possibly pending lock-add first, and handle it |
+- * if it exists: |
+- */ |
+- if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
+- return; |
+- |
+- next_entry = NULL; /* avoid warning with gcc */ |
+- while (entry != &head->list) { |
+- /* |
+- * Fetch the next entry in the list before calling |
+- * handle_futex_death: |
+- */ |
+- rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); |
+- /* |
+- * A pending lock might already be on the list, so |
+- * don't process it twice: |
+- */ |
+- if (entry != pending) { |
+- if (handle_futex_death((void __user *)entry + futex_offset, |
+- curr, pi, HANDLE_DEATH_LIST)) |
+- return; |
+- } |
+- if (rc) |
+- return; |
+- entry = next_entry; |
+- pi = next_pi; |
+- /* |
+- * Avoid excessively long or circular lists: |
+- */ |
+- if (!--limit) |
+- break; |
+- |
+- cond_resched(); |
+- } |
+- |
+- if (pending) { |
+- handle_futex_death((void __user *)pending + futex_offset, |
+- curr, pip, HANDLE_DEATH_PENDING); |
+- } |
+-} |
+- |
+-static void futex_cleanup(struct task_struct *tsk) |
+-{ |
+- if (unlikely(tsk->robust_list)) { |
+- exit_robust_list(tsk); |
+- tsk->robust_list = NULL; |
+- } |
+- |
+-#ifdef CONFIG_COMPAT |
+- if (unlikely(tsk->compat_robust_list)) { |
+- compat_exit_robust_list(tsk); |
+- tsk->compat_robust_list = NULL; |
+- } |
+-#endif |
+- |
+- if (unlikely(!list_empty(&tsk->pi_state_list))) |
+- exit_pi_state_list(tsk); |
+-} |
+- |
+-/** |
+- * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD |
+- * @tsk: task to set the state on |
+- * |
+- * Set the futex exit state of the task lockless. The futex waiter code |
+- * observes that state when a task is exiting and loops until the task has |
+- * actually finished the futex cleanup. The worst case for this is that the |
+- * waiter runs through the wait loop until the state becomes visible. |
+- * |
+- * This is called from the recursive fault handling path in do_exit(). |
+- * |
+- * This is best effort. Either the futex exit code has run already or |
28686 |
+- * not. If the OWNER_DIED bit has been set on the futex then the waiter can |
28687 |
+- * take it over. If not, the problem is pushed back to user space. If the |
28688 |
+- * futex exit code did not run yet, then an already queued waiter might |
28689 |
+- * block forever, but there is nothing which can be done about that. |
28690 |
+- */ |
28691 |
+-void futex_exit_recursive(struct task_struct *tsk) |
28692 |
+-{ |
28693 |
+- /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ |
28694 |
+- if (tsk->futex_state == FUTEX_STATE_EXITING) |
28695 |
+- mutex_unlock(&tsk->futex_exit_mutex); |
28696 |
+- tsk->futex_state = FUTEX_STATE_DEAD; |
28697 |
+-} |
28698 |
+- |
28699 |
+-static void futex_cleanup_begin(struct task_struct *tsk) |
28700 |
+-{ |
28701 |
+- /* |
28702 |
+- * Prevent various race issues against a concurrent incoming waiter |
28703 |
+- * including live locks by forcing the waiter to block on |
28704 |
+- * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in |
28705 |
+- * attach_to_pi_owner(). |
28706 |
+- */ |
28707 |
+- mutex_lock(&tsk->futex_exit_mutex); |
28708 |
+- |
28709 |
+- /* |
28710 |
+- * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. |
28711 |
+- * |
28712 |
+- * This ensures that all subsequent checks of tsk->futex_state in |
28713 |
+- * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with |
28714 |
+- * tsk->pi_lock held. |
28715 |
+- * |
28716 |
+- * It guarantees also that a pi_state which was queued right before |
28717 |
+- * the state change under tsk->pi_lock by a concurrent waiter must |
28718 |
+- * be observed in exit_pi_state_list(). |
28719 |
+- */ |
28720 |
+- raw_spin_lock_irq(&tsk->pi_lock); |
28721 |
+- tsk->futex_state = FUTEX_STATE_EXITING; |
28722 |
+- raw_spin_unlock_irq(&tsk->pi_lock); |
28723 |
+-} |
28724 |
+- |
28725 |
+-static void futex_cleanup_end(struct task_struct *tsk, int state) |
28726 |
+-{ |
28727 |
+- /* |
28728 |
+- * Lockless store. The only side effect is that an observer might |
28729 |
+- * take another loop until it becomes visible. |
28730 |
+- */ |
28731 |
+- tsk->futex_state = state; |
28732 |
+- /* |
28733 |
+- * Drop the exit protection. This unblocks waiters which observed |
28734 |
+- * FUTEX_STATE_EXITING to reevaluate the state. |
28735 |
+- */ |
28736 |
+- mutex_unlock(&tsk->futex_exit_mutex); |
28737 |
+-} |
28738 |
+- |
28739 |
+-void futex_exec_release(struct task_struct *tsk) |
28740 |
+-{ |
28741 |
+- /* |
28742 |
+- * The state handling is done for consistency, but in the case of |
28743 |
+- * exec() there is no way to prevent further damage as the PID stays |
28744 |
+- * the same. But for the unlikely and arguably buggy case that a |
28745 |
+- * futex is held on exec(), this provides at least as much state |
28746 |
+- * consistency protection which is possible. |
28747 |
+- */ |
28748 |
+- futex_cleanup_begin(tsk); |
28749 |
+- futex_cleanup(tsk); |
28750 |
+- /* |
28751 |
+- * Reset the state to FUTEX_STATE_OK. The task is alive and about |
28752 |
+- * exec a new binary. |
28753 |
+- */ |
28754 |
+- futex_cleanup_end(tsk, FUTEX_STATE_OK); |
28755 |
+-} |
28756 |
+- |
28757 |
+-void futex_exit_release(struct task_struct *tsk) |
28758 |
+-{ |
28759 |
+- futex_cleanup_begin(tsk); |
28760 |
+- futex_cleanup(tsk); |
28761 |
+- futex_cleanup_end(tsk, FUTEX_STATE_DEAD); |
28762 |
+-} |
28763 |
+- |
28764 |
+-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
28765 |
+- u32 __user *uaddr2, u32 val2, u32 val3) |
28766 |
+-{ |
28767 |
+- int cmd = op & FUTEX_CMD_MASK; |
28768 |
+- unsigned int flags = 0; |
28769 |
+- |
28770 |
+- if (!(op & FUTEX_PRIVATE_FLAG)) |
28771 |
+- flags |= FLAGS_SHARED; |
28772 |
+- |
28773 |
+- if (op & FUTEX_CLOCK_REALTIME) { |
28774 |
+- flags |= FLAGS_CLOCKRT; |
28775 |
+- if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && |
28776 |
+- cmd != FUTEX_LOCK_PI2) |
28777 |
+- return -ENOSYS; |
28778 |
+- } |
28779 |
+- |
28780 |
+- switch (cmd) { |
28781 |
+- case FUTEX_LOCK_PI: |
28782 |
+- case FUTEX_LOCK_PI2: |
28783 |
+- case FUTEX_UNLOCK_PI: |
28784 |
+- case FUTEX_TRYLOCK_PI: |
28785 |
+- case FUTEX_WAIT_REQUEUE_PI: |
28786 |
+- case FUTEX_CMP_REQUEUE_PI: |
28787 |
+- if (!futex_cmpxchg_enabled) |
28788 |
+- return -ENOSYS; |
28789 |
+- } |
28790 |
+- |
28791 |
+- switch (cmd) { |
28792 |
+- case FUTEX_WAIT: |
28793 |
+- val3 = FUTEX_BITSET_MATCH_ANY; |
28794 |
+- fallthrough; |
28795 |
+- case FUTEX_WAIT_BITSET: |
28796 |
+- return futex_wait(uaddr, flags, val, timeout, val3); |
28797 |
+- case FUTEX_WAKE: |
28798 |
+- val3 = FUTEX_BITSET_MATCH_ANY; |
28799 |
+- fallthrough; |
28800 |
+- case FUTEX_WAKE_BITSET: |
28801 |
+- return futex_wake(uaddr, flags, val, val3); |
28802 |
+- case FUTEX_REQUEUE: |
28803 |
+- return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); |
28804 |
+- case FUTEX_CMP_REQUEUE: |
28805 |
+- return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); |
28806 |
+- case FUTEX_WAKE_OP: |
28807 |
+- return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
28808 |
+- case FUTEX_LOCK_PI: |
28809 |
+- flags |= FLAGS_CLOCKRT; |
28810 |
+- fallthrough; |
28811 |
+- case FUTEX_LOCK_PI2: |
28812 |
+- return futex_lock_pi(uaddr, flags, timeout, 0); |
28813 |
+- case FUTEX_UNLOCK_PI: |
28814 |
+- return futex_unlock_pi(uaddr, flags); |
28815 |
+- case FUTEX_TRYLOCK_PI: |
28816 |
+- return futex_lock_pi(uaddr, flags, NULL, 1); |
28817 |
+- case FUTEX_WAIT_REQUEUE_PI: |
28818 |
+- val3 = FUTEX_BITSET_MATCH_ANY; |
28819 |
+- return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, |
28820 |
+- uaddr2); |
28821 |
+- case FUTEX_CMP_REQUEUE_PI: |
28822 |
+- return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); |
28823 |
+- } |
28824 |
+- return -ENOSYS; |
28825 |
+-} |
28826 |
+- |
28827 |
+-static __always_inline bool futex_cmd_has_timeout(u32 cmd) |
28828 |
+-{ |
28829 |
+- switch (cmd) { |
28830 |
+- case FUTEX_WAIT: |
28831 |
+- case FUTEX_LOCK_PI: |
28832 |
+- case FUTEX_LOCK_PI2: |
28833 |
+- case FUTEX_WAIT_BITSET: |
28834 |
+- case FUTEX_WAIT_REQUEUE_PI: |
28835 |
+- return true; |
28836 |
+- } |
28837 |
+- return false; |
28838 |
+-} |
28839 |
+- |
28840 |
+-static __always_inline int |
28841 |
+-futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) |
28842 |
+-{ |
28843 |
+- if (!timespec64_valid(ts)) |
28844 |
+- return -EINVAL; |
28845 |
+- |
28846 |
+- *t = timespec64_to_ktime(*ts); |
28847 |
+- if (cmd == FUTEX_WAIT) |
28848 |
+- *t = ktime_add_safe(ktime_get(), *t); |
28849 |
+- else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) |
28850 |
+- *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t); |
28851 |
+- return 0; |
28852 |
+-} |
28853 |
+- |
28854 |
+-SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
28855 |
+- const struct __kernel_timespec __user *, utime, |
28856 |
+- u32 __user *, uaddr2, u32, val3) |
28857 |
+-{ |
28858 |
+- int ret, cmd = op & FUTEX_CMD_MASK; |
28859 |
+- ktime_t t, *tp = NULL; |
28860 |
+- struct timespec64 ts; |
28861 |
+- |
28862 |
+- if (utime && futex_cmd_has_timeout(cmd)) { |
28863 |
+- if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) |
28864 |
+- return -EFAULT; |
28865 |
+- if (get_timespec64(&ts, utime)) |
28866 |
+- return -EFAULT; |
28867 |
+- ret = futex_init_timeout(cmd, op, &ts, &t); |
28868 |
+- if (ret) |
28869 |
+- return ret; |
28870 |
+- tp = &t; |
28871 |
+- } |
28872 |
+- |
28873 |
+- return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); |
28874 |
+-} |
28875 |
+- |
28876 |
+-#ifdef CONFIG_COMPAT |
28877 |
+-/* |
28878 |
+- * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
28879 |
+- */ |
28880 |
+-static inline int |
28881 |
+-compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, |
28882 |
+- compat_uptr_t __user *head, unsigned int *pi) |
28883 |
+-{ |
28884 |
+- if (get_user(*uentry, head)) |
28885 |
+- return -EFAULT; |
28886 |
+- |
28887 |
+- *entry = compat_ptr((*uentry) & ~1); |
28888 |
+- *pi = (unsigned int)(*uentry) & 1; |
28889 |
+- |
28890 |
+- return 0; |
28891 |
+-} |
28892 |
+- |
28893 |
+-static void __user *futex_uaddr(struct robust_list __user *entry, |
28894 |
+- compat_long_t futex_offset) |
28895 |
+-{ |
28896 |
+- compat_uptr_t base = ptr_to_compat(entry); |
28897 |
+- void __user *uaddr = compat_ptr(base + futex_offset); |
28898 |
+- |
28899 |
+- return uaddr; |
28900 |
+-} |
28901 |
+- |
28902 |
+-/* |
28903 |
+- * Walk curr->robust_list (very carefully, it's a userspace list!) |
28904 |
+- * and mark any locks found there dead, and notify any waiters. |
28905 |
+- * |
28906 |
+- * We silently return on any sign of list-walking problem. |
28907 |
+- */ |
28908 |
+-static void compat_exit_robust_list(struct task_struct *curr) |
28909 |
+-{ |
28910 |
+- struct compat_robust_list_head __user *head = curr->compat_robust_list; |
28911 |
+- struct robust_list __user *entry, *next_entry, *pending; |
28912 |
+- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
28913 |
+- unsigned int next_pi; |
28914 |
+- compat_uptr_t uentry, next_uentry, upending; |
28915 |
+- compat_long_t futex_offset; |
28916 |
+- int rc; |
28917 |
+- |
28918 |
+- if (!futex_cmpxchg_enabled) |
28919 |
+- return; |
28920 |
+- |
28921 |
+- /* |
28922 |
+- * Fetch the list head (which was registered earlier, via |
28923 |
+- * sys_set_robust_list()): |
28924 |
+- */ |
28925 |
+- if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
28926 |
+- return; |
28927 |
+- /* |
28928 |
+- * Fetch the relative futex offset: |
28929 |
+- */ |
28930 |
+- if (get_user(futex_offset, &head->futex_offset)) |
28931 |
+- return; |
28932 |
+- /* |
28933 |
+- * Fetch any possibly pending lock-add first, and handle it |
28934 |
+- * if it exists: |
28935 |
+- */ |
28936 |
+- if (compat_fetch_robust_entry(&upending, &pending, |
28937 |
+- &head->list_op_pending, &pip)) |
28938 |
+- return; |
28939 |
+- |
28940 |
+- next_entry = NULL; /* avoid warning with gcc */ |
28941 |
+- while (entry != (struct robust_list __user *) &head->list) { |
28942 |
+- /* |
28943 |
+- * Fetch the next entry in the list before calling |
28944 |
+- * handle_futex_death: |
28945 |
+- */ |
28946 |
+- rc = compat_fetch_robust_entry(&next_uentry, &next_entry, |
28947 |
+- (compat_uptr_t __user *)&entry->next, &next_pi); |
28948 |
+- /* |
28949 |
+- * A pending lock might already be on the list, so |
28950 |
+- * dont process it twice: |
28951 |
+- */ |
28952 |
+- if (entry != pending) { |
28953 |
+- void __user *uaddr = futex_uaddr(entry, futex_offset); |
28954 |
+- |
28955 |
+- if (handle_futex_death(uaddr, curr, pi, |
28956 |
+- HANDLE_DEATH_LIST)) |
28957 |
+- return; |
28958 |
+- } |
28959 |
+- if (rc) |
28960 |
+- return; |
28961 |
+- uentry = next_uentry; |
28962 |
+- entry = next_entry; |
28963 |
+- pi = next_pi; |
28964 |
+- /* |
28965 |
+- * Avoid excessively long or circular lists: |
28966 |
+- */ |
28967 |
+- if (!--limit) |
28968 |
+- break; |
28969 |
+- |
28970 |
+- cond_resched(); |
28971 |
+- } |
28972 |
+- if (pending) { |
28973 |
+- void __user *uaddr = futex_uaddr(pending, futex_offset); |
28974 |
+- |
28975 |
+- handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); |
28976 |
+- } |
28977 |
+-} |
28978 |
+- |
28979 |
+-COMPAT_SYSCALL_DEFINE2(set_robust_list, |
28980 |
+- struct compat_robust_list_head __user *, head, |
28981 |
+- compat_size_t, len) |
28982 |
+-{ |
28983 |
+- if (!futex_cmpxchg_enabled) |
28984 |
+- return -ENOSYS; |
28985 |
+- |
28986 |
+- if (unlikely(len != sizeof(*head))) |
28987 |
+- return -EINVAL; |
28988 |
+- |
28989 |
+- current->compat_robust_list = head; |
28990 |
+- |
28991 |
+- return 0; |
28992 |
+-} |
28993 |
+- |
28994 |
+-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, |
28995 |
+- compat_uptr_t __user *, head_ptr, |
28996 |
+- compat_size_t __user *, len_ptr) |
28997 |
+-{ |
28998 |
+- struct compat_robust_list_head __user *head; |
28999 |
+- unsigned long ret; |
29000 |
+- struct task_struct *p; |
29001 |
+- |
29002 |
+- if (!futex_cmpxchg_enabled) |
29003 |
+- return -ENOSYS; |
29004 |
+- |
29005 |
+- rcu_read_lock(); |
29006 |
+- |
29007 |
+- ret = -ESRCH; |
29008 |
+- if (!pid) |
29009 |
+- p = current; |
29010 |
+- else { |
29011 |
+- p = find_task_by_vpid(pid); |
29012 |
+- if (!p) |
29013 |
+- goto err_unlock; |
29014 |
+- } |
29015 |
+- |
29016 |
+- ret = -EPERM; |
29017 |
+- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
29018 |
+- goto err_unlock; |
29019 |
+- |
29020 |
+- head = p->compat_robust_list; |
29021 |
+- rcu_read_unlock(); |
29022 |
+- |
29023 |
+- if (put_user(sizeof(*head), len_ptr)) |
29024 |
+- return -EFAULT; |
29025 |
+- return put_user(ptr_to_compat(head), head_ptr); |
29026 |
+- |
29027 |
+-err_unlock: |
29028 |
+- rcu_read_unlock(); |
29029 |
+- |
29030 |
+- return ret; |
29031 |
+-} |
29032 |
+-#endif /* CONFIG_COMPAT */ |
29033 |
+- |
29034 |
+-#ifdef CONFIG_COMPAT_32BIT_TIME |
29035 |
+-SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, |
29036 |
+- const struct old_timespec32 __user *, utime, u32 __user *, uaddr2, |
29037 |
+- u32, val3) |
29038 |
+-{ |
29039 |
+- int ret, cmd = op & FUTEX_CMD_MASK; |
29040 |
+- ktime_t t, *tp = NULL; |
29041 |
+- struct timespec64 ts; |
29042 |
+- |
29043 |
+- if (utime && futex_cmd_has_timeout(cmd)) { |
29044 |
+- if (get_old_timespec32(&ts, utime)) |
29045 |
+- return -EFAULT; |
29046 |
+- ret = futex_init_timeout(cmd, op, &ts, &t); |
29047 |
+- if (ret) |
29048 |
+- return ret; |
29049 |
+- tp = &t; |
29050 |
+- } |
29051 |
+- |
29052 |
+- return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); |
29053 |
+-} |
29054 |
+-#endif /* CONFIG_COMPAT_32BIT_TIME */ |
29055 |
+- |
29056 |
+-static void __init futex_detect_cmpxchg(void) |
29057 |
+-{ |
29058 |
+-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
29059 |
+- u32 curval; |
29060 |
+- |
29061 |
+- /* |
29062 |
+- * This will fail and we want it. Some arch implementations do |
29063 |
+- * runtime detection of the futex_atomic_cmpxchg_inatomic() |
29064 |
+- * functionality. We want to know that before we call in any |
29065 |
+- * of the complex code paths. Also we want to prevent |
29066 |
+- * registration of robust lists in that case. NULL is |
29067 |
+- * guaranteed to fault and we get -EFAULT on functional |
29068 |
+- * implementation, the non-functional ones will return |
29069 |
+- * -ENOSYS. |
29070 |
+- */ |
29071 |
+- if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) |
29072 |
+- futex_cmpxchg_enabled = 1; |
29073 |
+-#endif |
29074 |
+-} |
29075 |
+- |
29076 |
+-static int __init futex_init(void) |
29077 |
+-{ |
29078 |
+- unsigned int futex_shift; |
29079 |
+- unsigned long i; |
29080 |
+- |
29081 |
+-#if CONFIG_BASE_SMALL |
29082 |
+- futex_hashsize = 16; |
29083 |
+-#else |
29084 |
+- futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); |
29085 |
+-#endif |
29086 |
+- |
29087 |
+- futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), |
29088 |
+- futex_hashsize, 0, |
29089 |
+- futex_hashsize < 256 ? HASH_SMALL : 0, |
29090 |
+- &futex_shift, NULL, |
29091 |
+- futex_hashsize, futex_hashsize); |
29092 |
+- futex_hashsize = 1UL << futex_shift; |
29093 |
+- |
29094 |
+- futex_detect_cmpxchg(); |
29095 |
+- |
29096 |
+- for (i = 0; i < futex_hashsize; i++) { |
29097 |
+- atomic_set(&futex_queues[i].waiters, 0); |
29098 |
+- plist_head_init(&futex_queues[i].chain); |
29099 |
+- spin_lock_init(&futex_queues[i].lock); |
29100 |
+- } |
29101 |
+- |
29102 |
+- return 0; |
29103 |
+-} |
29104 |
+-core_initcall(futex_init); |
29105 |
+diff --git a/kernel/futex/Makefile b/kernel/futex/Makefile |
29106 |
+new file mode 100644 |
29107 |
+index 0000000000000..b89ba3fba3437 |
29108 |
+--- /dev/null |
29109 |
++++ b/kernel/futex/Makefile |
29110 |
+@@ -0,0 +1,3 @@ |
29111 |
++# SPDX-License-Identifier: GPL-2.0 |
29112 |
++ |
29113 |
++obj-y += core.o |
29114 |
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c |
29115 |
+new file mode 100644 |
29116 |
+index 0000000000000..764e73622b386 |
29117 |
+--- /dev/null |
29118 |
++++ b/kernel/futex/core.c |
29119 |
+@@ -0,0 +1,4280 @@ |
29120 |
++// SPDX-License-Identifier: GPL-2.0-or-later |
29121 |
++/* |
29122 |
++ * Fast Userspace Mutexes (which I call "Futexes!"). |
29123 |
++ * (C) Rusty Russell, IBM 2002 |
29124 |
++ * |
29125 |
++ * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar |
29126 |
++ * (C) Copyright 2003 Red Hat Inc, All Rights Reserved |
29127 |
++ * |
29128 |
++ * Removed page pinning, fix privately mapped COW pages and other cleanups |
29129 |
++ * (C) Copyright 2003, 2004 Jamie Lokier |
29130 |
++ * |
29131 |
++ * Robust futex support started by Ingo Molnar |
29132 |
++ * (C) Copyright 2006 Red Hat Inc, All Rights Reserved |
29133 |
++ * Thanks to Thomas Gleixner for suggestions, analysis and fixes. |
29134 |
++ * |
29135 |
++ * PI-futex support started by Ingo Molnar and Thomas Gleixner |
29136 |
++ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@××××××.com> |
29137 |
++ * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@×××××××.com> |
29138 |
++ * |
29139 |
++ * PRIVATE futexes by Eric Dumazet |
29140 |
++ * Copyright (C) 2007 Eric Dumazet <dada1@×××××××××.com> |
29141 |
++ * |
29142 |
++ * Requeue-PI support by Darren Hart <dvhltc@××××××.com> |
29143 |
++ * Copyright (C) IBM Corporation, 2009 |
29144 |
++ * Thanks to Thomas Gleixner for conceptual design and careful reviews. |
29145 |
++ * |
29146 |
++ * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly |
29147 |
++ * enough at me, Linus for the original (flawed) idea, Matthew |
29148 |
++ * Kirkwood for proof-of-concept implementation. |
29149 |
++ * |
29150 |
++ * "The futexes are also cursed." |
29151 |
++ * "But they come in a choice of three flavours!" |
29152 |
++ */ |
29153 |
++#include <linux/compat.h> |
29154 |
++#include <linux/jhash.h> |
29155 |
++#include <linux/pagemap.h> |
29156 |
++#include <linux/syscalls.h> |
29157 |
++#include <linux/freezer.h> |
29158 |
++#include <linux/memblock.h> |
29159 |
++#include <linux/fault-inject.h> |
29160 |
++#include <linux/time_namespace.h> |
29161 |
++ |
29162 |
++#include <asm/futex.h> |
29163 |
++ |
29164 |
++#include "../locking/rtmutex_common.h" |
29165 |
++ |
29166 |
++/* |
29167 |
++ * READ this before attempting to hack on futexes! |
29168 |
++ * |
29169 |
++ * Basic futex operation and ordering guarantees |
29170 |
++ * ============================================= |
29171 |
++ * |
29172 |
++ * The waiter reads the futex value in user space and calls |
29173 |
++ * futex_wait(). This function computes the hash bucket and acquires |
29174 |
++ * the hash bucket lock. After that it reads the futex user space value |
29175 |
++ * again and verifies that the data has not changed. If it has not changed |
29176 |
++ * it enqueues itself into the hash bucket, releases the hash bucket lock |
29177 |
++ * and schedules. |
29178 |
++ * |
29179 |
++ * The waker side modifies the user space value of the futex and calls |
29180 |
++ * futex_wake(). This function computes the hash bucket and acquires the |
29181 |
++ * hash bucket lock. Then it looks for waiters on that futex in the hash |
29182 |
++ * bucket and wakes them. |
29183 |
++ * |
29184 |
++ * In futex wake up scenarios where no tasks are blocked on a futex, taking |
29185 |
++ * the hb spinlock can be avoided and simply return. In order for this |
29186 |
++ * optimization to work, ordering guarantees must exist so that the waiter |
29187 |
++ * being added to the list is acknowledged when the list is concurrently being |
29188 |
++ * checked by the waker, avoiding scenarios like the following: |
29189 |
++ * |
29190 |
++ * CPU 0 CPU 1 |
29191 |
++ * val = *futex; |
29192 |
++ * sys_futex(WAIT, futex, val); |
29193 |
++ * futex_wait(futex, val); |
29194 |
++ * uval = *futex; |
29195 |
++ * *futex = newval; |
29196 |
++ * sys_futex(WAKE, futex); |
29197 |
++ * futex_wake(futex); |
29198 |
++ * if (queue_empty()) |
29199 |
++ * return; |
29200 |
++ * if (uval == val) |
29201 |
++ * lock(hash_bucket(futex)); |
29202 |
++ * queue(); |
29203 |
++ * unlock(hash_bucket(futex)); |
29204 |
++ * schedule(); |
29205 |
++ * |
29206 |
++ * This would cause the waiter on CPU 0 to wait forever because it |
29207 |
++ * missed the transition of the user space value from val to newval |
29208 |
++ * and the waker did not find the waiter in the hash bucket queue. |
29209 |
++ * |
29210 |
++ * The correct serialization ensures that a waiter either observes |
29211 |
++ * the changed user space value before blocking or is woken by a |
29212 |
++ * concurrent waker: |
29213 |
++ * |
29214 |
++ * CPU 0 CPU 1 |
29215 |
++ * val = *futex; |
29216 |
++ * sys_futex(WAIT, futex, val); |
29217 |
++ * futex_wait(futex, val); |
29218 |
++ * |
29219 |
++ * waiters++; (a) |
29220 |
++ * smp_mb(); (A) <-- paired with -. |
29221 |
++ * | |
29222 |
++ * lock(hash_bucket(futex)); | |
29223 |
++ * | |
29224 |
++ * uval = *futex; | |
29225 |
++ * | *futex = newval; |
29226 |
++ * | sys_futex(WAKE, futex); |
29227 |
++ * | futex_wake(futex); |
29228 |
++ * | |
29229 |
++ * `--------> smp_mb(); (B) |
29230 |
++ * if (uval == val) |
29231 |
++ * queue(); |
29232 |
++ * unlock(hash_bucket(futex)); |
29233 |
++ * schedule(); if (waiters) |
29234 |
++ * lock(hash_bucket(futex)); |
29235 |
++ * else wake_waiters(futex); |
29236 |
++ * waiters--; (b) unlock(hash_bucket(futex)); |
29237 |
++ * |
29238 |
++ * Where (A) orders the waiters increment and the futex value read through |
29239 |
++ * atomic operations (see hb_waiters_inc) and where (B) orders the write |
29240 |
++ * to futex and the waiters read (see hb_waiters_pending()). |
29241 |
++ * |
29242 |
++ * This yields the following case (where X:=waiters, Y:=futex): |
29243 |
++ * |
29244 |
++ * X = Y = 0 |
29245 |
++ * |
29246 |
++ * w[X]=1 w[Y]=1 |
29247 |
++ * MB MB |
29248 |
++ * r[Y]=y r[X]=x |
29249 |
++ * |
29250 |
++ * Which guarantees that x==0 && y==0 is impossible; which translates back into |
29251 |
++ * the guarantee that we cannot both miss the futex variable change and the |
29252 |
++ * enqueue. |
29253 |
++ * |
29254 |
++ * Note that a new waiter is accounted for in (a) even when it is possible that |
29255 |
++ * the wait call can return error, in which case we backtrack from it in (b). |
29256 |
++ * Refer to the comment in queue_lock(). |
29257 |
++ * |
29258 |
++ * Similarly, in order to account for waiters being requeued on another |
29259 |
++ * address we always increment the waiters for the destination bucket before |
29260 |
++ * acquiring the lock. It then decrements them again after releasing it - |
29261 |
++ * the code that actually moves the futex(es) between hash buckets (requeue_futex) |
29262 |
++ * will do the additional required waiter count housekeeping. This is done for |
29263 |
++ * double_lock_hb() and double_unlock_hb(), respectively. |
29264 |
++ */ |
29265 |
++ |
29266 |
++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG |
29267 |
++#define futex_cmpxchg_enabled 1 |
29268 |
++#else |
29269 |
++static int __read_mostly futex_cmpxchg_enabled; |
29270 |
++#endif |
29271 |
++ |
29272 |
++/* |
29273 |
++ * Futex flags used to encode options to functions and preserve them across |
29274 |
++ * restarts. |
29275 |
++ */ |
29276 |
++#ifdef CONFIG_MMU |
29277 |
++# define FLAGS_SHARED 0x01 |
29278 |
++#else |
29279 |
++/* |
29280 |
++ * NOMMU does not have per process address space. Let the compiler optimize |
29281 |
++ * code away. |
29282 |
++ */ |
29283 |
++# define FLAGS_SHARED 0x00 |
29284 |
++#endif |
29285 |
++#define FLAGS_CLOCKRT 0x02 |
29286 |
++#define FLAGS_HAS_TIMEOUT 0x04 |
29287 |
++ |
29288 |
++/* |
29289 |
++ * Priority Inheritance state: |
29290 |
++ */ |
29291 |
++struct futex_pi_state { |
29292 |
++ /* |
29293 |
++ * list of 'owned' pi_state instances - these have to be |
29294 |
++ * cleaned up in do_exit() if the task exits prematurely: |
29295 |
++ */ |
29296 |
++ struct list_head list; |
29297 |
++ |
29298 |
++ /* |
29299 |
++ * The PI object: |
29300 |
++ */ |
29301 |
++ struct rt_mutex_base pi_mutex; |
29302 |
++ |
29303 |
++ struct task_struct *owner; |
29304 |
++ refcount_t refcount; |
29305 |
++ |
29306 |
++ union futex_key key; |
29307 |
++} __randomize_layout; |
29308 |
++ |
29309 |
++/** |
29310 |
++ * struct futex_q - The hashed futex queue entry, one per waiting task |
29311 |
++ * @list: priority-sorted list of tasks waiting on this futex |
29312 |
++ * @task: the task waiting on the futex |
29313 |
++ * @lock_ptr: the hash bucket lock |
29314 |
++ * @key: the key the futex is hashed on |
29315 |
++ * @pi_state: optional priority inheritance state |
29316 |
++ * @rt_waiter: rt_waiter storage for use with requeue_pi |
29317 |
++ * @requeue_pi_key: the requeue_pi target futex key |
29318 |
++ * @bitset: bitset for the optional bitmasked wakeup |
29319 |
++ * @requeue_state: State field for futex_requeue_pi() |
29320 |
++ * @requeue_wait: RCU wait for futex_requeue_pi() (RT only) |
29321 |
++ * |
29322 |
++ * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so |
29323 |
++ * we can wake only the relevant ones (hashed queues may be shared). |
29324 |
++ * |
29325 |
++ * A futex_q has a woken state, just like tasks have TASK_RUNNING. |
29326 |
++ * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. |
29327 |
++ * The order of wakeup is always to make the first condition true, then |
29328 |
++ * the second. |
29329 |
++ * |
29330 |
++ * PI futexes are typically woken before they are removed from the hash list via |
29331 |
++ * the rt_mutex code. See unqueue_me_pi(). |
29332 |
++ */ |
29333 |
++struct futex_q { |
29334 |
++ struct plist_node list; |
29335 |
++ |
29336 |
++ struct task_struct *task; |
29337 |
++ spinlock_t *lock_ptr; |
29338 |
++ union futex_key key; |
29339 |
++ struct futex_pi_state *pi_state; |
29340 |
++ struct rt_mutex_waiter *rt_waiter; |
29341 |
++ union futex_key *requeue_pi_key; |
29342 |
++ u32 bitset; |
29343 |
++ atomic_t requeue_state; |
29344 |
++#ifdef CONFIG_PREEMPT_RT |
29345 |
++ struct rcuwait requeue_wait; |
29346 |
++#endif |
29347 |
++} __randomize_layout; |
29348 |
++ |
29349 |
++/* |
29350 |
++ * On PREEMPT_RT, the hash bucket lock is a 'sleeping' spinlock with an |
29351 |
++ * underlying rtmutex. The task which is about to be requeued could have |
29352 |
++ * just woken up (timeout, signal). After the wake up the task has to |
29353 |
++ * acquire hash bucket lock, which is held by the requeue code. As a task |
29354 |
++ * can only be blocked on _ONE_ rtmutex at a time, the proxy lock blocking |
29355 |
++ * and the hash bucket lock blocking would collide and corrupt state. |
29356 |
++ * |
29357 |
++ * On !PREEMPT_RT this is not a problem and everything could be serialized |
29358 |
++ * on hash bucket lock, but aside of having the benefit of common code, |
29359 |
++ * this allows to avoid doing the requeue when the task is already on the |
29360 |
++ * way out and taking the hash bucket lock of the original uaddr1 when the |
29361 |
++ * requeue has been completed. |
29362 |
++ * |
29363 |
++ * The following state transitions are valid: |
29364 |
++ * |
29365 |
++ * On the waiter side: |
29366 |
++ * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_IGNORE |
29367 |
++ * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_WAIT |
29368 |
++ * |
29369 |
++ * On the requeue side: |
29370 |
++ * Q_REQUEUE_PI_NONE -> Q_REQUEUE_PI_INPROGRESS |
29371 |
++ * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_DONE/LOCKED |
29372 |
++ * Q_REQUEUE_PI_IN_PROGRESS -> Q_REQUEUE_PI_NONE (requeue failed) |
29373 |
++ * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_DONE/LOCKED |
29374 |
++ * Q_REQUEUE_PI_WAIT -> Q_REQUEUE_PI_IGNORE (requeue failed) |
29375 |
++ * |
29376 |
++ * The requeue side ignores a waiter with state Q_REQUEUE_PI_IGNORE as this |
29377 |
++ * signals that the waiter is already on the way out. It also means that |
29378 |
++ * the waiter is still on the 'wait' futex, i.e. uaddr1. |
29379 |
++ * |
29380 |
++ * The waiter side signals early wakeup to the requeue side either through |
29381 |
++ * setting state to Q_REQUEUE_PI_IGNORE or to Q_REQUEUE_PI_WAIT depending |
29382 |
++ * on the current state. In case of Q_REQUEUE_PI_IGNORE it can immediately |
29383 |
++ * proceed to take the hash bucket lock of uaddr1. If it set state to WAIT, |
29384 |
++ * which means the wakeup is interleaving with a requeue in progress it has |
29385 |
++ * to wait for the requeue side to change the state. Either to DONE/LOCKED |
29386 |
++ * or to IGNORE. DONE/LOCKED means the waiter q is now on the uaddr2 futex |
29387 |
++ * and either blocked (DONE) or has acquired it (LOCKED). IGNORE is set by |
29388 |
++ * the requeue side when the requeue attempt failed via deadlock detection |
29389 |
++ * and therefore the waiter q is still on the uaddr1 futex. |
29390 |
++ */ |
29391 |
++enum { |
29392 |
++ Q_REQUEUE_PI_NONE = 0, |
29393 |
++ Q_REQUEUE_PI_IGNORE, |
29394 |
++ Q_REQUEUE_PI_IN_PROGRESS, |
29395 |
++ Q_REQUEUE_PI_WAIT, |
29396 |
++ Q_REQUEUE_PI_DONE, |
29397 |
++ Q_REQUEUE_PI_LOCKED, |
29398 |
++}; |
29399 |
++ |
29400 |
++static const struct futex_q futex_q_init = { |
29401 |
++ /* list gets initialized in queue_me()*/ |
29402 |
++ .key = FUTEX_KEY_INIT, |
29403 |
++ .bitset = FUTEX_BITSET_MATCH_ANY, |
29404 |
++ .requeue_state = ATOMIC_INIT(Q_REQUEUE_PI_NONE), |
29405 |
++}; |
29406 |
++ |
29407 |
++/* |
29408 |
++ * Hash buckets are shared by all the futex_keys that hash to the same |
29409 |
++ * location. Each key may have multiple futex_q structures, one for each task |
29410 |
++ * waiting on a futex. |
29411 |
++ */ |
29412 |
++struct futex_hash_bucket { |
29413 |
++ atomic_t waiters; |
29414 |
++ spinlock_t lock; |
29415 |
++ struct plist_head chain; |
29416 |
++} ____cacheline_aligned_in_smp; |
29417 |
++ |
29418 |
++/* |
29419 |
++ * The base of the bucket array and its size are always used together |
29420 |
++ * (after initialization only in hash_futex()), so ensure that they |
29421 |
++ * reside in the same cacheline. |
29422 |
++ */ |
29423 |
++static struct { |
29424 |
++ struct futex_hash_bucket *queues; |
29425 |
++ unsigned long hashsize; |
29426 |
++} __futex_data __read_mostly __aligned(2*sizeof(long)); |
29427 |
++#define futex_queues (__futex_data.queues) |
29428 |
++#define futex_hashsize (__futex_data.hashsize) |
29429 |
++ |
29430 |
++ |
29431 |
++/* |
29432 |
++ * Fault injections for futexes. |
29433 |
++ */ |
29434 |
++#ifdef CONFIG_FAIL_FUTEX |
29435 |
++ |
29436 |
++static struct { |
29437 |
++ struct fault_attr attr; |
29438 |
++ |
29439 |
++ bool ignore_private; |
29440 |
++} fail_futex = { |
29441 |
++ .attr = FAULT_ATTR_INITIALIZER, |
29442 |
++ .ignore_private = false, |
29443 |
++}; |
29444 |
++ |
29445 |
++static int __init setup_fail_futex(char *str) |
29446 |
++{ |
29447 |
++ return setup_fault_attr(&fail_futex.attr, str); |
29448 |
++} |
29449 |
++__setup("fail_futex=", setup_fail_futex); |
29450 |
++ |
29451 |
++static bool should_fail_futex(bool fshared) |
29452 |
++{ |
29453 |
++ if (fail_futex.ignore_private && !fshared) |
29454 |
++ return false; |
29455 |
++ |
29456 |
++ return should_fail(&fail_futex.attr, 1); |
29457 |
++} |
29458 |
++ |
29459 |
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS |
29460 |
++ |
29461 |
++static int __init fail_futex_debugfs(void) |
29462 |
++{ |
29463 |
++ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; |
29464 |
++ struct dentry *dir; |
29465 |
++ |
29466 |
++ dir = fault_create_debugfs_attr("fail_futex", NULL, |
29467 |
++ &fail_futex.attr); |
29468 |
++ if (IS_ERR(dir)) |
29469 |
++ return PTR_ERR(dir); |
29470 |
++ |
29471 |
++ debugfs_create_bool("ignore-private", mode, dir, |
29472 |
++ &fail_futex.ignore_private); |
29473 |
++ return 0; |
29474 |
++} |
29475 |
++ |
29476 |
++late_initcall(fail_futex_debugfs); |
29477 |
++ |
29478 |
++#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ |
29479 |
++ |
29480 |
++#else |
29481 |
++static inline bool should_fail_futex(bool fshared) |
29482 |
++{ |
29483 |
++ return false; |
29484 |
++} |
29485 |
++#endif /* CONFIG_FAIL_FUTEX */ |
29486 |
++ |
29487 |
++#ifdef CONFIG_COMPAT |
29488 |
++static void compat_exit_robust_list(struct task_struct *curr); |
29489 |
++#endif |
29490 |
++ |
29491 |
++/* |
29492 |
++ * Reflects a new waiter being added to the waitqueue. |
29493 |
++ */ |
29494 |
++static inline void hb_waiters_inc(struct futex_hash_bucket *hb) |
29495 |
++{ |
29496 |
++#ifdef CONFIG_SMP |
29497 |
++ atomic_inc(&hb->waiters); |
29498 |
++ /* |
29499 |
++ * Full barrier (A), see the ordering comment above. |
29500 |
++ */ |
29501 |
++ smp_mb__after_atomic(); |
29502 |
++#endif |
29503 |
++} |
29504 |
++ |
29505 |
++/* |
29506 |
++ * Reflects a waiter being removed from the waitqueue by wakeup |
29507 |
++ * paths. |
29508 |
++ */ |
29509 |
++static inline void hb_waiters_dec(struct futex_hash_bucket *hb) |
29510 |
++{ |
29511 |
++#ifdef CONFIG_SMP |
29512 |
++ atomic_dec(&hb->waiters); |
29513 |
++#endif |
29514 |
++} |
29515 |
++ |
29516 |
++static inline int hb_waiters_pending(struct futex_hash_bucket *hb) |
29517 |
++{ |
29518 |
++#ifdef CONFIG_SMP |
29519 |
++ /* |
29520 |
++ * Full barrier (B), see the ordering comment above. |
29521 |
++ */ |
29522 |
++ smp_mb(); |
29523 |
++ return atomic_read(&hb->waiters); |
29524 |
++#else |
29525 |
++ return 1; |
29526 |
++#endif |
29527 |
++} |
29528 |
++ |
29529 |
++/** |
29530 |
++ * hash_futex - Return the hash bucket in the global hash |
29531 |
++ * @key: Pointer to the futex key for which the hash is calculated |
29532 |
++ * |
29533 |
++ * We hash on the keys returned from get_futex_key (see below) and return the |
29534 |
++ * corresponding hash bucket in the global hash. |
29535 |
++ */ |
29536 |
++static struct futex_hash_bucket *hash_futex(union futex_key *key) |
29537 |
++{ |
29538 |
++ u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, |
29539 |
++ key->both.offset); |
29540 |
++ |
29541 |
++ return &futex_queues[hash & (futex_hashsize - 1)]; |
29542 |
++} |
29543 |
++ |
29544 |
++ |
29545 |
++/** |
29546 |
++ * match_futex - Check whether two futex keys are equal |
29547 |
++ * @key1: Pointer to key1 |
29548 |
++ * @key2: Pointer to key2 |
29549 |
++ * |
29550 |
++ * Return 1 if two futex_keys are equal, 0 otherwise. |
29551 |
++ */ |
29552 |
++static inline int match_futex(union futex_key *key1, union futex_key *key2) |
29553 |
++{ |
29554 |
++ return (key1 && key2 |
29555 |
++ && key1->both.word == key2->both.word |
29556 |
++ && key1->both.ptr == key2->both.ptr |
29557 |
++ && key1->both.offset == key2->both.offset); |
29558 |
++} |
29559 |
++ |
29560 |
++enum futex_access { |
29561 |
++ FUTEX_READ, |
29562 |
++ FUTEX_WRITE |
29563 |
++}; |
29564 |
++ |
29565 |
++/** |
29566 |
++ * futex_setup_timer - set up the sleeping hrtimer. |
29567 |
++ * @time: ptr to the given timeout value |
29568 |
++ * @timeout: the hrtimer_sleeper structure to be set up |
29569 |
++ * @flags: futex flags |
29570 |
++ * @range_ns: optional range in ns |
29571 |
++ * |
29572 |
++ * Return: Initialized hrtimer_sleeper structure or NULL if no timeout |
29573 |
++ * value given |
29574 |
++ */ |
29575 |
++static inline struct hrtimer_sleeper * |
29576 |
++futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, |
29577 |
++ int flags, u64 range_ns) |
29578 |
++{ |
29579 |
++ if (!time) |
29580 |
++ return NULL; |
29581 |
++ |
29582 |
++ hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ? |
29583 |
++ CLOCK_REALTIME : CLOCK_MONOTONIC, |
29584 |
++ HRTIMER_MODE_ABS); |
29585 |
++ /* |
29586 |
++ * If range_ns is 0, calling hrtimer_set_expires_range_ns() is |
29587 |
++ * effectively the same as calling hrtimer_set_expires(). |
29588 |
++ */ |
29589 |
++ hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); |
29590 |
++ |
29591 |
++ return timeout; |
29592 |
++} |
29593 |
++ |
29594 |
++/* |
29595 |
++ * Generate a machine wide unique identifier for this inode. |
29596 |
++ * |
29597 |
++ * This relies on u64 not wrapping in the life-time of the machine; which with |
29598 |
++ * 1ns resolution means almost 585 years. |
29599 |
++ * |
29600 |
++ * This further relies on the fact that a well formed program will not unmap |
29601 |
++ * the file while it has a (shared) futex waiting on it. This mapping will have |
29602 |
++ * a file reference which pins the mount and inode. |
29603 |
++ * |
29604 |
++ * If for some reason an inode gets evicted and read back in again, it will get |
29605 |
++ * a new sequence number and will _NOT_ match, even though it is the exact same |
29606 |
++ * file. |
29607 |
++ * |
29608 |
++ * It is important that match_futex() will never have a false-positive, esp. |
29609 |
++ * for PI futexes that can mess up the state. The above argues that false-negatives |
29610 |
++ * are only possible for malformed programs. |
29611 |
++ */ |
29612 |
++static u64 get_inode_sequence_number(struct inode *inode) |
29613 |
++{ |
29614 |
++ static atomic64_t i_seq; |
29615 |
++ u64 old; |
29616 |
++ |
29617 |
++ /* Does the inode already have a sequence number? */ |
29618 |
++ old = atomic64_read(&inode->i_sequence); |
29619 |
++ if (likely(old)) |
29620 |
++ return old; |
29621 |
++ |
29622 |
++ for (;;) { |
29623 |
++ u64 new = atomic64_add_return(1, &i_seq); |
29624 |
++ if (WARN_ON_ONCE(!new)) |
29625 |
++ continue; |
29626 |
++ |
29627 |
++ old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new); |
29628 |
++ if (old) |
29629 |
++ return old; |
29630 |
++ return new; |
29631 |
++ } |
29632 |
++} |
29633 |
++ |
29634 |
++/** |
29635 |
++ * get_futex_key() - Get parameters which are the keys for a futex |
29636 |
++ * @uaddr: virtual address of the futex |
29637 |
++ * @fshared: false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED |
29638 |
++ * @key: address where result is stored. |
29639 |
++ * @rw: mapping needs to be read/write (values: FUTEX_READ, |
29640 |
++ * FUTEX_WRITE) |
29641 |
++ * |
29642 |
++ * Return: a negative error code or 0 |
29643 |
++ * |
29644 |
++ * The key words are stored in @key on success. |
29645 |
++ * |
29646 |
++ * For shared mappings (when @fshared), the key is: |
29647 |
++ * |
29648 |
++ * ( inode->i_sequence, page->index, offset_within_page ) |
29649 |
++ * |
29650 |
++ * [ also see get_inode_sequence_number() ] |
29651 |
++ * |
29652 |
++ * For private mappings (or when !@fshared), the key is: |
29653 |
++ * |
29654 |
++ * ( current->mm, address, 0 ) |
29655 |
++ * |
29656 |
++ * This allows (cross process, where applicable) identification of the futex |
29657 |
++ * without keeping the page pinned for the duration of the FUTEX_WAIT. |
29658 |
++ * |
29659 |
++ * lock_page() might sleep, the caller should not hold a spinlock. |
29660 |
++ */ |
29661 |
++static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, |
29662 |
++ enum futex_access rw) |
29663 |
++{ |
29664 |
++ unsigned long address = (unsigned long)uaddr; |
29665 |
++ struct mm_struct *mm = current->mm; |
29666 |
++ struct page *page, *tail; |
29667 |
++ struct address_space *mapping; |
29668 |
++ int err, ro = 0; |
29669 |
++ |
29670 |
++ /* |
29671 |
++ * The futex address must be "naturally" aligned. |
29672 |
++ */ |
29673 |
++ key->both.offset = address % PAGE_SIZE; |
29674 |
++ if (unlikely((address % sizeof(u32)) != 0)) |
29675 |
++ return -EINVAL; |
29676 |
++ address -= key->both.offset; |
29677 |
++ |
29678 |
++ if (unlikely(!access_ok(uaddr, sizeof(u32)))) |
29679 |
++ return -EFAULT; |
29680 |
++ |
29681 |
++ if (unlikely(should_fail_futex(fshared))) |
29682 |
++ return -EFAULT; |
29683 |
++ |
29684 |
++ /* |
29685 |
++ * PROCESS_PRIVATE futexes are fast. |
29686 |
++ * As the mm cannot disappear under us and the 'key' only needs |
29687 |
++ * virtual address, we dont even have to find the underlying vma. |
29688 |
++ * Note : We do have to check 'uaddr' is a valid user address, |
29689 |
++ * but access_ok() should be faster than find_vma() |
29690 |
++ */ |
29691 |
++ if (!fshared) { |
29692 |
++ key->private.mm = mm; |
29693 |
++ key->private.address = address; |
29694 |
++ return 0; |
29695 |
++ } |
29696 |
++ |
29697 |
++again: |
29698 |
++ /* Ignore any VERIFY_READ mapping (futex common case) */ |
29699 |
++ if (unlikely(should_fail_futex(true))) |
29700 |
++ return -EFAULT; |
29701 |
++ |
29702 |
++ err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); |
29703 |
++ /* |
29704 |
++ * If write access is not required (eg. FUTEX_WAIT), try |
29705 |
++ * and get read-only access. |
29706 |
++ */ |
29707 |
++ if (err == -EFAULT && rw == FUTEX_READ) { |
29708 |
++ err = get_user_pages_fast(address, 1, 0, &page); |
29709 |
++ ro = 1; |
29710 |
++ } |
29711 |
++ if (err < 0) |
29712 |
++ return err; |
29713 |
++ else |
29714 |
++ err = 0; |
29715 |
++ |
29716 |
++ /* |
29717 |
++ * The treatment of mapping from this point on is critical. The page |
29718 |
++ * lock protects many things but in this context the page lock |
29719 |
++ * stabilizes mapping, prevents inode freeing in the shared |
29720 |
++ * file-backed region case and guards against movement to swap cache. |
29721 |
++ * |
29722 |
++ * Strictly speaking the page lock is not needed in all cases being |
29723 |
++ * considered here and page lock forces unnecessarily serialization |
29724 |
++ * From this point on, mapping will be re-verified if necessary and |
29725 |
++ * page lock will be acquired only if it is unavoidable |
29726 |
++ * |
29727 |
++ * Mapping checks require the head page for any compound page so the |
29728 |
++ * head page and mapping is looked up now. For anonymous pages, it |
29729 |
++ * does not matter if the page splits in the future as the key is |
29730 |
++ * based on the address. For filesystem-backed pages, the tail is |
29731 |
++ * required as the index of the page determines the key. For |
29732 |
++ * base pages, there is no tail page and tail == page. |
29733 |
++ */ |
29734 |
++ tail = page; |
29735 |
++ page = compound_head(page); |
29736 |
++ mapping = READ_ONCE(page->mapping); |
29737 |
++ |
29738 |
++ /* |
29739 |
++ * If page->mapping is NULL, then it cannot be a PageAnon |
29740 |
++ * page; but it might be the ZERO_PAGE or in the gate area or |
29741 |
++ * in a special mapping (all cases which we are happy to fail); |
29742 |
++ * or it may have been a good file page when get_user_pages_fast |
29743 |
++ * found it, but truncated or holepunched or subjected to |
29744 |
++ * invalidate_complete_page2 before we got the page lock (also |
29745 |
++ * cases which we are happy to fail). And we hold a reference, |
29746 |
++ * so refcount care in invalidate_complete_page's remove_mapping |
29747 |
++ * prevents drop_caches from setting mapping to NULL beneath us. |
29748 |
++ * |
29749 |
++ * The case we do have to guard against is when memory pressure made |
29750 |
++ * shmem_writepage move it from filecache to swapcache beneath us: |
29751 |
++ * an unlikely race, but we do need to retry for page->mapping. |
29752 |
++ */ |
29753 |
++ if (unlikely(!mapping)) { |
29754 |
++ int shmem_swizzled; |
29755 |
++ |
29756 |
++ /* |
29757 |
++ * Page lock is required to identify which special case above |
29758 |
++ * applies. If this is really a shmem page then the page lock |
29759 |
++ * will prevent unexpected transitions. |
29760 |
++ */ |
29761 |
++ lock_page(page); |
29762 |
++ shmem_swizzled = PageSwapCache(page) || page->mapping; |
29763 |
++ unlock_page(page); |
29764 |
++ put_page(page); |
29765 |
++ |
29766 |
++ if (shmem_swizzled) |
29767 |
++ goto again; |
29768 |
++ |
29769 |
++ return -EFAULT; |
29770 |
++ } |
29771 |
++ |
29772 |
++ /* |
29773 |
++ * Private mappings are handled in a simple way. |
29774 |
++ * |
29775 |
++ * If the futex key is stored on an anonymous page, then the associated |
29776 |
++ * object is the mm which is implicitly pinned by the calling process. |
29777 |
++ * |
29778 |
++ * NOTE: When userspace waits on a MAP_SHARED mapping, even if |
29779 |
++ * it's a read-only handle, it's expected that futexes attach to |
29780 |
++ * the object not the particular process. |
29781 |
++ */ |
29782 |
++ if (PageAnon(page)) { |
29783 |
++ /* |
29784 |
++ * A RO anonymous page will never change and thus doesn't make |
29785 |
++ * sense for futex operations. |
29786 |
++ */ |
29787 |
++ if (unlikely(should_fail_futex(true)) || ro) { |
29788 |
++ err = -EFAULT; |
29789 |
++ goto out; |
29790 |
++ } |
29791 |
++ |
29792 |
++ key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ |
29793 |
++ key->private.mm = mm; |
29794 |
++ key->private.address = address; |
29795 |
++ |
29796 |
++ } else { |
29797 |
++ struct inode *inode; |
29798 |
++ |
29799 |
++ /* |
29800 |
++ * The associated futex object in this case is the inode and |
29801 |
++ * the page->mapping must be traversed. Ordinarily this should |
29802 |
++ * be stabilised under page lock but it's not strictly |
29803 |
++ * necessary in this case as we just want to pin the inode, not |
29804 |
++ * update the radix tree or anything like that. |
29805 |
++ * |
29806 |
++ * The RCU read lock is taken as the inode is finally freed |
29807 |
++ * under RCU. If the mapping still matches expectations then the |
29808 |
++ * mapping->host can be safely accessed as being a valid inode. |
29809 |
++ */ |
29810 |
++ rcu_read_lock(); |
29811 |
++ |
29812 |
++ if (READ_ONCE(page->mapping) != mapping) { |
29813 |
++ rcu_read_unlock(); |
29814 |
++ put_page(page); |
29815 |
++ |
29816 |
++ goto again; |
29817 |
++ } |
29818 |
++ |
29819 |
++ inode = READ_ONCE(mapping->host); |
29820 |
++ if (!inode) { |
29821 |
++ rcu_read_unlock(); |
29822 |
++ put_page(page); |
29823 |
++ |
29824 |
++ goto again; |
29825 |
++ } |
29826 |
++ |
29827 |
++ key->both.offset |= FUT_OFF_INODE; /* inode-based key */ |
29828 |
++ key->shared.i_seq = get_inode_sequence_number(inode); |
29829 |
++ key->shared.pgoff = page_to_pgoff(tail); |
29830 |
++ rcu_read_unlock(); |
29831 |
++ } |
29832 |
++ |
29833 |
++out: |
29834 |
++ put_page(page); |
29835 |
++ return err; |
29836 |
++} |
29837 |
++ |
29838 |
++/** |
29839 |
++ * fault_in_user_writeable() - Fault in user address and verify RW access |
29840 |
++ * @uaddr: pointer to faulting user space address |
29841 |
++ * |
29842 |
++ * Slow path to fixup the fault we just took in the atomic write |
29843 |
++ * access to @uaddr. |
29844 |
++ * |
29845 |
++ * We have no generic implementation of a non-destructive write to the |
29846 |
++ * user address. We know that we faulted in the atomic pagefault |
29847 |
++ * disabled section so we can as well avoid the #PF overhead by |
29848 |
++ * calling get_user_pages() right away. |
29849 |
++ */ |
29850 |
++static int fault_in_user_writeable(u32 __user *uaddr) |
29851 |
++{ |
29852 |
++ struct mm_struct *mm = current->mm; |
29853 |
++ int ret; |
29854 |
++ |
29855 |
++ mmap_read_lock(mm); |
29856 |
++ ret = fixup_user_fault(mm, (unsigned long)uaddr, |
29857 |
++ FAULT_FLAG_WRITE, NULL); |
29858 |
++ mmap_read_unlock(mm); |
29859 |
++ |
29860 |
++ return ret < 0 ? ret : 0; |
29861 |
++} |
29862 |
++ |
29863 |
++/** |
29864 |
++ * futex_top_waiter() - Return the highest priority waiter on a futex |
29865 |
++ * @hb: the hash bucket the futex_q's reside in |
29866 |
++ * @key: the futex key (to distinguish it from other futex futex_q's) |
29867 |
++ * |
29868 |
++ * Must be called with the hb lock held. |
29869 |
++ */ |
29870 |
++static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, |
29871 |
++ union futex_key *key) |
29872 |
++{ |
29873 |
++ struct futex_q *this; |
29874 |
++ |
29875 |
++ plist_for_each_entry(this, &hb->chain, list) { |
29876 |
++ if (match_futex(&this->key, key)) |
29877 |
++ return this; |
29878 |
++ } |
29879 |
++ return NULL; |
29880 |
++} |
29881 |
++ |
29882 |
++static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, |
29883 |
++ u32 uval, u32 newval) |
29884 |
++{ |
29885 |
++ int ret; |
29886 |
++ |
29887 |
++ pagefault_disable(); |
29888 |
++ ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); |
29889 |
++ pagefault_enable(); |
29890 |
++ |
29891 |
++ return ret; |
29892 |
++} |
29893 |
++ |
29894 |
++static int get_futex_value_locked(u32 *dest, u32 __user *from) |
29895 |
++{ |
29896 |
++ int ret; |
29897 |
++ |
29898 |
++ pagefault_disable(); |
29899 |
++ ret = __get_user(*dest, from); |
29900 |
++ pagefault_enable(); |
29901 |
++ |
29902 |
++ return ret ? -EFAULT : 0; |
29903 |
++} |
29904 |
++ |
29905 |
++ |
29906 |
++/* |
29907 |
++ * PI code: |
29908 |
++ */ |
29909 |
++static int refill_pi_state_cache(void) |
29910 |
++{ |
29911 |
++ struct futex_pi_state *pi_state; |
29912 |
++ |
29913 |
++ if (likely(current->pi_state_cache)) |
29914 |
++ return 0; |
29915 |
++ |
29916 |
++ pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); |
29917 |
++ |
29918 |
++ if (!pi_state) |
29919 |
++ return -ENOMEM; |
29920 |
++ |
29921 |
++ INIT_LIST_HEAD(&pi_state->list); |
29922 |
++ /* pi_mutex gets initialized later */ |
29923 |
++ pi_state->owner = NULL; |
29924 |
++ refcount_set(&pi_state->refcount, 1); |
29925 |
++ pi_state->key = FUTEX_KEY_INIT; |
29926 |
++ |
29927 |
++ current->pi_state_cache = pi_state; |
29928 |
++ |
29929 |
++ return 0; |
29930 |
++} |
29931 |
++ |
29932 |
++static struct futex_pi_state *alloc_pi_state(void) |
29933 |
++{ |
29934 |
++ struct futex_pi_state *pi_state = current->pi_state_cache; |
29935 |
++ |
29936 |
++ WARN_ON(!pi_state); |
29937 |
++ current->pi_state_cache = NULL; |
29938 |
++ |
29939 |
++ return pi_state; |
29940 |
++} |
29941 |
++ |
29942 |
++static void pi_state_update_owner(struct futex_pi_state *pi_state, |
29943 |
++ struct task_struct *new_owner) |
29944 |
++{ |
29945 |
++ struct task_struct *old_owner = pi_state->owner; |
29946 |
++ |
29947 |
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock); |
29948 |
++ |
29949 |
++ if (old_owner) { |
29950 |
++ raw_spin_lock(&old_owner->pi_lock); |
29951 |
++ WARN_ON(list_empty(&pi_state->list)); |
29952 |
++ list_del_init(&pi_state->list); |
29953 |
++ raw_spin_unlock(&old_owner->pi_lock); |
29954 |
++ } |
29955 |
++ |
29956 |
++ if (new_owner) { |
29957 |
++ raw_spin_lock(&new_owner->pi_lock); |
29958 |
++ WARN_ON(!list_empty(&pi_state->list)); |
29959 |
++ list_add(&pi_state->list, &new_owner->pi_state_list); |
29960 |
++ pi_state->owner = new_owner; |
29961 |
++ raw_spin_unlock(&new_owner->pi_lock); |
29962 |
++ } |
29963 |
++} |
29964 |
++ |
29965 |
++static void get_pi_state(struct futex_pi_state *pi_state) |
29966 |
++{ |
29967 |
++ WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); |
29968 |
++} |
29969 |
++ |
29970 |
++/* |
29971 |
++ * Drops a reference to the pi_state object and frees or caches it |
29972 |
++ * when the last reference is gone. |
29973 |
++ */ |
29974 |
++static void put_pi_state(struct futex_pi_state *pi_state) |
29975 |
++{ |
29976 |
++ if (!pi_state) |
29977 |
++ return; |
29978 |
++ |
29979 |
++ if (!refcount_dec_and_test(&pi_state->refcount)) |
29980 |
++ return; |
29981 |
++ |
29982 |
++ /* |
29983 |
++ * If pi_state->owner is NULL, the owner is most probably dying |
29984 |
++ * and has cleaned up the pi_state already |
29985 |
++ */ |
29986 |
++ if (pi_state->owner) { |
29987 |
++ unsigned long flags; |
29988 |
++ |
29989 |
++ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); |
29990 |
++ pi_state_update_owner(pi_state, NULL); |
29991 |
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex); |
29992 |
++ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); |
29993 |
++ } |
29994 |
++ |
29995 |
++ if (current->pi_state_cache) { |
29996 |
++ kfree(pi_state); |
29997 |
++ } else { |
29998 |
++ /* |
29999 |
++ * pi_state->list is already empty. |
30000 |
++ * clear pi_state->owner. |
30001 |
++ * refcount is at 0 - put it back to 1. |
30002 |
++ */ |
30003 |
++ pi_state->owner = NULL; |
30004 |
++ refcount_set(&pi_state->refcount, 1); |
30005 |
++ current->pi_state_cache = pi_state; |
30006 |
++ } |
30007 |
++} |
30008 |
++ |
30009 |
++#ifdef CONFIG_FUTEX_PI |
30010 |
++ |
30011 |
++/* |
30012 |
++ * This task is holding PI mutexes at exit time => bad. |
30013 |
++ * Kernel cleans up PI-state, but userspace is likely hosed. |
30014 |
++ * (Robust-futex cleanup is separate and might save the day for userspace.) |
30015 |
++ */ |
30016 |
++static void exit_pi_state_list(struct task_struct *curr) |
30017 |
++{ |
30018 |
++ struct list_head *next, *head = &curr->pi_state_list; |
30019 |
++ struct futex_pi_state *pi_state; |
30020 |
++ struct futex_hash_bucket *hb; |
30021 |
++ union futex_key key = FUTEX_KEY_INIT; |
30022 |
++ |
30023 |
++ if (!futex_cmpxchg_enabled) |
30024 |
++ return; |
30025 |
++ /* |
30026 |
++ * We are a ZOMBIE and nobody can enqueue itself on |
30027 |
++ * pi_state_list anymore, but we have to be careful |
30028 |
++ * versus waiters unqueueing themselves: |
30029 |
++ */ |
30030 |
++ raw_spin_lock_irq(&curr->pi_lock); |
30031 |
++ while (!list_empty(head)) { |
30032 |
++ next = head->next; |
30033 |
++ pi_state = list_entry(next, struct futex_pi_state, list); |
30034 |
++ key = pi_state->key; |
30035 |
++ hb = hash_futex(&key); |
30036 |
++ |
30037 |
++ /* |
30038 |
++ * We can race against put_pi_state() removing itself from the |
30039 |
++ * list (a waiter going away). put_pi_state() will first |
30040 |
++ * decrement the reference count and then modify the list, so |
30041 |
++		 * it's possible to see the list entry but fail this reference |
30042 |
++ * acquire. |
30043 |
++ * |
30044 |
++ * In that case; drop the locks to let put_pi_state() make |
30045 |
++ * progress and retry the loop. |
30046 |
++ */ |
30047 |
++ if (!refcount_inc_not_zero(&pi_state->refcount)) { |
30048 |
++ raw_spin_unlock_irq(&curr->pi_lock); |
30049 |
++ cpu_relax(); |
30050 |
++ raw_spin_lock_irq(&curr->pi_lock); |
30051 |
++ continue; |
30052 |
++ } |
30053 |
++ raw_spin_unlock_irq(&curr->pi_lock); |
30054 |
++ |
30055 |
++ spin_lock(&hb->lock); |
30056 |
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
30057 |
++ raw_spin_lock(&curr->pi_lock); |
30058 |
++ /* |
30059 |
++ * We dropped the pi-lock, so re-check whether this |
30060 |
++ * task still owns the PI-state: |
30061 |
++ */ |
30062 |
++ if (head->next != next) { |
30063 |
++ /* retain curr->pi_lock for the loop invariant */ |
30064 |
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock); |
30065 |
++ spin_unlock(&hb->lock); |
30066 |
++ put_pi_state(pi_state); |
30067 |
++ continue; |
30068 |
++ } |
30069 |
++ |
30070 |
++ WARN_ON(pi_state->owner != curr); |
30071 |
++ WARN_ON(list_empty(&pi_state->list)); |
30072 |
++ list_del_init(&pi_state->list); |
30073 |
++ pi_state->owner = NULL; |
30074 |
++ |
30075 |
++ raw_spin_unlock(&curr->pi_lock); |
30076 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
30077 |
++ spin_unlock(&hb->lock); |
30078 |
++ |
30079 |
++ rt_mutex_futex_unlock(&pi_state->pi_mutex); |
30080 |
++ put_pi_state(pi_state); |
30081 |
++ |
30082 |
++ raw_spin_lock_irq(&curr->pi_lock); |
30083 |
++ } |
30084 |
++ raw_spin_unlock_irq(&curr->pi_lock); |
30085 |
++} |
30086 |
++#else |
30087 |
++static inline void exit_pi_state_list(struct task_struct *curr) { } |
30088 |
++#endif |
30089 |
++ |
30090 |
++/* |
30091 |
++ * We need to check the following states: |
30092 |
++ * |
30093 |
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ? |
30094 |
++ * |
30095 |
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid |
30096 |
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid |
30097 |
++ * |
30098 |
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid |
30099 |
++ * |
30100 |
++ * [4] Found | Found | NULL | 0 | 1 | Valid |
30101 |
++ * [5] Found | Found | NULL | >0 | 1 | Invalid |
30102 |
++ * |
30103 |
++ * [6] Found | Found | task | 0 | 1 | Valid |
30104 |
++ * |
30105 |
++ * [7] Found | Found | NULL | Any | 0 | Invalid |
30106 |
++ * |
30107 |
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid |
30108 |
++ * [9] Found | Found | task | 0 | 0 | Invalid |
30109 |
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid |
30110 |
++ * |
30111 |
++ * [1] Indicates that the kernel can acquire the futex atomically. We |
30112 |
++ * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. |
30113 |
++ * |
30114 |
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching |
30115 |
++ * thread is found then it indicates that the owner TID has died. |
30116 |
++ * |
30117 |
++ * [3] Invalid. The waiter is queued on a non-PI futex |
30118 |
++ * |
30119 |
++ * [4] Valid state after exit_robust_list(), which sets the user space |
30120 |
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED. |
30121 |
++ * |
30122 |
++ * [5] The user space value got manipulated between exit_robust_list() |
30123 |
++ * and exit_pi_state_list() |
30124 |
++ * |
30125 |
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in |
30126 |
++ * the pi_state but cannot access the user space value. |
30127 |
++ * |
30128 |
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set. |
30129 |
++ * |
30130 |
++ * [8] Owner and user space value match |
30131 |
++ * |
30132 |
++ * [9] There is no transient state which sets the user space TID to 0 |
30133 |
++ * except exit_robust_list(), but this is indicated by the |
30134 |
++ * FUTEX_OWNER_DIED bit. See [4] |
30135 |
++ * |
30136 |
++ * [10] There is no transient state which leaves owner and user space |
30137 |
++ * TID out of sync. Except one error case where the kernel is denied |
30138 |
++ * write access to the user address, see fixup_pi_state_owner(). |
30139 |
++ * |
30140 |
++ * |
30141 |
++ * Serialization and lifetime rules: |
30142 |
++ * |
30143 |
++ * hb->lock: |
30144 |
++ * |
30145 |
++ * hb -> futex_q, relation |
30146 |
++ * futex_q -> pi_state, relation |
30147 |
++ * |
30148 |
++ * (cannot be raw because hb can contain arbitrary amount |
30149 |
++ * of futex_q's) |
30150 |
++ * |
30151 |
++ * pi_mutex->wait_lock: |
30152 |
++ * |
30153 |
++ * {uval, pi_state} |
30154 |
++ * |
30155 |
++ * (and pi_mutex 'obviously') |
30156 |
++ * |
30157 |
++ * p->pi_lock: |
30158 |
++ * |
30159 |
++ * p->pi_state_list -> pi_state->list, relation |
30160 |
++ * pi_mutex->owner -> pi_state->owner, relation |
30161 |
++ * |
30162 |
++ * pi_state->refcount: |
30163 |
++ * |
30164 |
++ * pi_state lifetime |
30165 |
++ * |
30166 |
++ * |
30167 |
++ * Lock order: |
30168 |
++ * |
30169 |
++ * hb->lock |
30170 |
++ * pi_mutex->wait_lock |
30171 |
++ * p->pi_lock |
30172 |
++ * |
30173 |
++ */ |
30174 |
++ |
30175 |
++/* |
30176 |
++ * Validate that the existing waiter has a pi_state and sanity check |
30177 |
++ * the pi_state against the user space value. If correct, attach to |
30178 |
++ * it. |
30179 |
++ */ |
30180 |
++static int attach_to_pi_state(u32 __user *uaddr, u32 uval, |
30181 |
++ struct futex_pi_state *pi_state, |
30182 |
++ struct futex_pi_state **ps) |
30183 |
++{ |
30184 |
++ pid_t pid = uval & FUTEX_TID_MASK; |
30185 |
++ u32 uval2; |
30186 |
++ int ret; |
30187 |
++ |
30188 |
++ /* |
30189 |
++ * Userspace might have messed up non-PI and PI futexes [3] |
30190 |
++ */ |
30191 |
++ if (unlikely(!pi_state)) |
30192 |
++ return -EINVAL; |
30193 |
++ |
30194 |
++ /* |
30195 |
++ * We get here with hb->lock held, and having found a |
30196 |
++ * futex_top_waiter(). This means that futex_lock_pi() of said futex_q |
30197 |
++ * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), |
30198 |
++ * which in turn means that futex_lock_pi() still has a reference on |
30199 |
++ * our pi_state. |
30200 |
++ * |
30201 |
++ * The waiter holding a reference on @pi_state also protects against |
30202 |
++ * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() |
30203 |
++ * and futex_wait_requeue_pi() as it cannot go to 0 and consequently |
30204 |
++ * free pi_state before we can take a reference ourselves. |
30205 |
++ */ |
30206 |
++ WARN_ON(!refcount_read(&pi_state->refcount)); |
30207 |
++ |
30208 |
++ /* |
30209 |
++ * Now that we have a pi_state, we can acquire wait_lock |
30210 |
++ * and do the state validation. |
30211 |
++ */ |
30212 |
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
30213 |
++ |
30214 |
++ /* |
30215 |
++ * Since {uval, pi_state} is serialized by wait_lock, and our current |
30216 |
++ * uval was read without holding it, it can have changed. Verify it |
30217 |
++ * still is what we expect it to be, otherwise retry the entire |
30218 |
++ * operation. |
30219 |
++ */ |
30220 |
++ if (get_futex_value_locked(&uval2, uaddr)) |
30221 |
++ goto out_efault; |
30222 |
++ |
30223 |
++ if (uval != uval2) |
30224 |
++ goto out_eagain; |
30225 |
++ |
30226 |
++ /* |
30227 |
++ * Handle the owner died case: |
30228 |
++ */ |
30229 |
++ if (uval & FUTEX_OWNER_DIED) { |
30230 |
++ /* |
30231 |
++ * exit_pi_state_list sets owner to NULL and wakes the |
30232 |
++ * topmost waiter. The task which acquires the |
30233 |
++ * pi_state->rt_mutex will fixup owner. |
30234 |
++ */ |
30235 |
++ if (!pi_state->owner) { |
30236 |
++ /* |
30237 |
++ * No pi state owner, but the user space TID |
30238 |
++ * is not 0. Inconsistent state. [5] |
30239 |
++ */ |
30240 |
++ if (pid) |
30241 |
++ goto out_einval; |
30242 |
++ /* |
30243 |
++ * Take a ref on the state and return success. [4] |
30244 |
++ */ |
30245 |
++ goto out_attach; |
30246 |
++ } |
30247 |
++ |
30248 |
++ /* |
30249 |
++ * If TID is 0, then either the dying owner has not |
30250 |
++ * yet executed exit_pi_state_list() or some waiter |
30251 |
++ * acquired the rtmutex in the pi state, but did not |
30252 |
++ * yet fixup the TID in user space. |
30253 |
++ * |
30254 |
++ * Take a ref on the state and return success. [6] |
30255 |
++ */ |
30256 |
++ if (!pid) |
30257 |
++ goto out_attach; |
30258 |
++ } else { |
30259 |
++ /* |
30260 |
++ * If the owner died bit is not set, then the pi_state |
30261 |
++ * must have an owner. [7] |
30262 |
++ */ |
30263 |
++ if (!pi_state->owner) |
30264 |
++ goto out_einval; |
30265 |
++ } |
30266 |
++ |
30267 |
++ /* |
30268 |
++ * Bail out if user space manipulated the futex value. If pi |
30269 |
++ * state exists then the owner TID must be the same as the |
30270 |
++ * user space TID. [9/10] |
30271 |
++ */ |
30272 |
++ if (pid != task_pid_vnr(pi_state->owner)) |
30273 |
++ goto out_einval; |
30274 |
++ |
30275 |
++out_attach: |
30276 |
++ get_pi_state(pi_state); |
30277 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
30278 |
++ *ps = pi_state; |
30279 |
++ return 0; |
30280 |
++ |
30281 |
++out_einval: |
30282 |
++ ret = -EINVAL; |
30283 |
++ goto out_error; |
30284 |
++ |
30285 |
++out_eagain: |
30286 |
++ ret = -EAGAIN; |
30287 |
++ goto out_error; |
30288 |
++ |
30289 |
++out_efault: |
30290 |
++ ret = -EFAULT; |
30291 |
++ goto out_error; |
30292 |
++ |
30293 |
++out_error: |
30294 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
30295 |
++ return ret; |
30296 |
++} |
30297 |
++ |
30298 |
++/** |
30299 |
++ * wait_for_owner_exiting - Block until the owner has exited |
30300 |
++ * @ret: owner's current futex lock status |
30301 |
++ * @exiting: Pointer to the exiting task |
30302 |
++ * |
30303 |
++ * Caller must hold a refcount on @exiting. |
30304 |
++ */ |
30305 |
++static void wait_for_owner_exiting(int ret, struct task_struct *exiting) |
30306 |
++{ |
30307 |
++ if (ret != -EBUSY) { |
30308 |
++ WARN_ON_ONCE(exiting); |
30309 |
++ return; |
30310 |
++ } |
30311 |
++ |
30312 |
++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) |
30313 |
++ return; |
30314 |
++ |
30315 |
++ mutex_lock(&exiting->futex_exit_mutex); |
30316 |
++ /* |
30317 |
++ * No point in doing state checking here. If the waiter got here |
30318 |
++ * while the task was in exec()->exec_futex_release() then it can |
30319 |
++ * have any FUTEX_STATE_* value when the waiter has acquired the |
30320 |
++ * mutex. OK, if running, EXITING or DEAD if it reached exit() |
30321 |
++ * already. Highly unlikely and not a problem. Just one more round |
30322 |
++ * through the futex maze. |
30323 |
++ */ |
30324 |
++ mutex_unlock(&exiting->futex_exit_mutex); |
30325 |
++ |
30326 |
++ put_task_struct(exiting); |
30327 |
++} |
30328 |
++ |
30329 |
++static int handle_exit_race(u32 __user *uaddr, u32 uval, |
30330 |
++ struct task_struct *tsk) |
30331 |
++{ |
30332 |
++ u32 uval2; |
30333 |
++ |
30334 |
++ /* |
30335 |
++ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the |
30336 |
++ * caller that the alleged owner is busy. |
30337 |
++ */ |
30338 |
++ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) |
30339 |
++ return -EBUSY; |
30340 |
++ |
30341 |
++ /* |
30342 |
++ * Reread the user space value to handle the following situation: |
30343 |
++ * |
30344 |
++ * CPU0 CPU1 |
30345 |
++ * |
30346 |
++ * sys_exit() sys_futex() |
30347 |
++ * do_exit() futex_lock_pi() |
30348 |
++ * futex_lock_pi_atomic() |
30349 |
++ * exit_signals(tsk) No waiters: |
30350 |
++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID |
30351 |
++ * mm_release(tsk) Set waiter bit |
30352 |
++ * exit_robust_list(tsk) { *uaddr = 0x80000PID; |
30353 |
++ * Set owner died attach_to_pi_owner() { |
30354 |
++ * *uaddr = 0xC0000000; tsk = get_task(PID); |
30355 |
++ * } if (!tsk->flags & PF_EXITING) { |
30356 |
++ * ... attach(); |
30357 |
++ * tsk->futex_state = } else { |
30358 |
++ * FUTEX_STATE_DEAD; if (tsk->futex_state != |
30359 |
++ * FUTEX_STATE_DEAD) |
30360 |
++ * return -EAGAIN; |
30361 |
++ * return -ESRCH; <--- FAIL |
30362 |
++ * } |
30363 |
++ * |
30364 |
++ * Returning ESRCH unconditionally is wrong here because the |
30365 |
++ * user space value has been changed by the exiting task. |
30366 |
++ * |
30367 |
++ * The same logic applies to the case where the exiting task is |
30368 |
++ * already gone. |
30369 |
++ */ |
30370 |
++ if (get_futex_value_locked(&uval2, uaddr)) |
30371 |
++ return -EFAULT; |
30372 |
++ |
30373 |
++ /* If the user space value has changed, try again. */ |
30374 |
++ if (uval2 != uval) |
30375 |
++ return -EAGAIN; |
30376 |
++ |
30377 |
++ /* |
30378 |
++ * The exiting task did not have a robust list, the robust list was |
30379 |
++ * corrupted or the user space value in *uaddr is simply bogus. |
30380 |
++ * Give up and tell user space. |
30381 |
++ */ |
30382 |
++ return -ESRCH; |
30383 |
++} |
30384 |
++ |
30385 |
++static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key, |
30386 |
++ struct futex_pi_state **ps) |
30387 |
++{ |
30388 |
++ /* |
30389 |
++ * No existing pi state. First waiter. [2] |
30390 |
++ * |
30391 |
++ * This creates pi_state, we have hb->lock held, this means nothing can |
30392 |
++ * observe this state, wait_lock is irrelevant. |
30393 |
++ */ |
30394 |
++ struct futex_pi_state *pi_state = alloc_pi_state(); |
30395 |
++ |
30396 |
++ /* |
30397 |
++ * Initialize the pi_mutex in locked state and make @p |
30398 |
++ * the owner of it: |
30399 |
++ */ |
30400 |
++ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p); |
30401 |
++ |
30402 |
++ /* Store the key for possible exit cleanups: */ |
30403 |
++ pi_state->key = *key; |
30404 |
++ |
30405 |
++ WARN_ON(!list_empty(&pi_state->list)); |
30406 |
++ list_add(&pi_state->list, &p->pi_state_list); |
30407 |
++ /* |
30408 |
++ * Assignment without holding pi_state->pi_mutex.wait_lock is safe |
30409 |
++ * because there is no concurrency as the object is not published yet. |
30410 |
++ */ |
30411 |
++ pi_state->owner = p; |
30412 |
++ |
30413 |
++ *ps = pi_state; |
30414 |
++} |
30415 |
++/* |
30416 |
++ * Lookup the task for the TID provided from user space and attach to |
30417 |
++ * it after doing proper sanity checks. |
30418 |
++ */ |
30419 |
++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, |
30420 |
++ struct futex_pi_state **ps, |
30421 |
++ struct task_struct **exiting) |
30422 |
++{ |
30423 |
++ pid_t pid = uval & FUTEX_TID_MASK; |
30424 |
++ struct task_struct *p; |
30425 |
++ |
30426 |
++ /* |
30427 |
++ * We are the first waiter - try to look up the real owner and attach |
30428 |
++ * the new pi_state to it, but bail out when TID = 0 [1] |
30429 |
++ * |
30430 |
++ * The !pid check is paranoid. None of the call sites should end up |
30431 |
++ * with pid == 0, but better safe than sorry. Let the caller retry |
30432 |
++ */ |
30433 |
++ if (!pid) |
30434 |
++ return -EAGAIN; |
30435 |
++ p = find_get_task_by_vpid(pid); |
30436 |
++ if (!p) |
30437 |
++ return handle_exit_race(uaddr, uval, NULL); |
30438 |
++ |
30439 |
++ if (unlikely(p->flags & PF_KTHREAD)) { |
30440 |
++ put_task_struct(p); |
30441 |
++ return -EPERM; |
30442 |
++ } |
30443 |
++ |
30444 |
++ /* |
30445 |
++	 * We need to look at the task state to figure out whether the |
30446 |
++ * task is exiting. To protect against the change of the task state |
30447 |
++ * in futex_exit_release(), we do this protected by p->pi_lock: |
30448 |
++ */ |
30449 |
++ raw_spin_lock_irq(&p->pi_lock); |
30450 |
++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) { |
30451 |
++ /* |
30452 |
++ * The task is on the way out. When the futex state is |
30453 |
++ * FUTEX_STATE_DEAD, we know that the task has finished |
30454 |
++ * the cleanup: |
30455 |
++ */ |
30456 |
++ int ret = handle_exit_race(uaddr, uval, p); |
30457 |
++ |
30458 |
++ raw_spin_unlock_irq(&p->pi_lock); |
30459 |
++ /* |
30460 |
++ * If the owner task is between FUTEX_STATE_EXITING and |
30461 |
++ * FUTEX_STATE_DEAD then store the task pointer and keep |
30462 |
++ * the reference on the task struct. The calling code will |
30463 |
++ * drop all locks, wait for the task to reach |
30464 |
++ * FUTEX_STATE_DEAD and then drop the refcount. This is |
30465 |
++ * required to prevent a live lock when the current task |
30466 |
++ * preempted the exiting task between the two states. |
30467 |
++ */ |
30468 |
++ if (ret == -EBUSY) |
30469 |
++ *exiting = p; |
30470 |
++ else |
30471 |
++ put_task_struct(p); |
30472 |
++ return ret; |
30473 |
++ } |
30474 |
++ |
30475 |
++ __attach_to_pi_owner(p, key, ps); |
30476 |
++ raw_spin_unlock_irq(&p->pi_lock); |
30477 |
++ |
30478 |
++ put_task_struct(p); |
30479 |
++ |
30480 |
++ return 0; |
30481 |
++} |
30482 |
++ |
30483 |
++static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
30484 |
++{ |
30485 |
++ int err; |
30486 |
++ u32 curval; |
30487 |
++ |
30488 |
++ if (unlikely(should_fail_futex(true))) |
30489 |
++ return -EFAULT; |
30490 |
++ |
30491 |
++ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
30492 |
++ if (unlikely(err)) |
30493 |
++ return err; |
30494 |
++ |
30495 |
++ /* If user space value changed, let the caller retry */ |
30496 |
++ return curval != uval ? -EAGAIN : 0; |
30497 |
++} |
30498 |
++ |
30499 |
++/** |
30500 |
++ * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex |
30501 |
++ * @uaddr: the pi futex user address |
30502 |
++ * @hb: the pi futex hash bucket |
30503 |
++ * @key: the futex key associated with uaddr and hb |
30504 |
++ * @ps: the pi_state pointer where we store the result of the |
30505 |
++ * lookup |
30506 |
++ * @task: the task to perform the atomic lock work for. This will |
30507 |
++ * be "current" except in the case of requeue pi. |
30508 |
++ * @exiting: Pointer to store the task pointer of the owner task |
30509 |
++ * which is in the middle of exiting |
30510 |
++ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
30511 |
++ * |
30512 |
++ * Return: |
30513 |
++ * - 0 - ready to wait; |
30514 |
++ * - 1 - acquired the lock; |
30515 |
++ * - <0 - error |
30516 |
++ * |
30517 |
++ * The hb->lock must be held by the caller. |
30518 |
++ * |
30519 |
++ * @exiting is only set when the return value is -EBUSY. If so, this holds |
30520 |
++ * a refcount on the exiting task on return and the caller needs to drop it |
30521 |
++ * after waiting for the exit to complete. |
30522 |
++ */ |
30523 |
++static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, |
30524 |
++ union futex_key *key, |
30525 |
++ struct futex_pi_state **ps, |
30526 |
++ struct task_struct *task, |
30527 |
++ struct task_struct **exiting, |
30528 |
++ int set_waiters) |
30529 |
++{ |
30530 |
++ u32 uval, newval, vpid = task_pid_vnr(task); |
30531 |
++ struct futex_q *top_waiter; |
30532 |
++ int ret; |
30533 |
++ |
30534 |
++ /* |
30535 |
++ * Read the user space value first so we can validate a few |
30536 |
++ * things before proceeding further. |
30537 |
++ */ |
30538 |
++ if (get_futex_value_locked(&uval, uaddr)) |
30539 |
++ return -EFAULT; |
30540 |
++ |
30541 |
++ if (unlikely(should_fail_futex(true))) |
30542 |
++ return -EFAULT; |
30543 |
++ |
30544 |
++ /* |
30545 |
++ * Detect deadlocks. |
30546 |
++ */ |
30547 |
++ if ((unlikely((uval & FUTEX_TID_MASK) == vpid))) |
30548 |
++ return -EDEADLK; |
30549 |
++ |
30550 |
++ if ((unlikely(should_fail_futex(true)))) |
30551 |
++ return -EDEADLK; |
30552 |
++ |
30553 |
++ /* |
30554 |
++ * Lookup existing state first. If it exists, try to attach to |
30555 |
++ * its pi_state. |
30556 |
++ */ |
30557 |
++ top_waiter = futex_top_waiter(hb, key); |
30558 |
++ if (top_waiter) |
30559 |
++ return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); |
30560 |
++ |
30561 |
++ /* |
30562 |
++ * No waiter and user TID is 0. We are here because the |
30563 |
++	 * waiters bit or the owner died bit is set, we were called from |
30564 |
++	 * requeue_cmp_pi, or for whatever reason something took the |
30565 |
++ * syscall. |
30566 |
++ */ |
30567 |
++ if (!(uval & FUTEX_TID_MASK)) { |
30568 |
++ /* |
30569 |
++ * We take over the futex. No other waiters and the user space |
30570 |
++ * TID is 0. We preserve the owner died bit. |
30571 |
++ */ |
30572 |
++ newval = uval & FUTEX_OWNER_DIED; |
30573 |
++ newval |= vpid; |
30574 |
++ |
30575 |
++ /* The futex requeue_pi code can enforce the waiters bit */ |
30576 |
++ if (set_waiters) |
30577 |
++ newval |= FUTEX_WAITERS; |
30578 |
++ |
30579 |
++ ret = lock_pi_update_atomic(uaddr, uval, newval); |
30580 |
++ if (ret) |
30581 |
++ return ret; |
30582 |
++ |
30583 |
++ /* |
30584 |
++ * If the waiter bit was requested the caller also needs PI |
30585 |
++ * state attached to the new owner of the user space futex. |
30586 |
++ * |
30587 |
++ * @task is guaranteed to be alive and it cannot be exiting |
30588 |
++ * because it is either sleeping or waiting in |
30589 |
++ * futex_requeue_pi_wakeup_sync(). |
30590 |
++ * |
30591 |
++ * No need to do the full attach_to_pi_owner() exercise |
30592 |
++ * because @task is known and valid. |
30593 |
++ */ |
30594 |
++ if (set_waiters) { |
30595 |
++ raw_spin_lock_irq(&task->pi_lock); |
30596 |
++ __attach_to_pi_owner(task, key, ps); |
30597 |
++ raw_spin_unlock_irq(&task->pi_lock); |
30598 |
++ } |
30599 |
++ return 1; |
30600 |
++ } |
30601 |
++ |
30602 |
++ /* |
30603 |
++ * First waiter. Set the waiters bit before attaching ourself to |
30604 |
++ * the owner. If owner tries to unlock, it will be forced into |
30605 |
++ * the kernel and blocked on hb->lock. |
30606 |
++ */ |
30607 |
++ newval = uval | FUTEX_WAITERS; |
30608 |
++ ret = lock_pi_update_atomic(uaddr, uval, newval); |
30609 |
++ if (ret) |
30610 |
++ return ret; |
30611 |
++ /* |
30612 |
++ * If the update of the user space value succeeded, we try to |
30613 |
++ * attach to the owner. If that fails, no harm done, we only |
30614 |
++ * set the FUTEX_WAITERS bit in the user space variable. |
30615 |
++ */ |
30616 |
++ return attach_to_pi_owner(uaddr, newval, key, ps, exiting); |
30617 |
++} |
30618 |
++ |
30619 |
++/** |
30620 |
++ * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket |
30621 |
++ * @q: The futex_q to unqueue |
30622 |
++ * |
30623 |
++ * The q->lock_ptr must not be NULL and must be held by the caller. |
30624 |
++ */ |
30625 |
++static void __unqueue_futex(struct futex_q *q) |
30626 |
++{ |
30627 |
++ struct futex_hash_bucket *hb; |
30628 |
++ |
30629 |
++ if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) |
30630 |
++ return; |
30631 |
++ lockdep_assert_held(q->lock_ptr); |
30632 |
++ |
30633 |
++ hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); |
30634 |
++ plist_del(&q->list, &hb->chain); |
30635 |
++ hb_waiters_dec(hb); |
30636 |
++} |
30637 |
++ |
30638 |
++/* |
30639 |
++ * The hash bucket lock must be held when this is called. |
30640 |
++ * Afterwards, the futex_q must not be accessed. Callers |
30641 |
++ * must ensure to later call wake_up_q() for the actual |
30642 |
++ * wakeups to occur. |
30643 |
++ */ |
30644 |
++static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) |
30645 |
++{ |
30646 |
++ struct task_struct *p = q->task; |
30647 |
++ |
30648 |
++ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) |
30649 |
++ return; |
30650 |
++ |
30651 |
++ get_task_struct(p); |
30652 |
++ __unqueue_futex(q); |
30653 |
++ /* |
30654 |
++ * The waiting task can free the futex_q as soon as q->lock_ptr = NULL |
30655 |
++ * is written, without taking any locks. This is possible in the event |
30656 |
++ * of a spurious wakeup, for example. A memory barrier is required here |
30657 |
++ * to prevent the following store to lock_ptr from getting ahead of the |
30658 |
++ * plist_del in __unqueue_futex(). |
30659 |
++ */ |
30660 |
++ smp_store_release(&q->lock_ptr, NULL); |
30661 |
++ |
30662 |
++ /* |
30663 |
++ * Queue the task for later wakeup for after we've released |
30664 |
++ * the hb->lock. |
30665 |
++ */ |
30666 |
++ wake_q_add_safe(wake_q, p); |
30667 |
++} |
30668 |
++ |
30669 |
++/* |
30670 |
++ * Caller must hold a reference on @pi_state. |
30671 |
++ */ |
30672 |
++static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) |
30673 |
++{ |
30674 |
++ struct rt_mutex_waiter *top_waiter; |
30675 |
++ struct task_struct *new_owner; |
30676 |
++ bool postunlock = false; |
30677 |
++ DEFINE_RT_WAKE_Q(wqh); |
30678 |
++ u32 curval, newval; |
30679 |
++ int ret = 0; |
30680 |
++ |
30681 |
++ top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex); |
30682 |
++ if (WARN_ON_ONCE(!top_waiter)) { |
30683 |
++ /* |
30684 |
++ * As per the comment in futex_unlock_pi() this should not happen. |
30685 |
++ * |
30686 |
++ * When this happens, give up our locks and try again, giving |
30687 |
++ * the futex_lock_pi() instance time to complete, either by |
30688 |
++ * waiting on the rtmutex or removing itself from the futex |
30689 |
++ * queue. |
30690 |
++ */ |
30691 |
++ ret = -EAGAIN; |
30692 |
++ goto out_unlock; |
30693 |
++ } |
30694 |
++ |
30695 |
++ new_owner = top_waiter->task; |
30696 |
++ |
30697 |
++ /* |
30698 |
++ * We pass it to the next owner. The WAITERS bit is always kept |
30699 |
++ * enabled while there is PI state around. We cleanup the owner |
30700 |
++ * died bit, because we are the owner. |
30701 |
++ */ |
30702 |
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); |
30703 |
++ |
30704 |
++ if (unlikely(should_fail_futex(true))) { |
30705 |
++ ret = -EFAULT; |
30706 |
++ goto out_unlock; |
30707 |
++ } |
30708 |
++ |
30709 |
++ ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
30710 |
++ if (!ret && (curval != uval)) { |
30711 |
++ /* |
30712 |
++		 * If an unconditional UNLOCK_PI operation (user space did not |
30713 |
++ * try the TID->0 transition) raced with a waiter setting the |
30714 |
++ * FUTEX_WAITERS flag between get_user() and locking the hash |
30715 |
++ * bucket lock, retry the operation. |
30716 |
++ */ |
30717 |
++ if ((FUTEX_TID_MASK & curval) == uval) |
30718 |
++ ret = -EAGAIN; |
30719 |
++ else |
30720 |
++ ret = -EINVAL; |
30721 |
++ } |
30722 |
++ |
30723 |
++ if (!ret) { |
30724 |
++ /* |
30725 |
++ * This is a point of no return; once we modified the uval |
30726 |
++ * there is no going back and subsequent operations must |
30727 |
++ * not fail. |
30728 |
++ */ |
30729 |
++ pi_state_update_owner(pi_state, new_owner); |
30730 |
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh); |
30731 |
++ } |
30732 |
++ |
30733 |
++out_unlock: |
30734 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
30735 |
++ |
30736 |
++ if (postunlock) |
30737 |
++ rt_mutex_postunlock(&wqh); |
30738 |
++ |
30739 |
++ return ret; |
30740 |
++} |
30741 |
++ |
30742 |
++/* |
30743 |
++ * Express the locking dependencies for lockdep: |
30744 |
++ */ |
30745 |
++static inline void |
30746 |
++double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) |
30747 |
++{ |
30748 |
++ if (hb1 <= hb2) { |
30749 |
++ spin_lock(&hb1->lock); |
30750 |
++ if (hb1 < hb2) |
30751 |
++ spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); |
30752 |
++ } else { /* hb1 > hb2 */ |
30753 |
++ spin_lock(&hb2->lock); |
30754 |
++ spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); |
30755 |
++ } |
30756 |
++} |
30757 |
++ |
30758 |
++static inline void |
30759 |
++double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) |
30760 |
++{ |
30761 |
++ spin_unlock(&hb1->lock); |
30762 |
++ if (hb1 != hb2) |
30763 |
++ spin_unlock(&hb2->lock); |
30764 |
++} |
30765 |
++ |
30766 |
++/* |
30767 |
++ * Wake up waiters matching bitset queued on this futex (uaddr). |
30768 |
++ */ |
30769 |
++static int |
30770 |
++futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) |
30771 |
++{ |
30772 |
++ struct futex_hash_bucket *hb; |
30773 |
++ struct futex_q *this, *next; |
30774 |
++ union futex_key key = FUTEX_KEY_INIT; |
30775 |
++ int ret; |
30776 |
++ DEFINE_WAKE_Q(wake_q); |
30777 |
++ |
30778 |
++ if (!bitset) |
30779 |
++ return -EINVAL; |
30780 |
++ |
30781 |
++ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ); |
30782 |
++ if (unlikely(ret != 0)) |
30783 |
++ return ret; |
30784 |
++ |
30785 |
++ hb = hash_futex(&key); |
30786 |
++ |
30787 |
++ /* Make sure we really have tasks to wakeup */ |
30788 |
++ if (!hb_waiters_pending(hb)) |
30789 |
++ return ret; |
30790 |
++ |
30791 |
++ spin_lock(&hb->lock); |
30792 |
++ |
30793 |
++ plist_for_each_entry_safe(this, next, &hb->chain, list) { |
30794 |
++ if (match_futex (&this->key, &key)) { |
30795 |
++ if (this->pi_state || this->rt_waiter) { |
30796 |
++ ret = -EINVAL; |
30797 |
++ break; |
30798 |
++ } |
30799 |
++ |
30800 |
++ /* Check if one of the bits is set in both bitsets */ |
30801 |
++ if (!(this->bitset & bitset)) |
30802 |
++ continue; |
30803 |
++ |
30804 |
++ mark_wake_futex(&wake_q, this); |
30805 |
++ if (++ret >= nr_wake) |
30806 |
++ break; |
30807 |
++ } |
30808 |
++ } |
30809 |
++ |
30810 |
++ spin_unlock(&hb->lock); |
30811 |
++ wake_up_q(&wake_q); |
30812 |
++ return ret; |
30813 |
++} |
30814 |
++ |
30815 |
++static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) |
30816 |
++{ |
30817 |
++ unsigned int op = (encoded_op & 0x70000000) >> 28; |
30818 |
++ unsigned int cmp = (encoded_op & 0x0f000000) >> 24; |
30819 |
++ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11); |
30820 |
++ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11); |
30821 |
++ int oldval, ret; |
30822 |
++ |
30823 |
++ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { |
30824 |
++ if (oparg < 0 || oparg > 31) { |
30825 |
++ char comm[sizeof(current->comm)]; |
30826 |
++ /* |
30827 |
++ * kill this print and return -EINVAL when userspace |
30828 |
++ * is sane again |
30829 |
++ */ |
30830 |
++ pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n", |
30831 |
++ get_task_comm(comm, current), oparg); |
30832 |
++ oparg &= 31; |
30833 |
++ } |
30834 |
++ oparg = 1 << oparg; |
30835 |
++ } |
30836 |
++ |
30837 |
++ pagefault_disable(); |
30838 |
++ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); |
30839 |
++ pagefault_enable(); |
30840 |
++ if (ret) |
30841 |
++ return ret; |
30842 |
++ |
30843 |
++ switch (cmp) { |
30844 |
++ case FUTEX_OP_CMP_EQ: |
30845 |
++ return oldval == cmparg; |
30846 |
++ case FUTEX_OP_CMP_NE: |
30847 |
++ return oldval != cmparg; |
30848 |
++ case FUTEX_OP_CMP_LT: |
30849 |
++ return oldval < cmparg; |
30850 |
++ case FUTEX_OP_CMP_GE: |
30851 |
++ return oldval >= cmparg; |
30852 |
++ case FUTEX_OP_CMP_LE: |
30853 |
++ return oldval <= cmparg; |
30854 |
++ case FUTEX_OP_CMP_GT: |
30855 |
++ return oldval > cmparg; |
30856 |
++ default: |
30857 |
++ return -ENOSYS; |
30858 |
++ } |
30859 |
++} |
30860 |
++ |
30861 |
++/* |
30862 |
++ * Wake up all waiters hashed on the physical page that is mapped |
30863 |
++ * to this virtual address: |
30864 |
++ */ |
30865 |
++static int |
30866 |
++futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, |
30867 |
++ int nr_wake, int nr_wake2, int op) |
30868 |
++{ |
30869 |
++ union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
30870 |
++ struct futex_hash_bucket *hb1, *hb2; |
30871 |
++ struct futex_q *this, *next; |
30872 |
++ int ret, op_ret; |
30873 |
++ DEFINE_WAKE_Q(wake_q); |
30874 |
++ |
30875 |
++retry: |
30876 |
++ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
30877 |
++ if (unlikely(ret != 0)) |
30878 |
++ return ret; |
30879 |
++ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
30880 |
++ if (unlikely(ret != 0)) |
30881 |
++ return ret; |
30882 |
++ |
30883 |
++ hb1 = hash_futex(&key1); |
30884 |
++ hb2 = hash_futex(&key2); |
30885 |
++ |
30886 |
++retry_private: |
30887 |
++ double_lock_hb(hb1, hb2); |
30888 |
++ op_ret = futex_atomic_op_inuser(op, uaddr2); |
30889 |
++ if (unlikely(op_ret < 0)) { |
30890 |
++ double_unlock_hb(hb1, hb2); |
30891 |
++ |
30892 |
++ if (!IS_ENABLED(CONFIG_MMU) || |
30893 |
++ unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { |
30894 |
++ /* |
30895 |
++ * we don't get EFAULT from MMU faults if we don't have |
30896 |
++ * an MMU, but we might get them from range checking |
30897 |
++ */ |
30898 |
++ ret = op_ret; |
30899 |
++ return ret; |
30900 |
++ } |
30901 |
++ |
30902 |
++ if (op_ret == -EFAULT) { |
30903 |
++ ret = fault_in_user_writeable(uaddr2); |
30904 |
++ if (ret) |
30905 |
++ return ret; |
30906 |
++ } |
30907 |
++ |
30908 |
++ cond_resched(); |
30909 |
++ if (!(flags & FLAGS_SHARED)) |
30910 |
++ goto retry_private; |
30911 |
++ goto retry; |
30912 |
++ } |
30913 |
++ |
30914 |
++ plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
30915 |
++ if (match_futex (&this->key, &key1)) { |
30916 |
++ if (this->pi_state || this->rt_waiter) { |
30917 |
++ ret = -EINVAL; |
30918 |
++ goto out_unlock; |
30919 |
++ } |
30920 |
++ mark_wake_futex(&wake_q, this); |
30921 |
++ if (++ret >= nr_wake) |
30922 |
++ break; |
30923 |
++ } |
30924 |
++ } |
30925 |
++ |
30926 |
++ if (op_ret > 0) { |
30927 |
++ op_ret = 0; |
30928 |
++ plist_for_each_entry_safe(this, next, &hb2->chain, list) { |
30929 |
++ if (match_futex (&this->key, &key2)) { |
30930 |
++ if (this->pi_state || this->rt_waiter) { |
30931 |
++ ret = -EINVAL; |
30932 |
++ goto out_unlock; |
30933 |
++ } |
30934 |
++ mark_wake_futex(&wake_q, this); |
30935 |
++ if (++op_ret >= nr_wake2) |
30936 |
++ break; |
30937 |
++ } |
30938 |
++ } |
30939 |
++ ret += op_ret; |
30940 |
++ } |
30941 |
++ |
30942 |
++out_unlock: |
30943 |
++ double_unlock_hb(hb1, hb2); |
30944 |
++ wake_up_q(&wake_q); |
30945 |
++ return ret; |
30946 |
++} |
30947 |
++ |
30948 |
++/** |
30949 |
++ * requeue_futex() - Requeue a futex_q from one hb to another |
30950 |
++ * @q: the futex_q to requeue |
30951 |
++ * @hb1: the source hash_bucket |
30952 |
++ * @hb2: the target hash_bucket |
30953 |
++ * @key2: the new key for the requeued futex_q |
30954 |
++ */ |
30955 |
++static inline |
30956 |
++void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, |
30957 |
++ struct futex_hash_bucket *hb2, union futex_key *key2) |
30958 |
++{ |
30959 |
++ |
30960 |
++ /* |
30961 |
++ * If key1 and key2 hash to the same bucket, no need to |
30962 |
++ * requeue. |
30963 |
++ */ |
30964 |
++ if (likely(&hb1->chain != &hb2->chain)) { |
30965 |
++ plist_del(&q->list, &hb1->chain); |
30966 |
++ hb_waiters_dec(hb1); |
30967 |
++ hb_waiters_inc(hb2); |
30968 |
++ plist_add(&q->list, &hb2->chain); |
30969 |
++ q->lock_ptr = &hb2->lock; |
30970 |
++ } |
30971 |
++ q->key = *key2; |
30972 |
++} |
30973 |
++ |
30974 |
++static inline bool futex_requeue_pi_prepare(struct futex_q *q, |
30975 |
++ struct futex_pi_state *pi_state) |
30976 |
++{ |
30977 |
++ int old, new; |
30978 |
++ |
30979 |
++ /* |
30980 |
++ * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has |
30981 |
++ * already set Q_REQUEUE_PI_IGNORE to signal that requeue should |
30982 |
++ * ignore the waiter. |
30983 |
++ */ |
30984 |
++ old = atomic_read_acquire(&q->requeue_state); |
30985 |
++ do { |
30986 |
++ if (old == Q_REQUEUE_PI_IGNORE) |
30987 |
++ return false; |
30988 |
++ |
30989 |
++ /* |
30990 |
++ * futex_proxy_trylock_atomic() might have set it to |
30991 |
++		 * IN_PROGRESS and an interleaved early wake to WAIT. |
30992 |
++ * |
30993 |
++ * It was considered to have an extra state for that |
30994 |
++ * trylock, but that would just add more conditionals |
30995 |
++ * all over the place for a dubious value. |
30996 |
++ */ |
30997 |
++ if (old != Q_REQUEUE_PI_NONE) |
30998 |
++ break; |
30999 |
++ |
31000 |
++ new = Q_REQUEUE_PI_IN_PROGRESS; |
31001 |
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
31002 |
++ |
31003 |
++ q->pi_state = pi_state; |
31004 |
++ return true; |
31005 |
++} |
31006 |
++ |
31007 |
++static inline void futex_requeue_pi_complete(struct futex_q *q, int locked) |
31008 |
++{ |
31009 |
++ int old, new; |
31010 |
++ |
31011 |
++ old = atomic_read_acquire(&q->requeue_state); |
31012 |
++ do { |
31013 |
++ if (old == Q_REQUEUE_PI_IGNORE) |
31014 |
++ return; |
31015 |
++ |
31016 |
++ if (locked >= 0) { |
31017 |
++ /* Requeue succeeded. Set DONE or LOCKED */ |
31018 |
++ WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS && |
31019 |
++ old != Q_REQUEUE_PI_WAIT); |
31020 |
++ new = Q_REQUEUE_PI_DONE + locked; |
31021 |
++ } else if (old == Q_REQUEUE_PI_IN_PROGRESS) { |
31022 |
++ /* Deadlock, no early wakeup interleave */ |
31023 |
++ new = Q_REQUEUE_PI_NONE; |
31024 |
++ } else { |
31025 |
++ /* Deadlock, early wakeup interleave. */ |
31026 |
++ WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT); |
31027 |
++ new = Q_REQUEUE_PI_IGNORE; |
31028 |
++ } |
31029 |
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
31030 |
++ |
31031 |
++#ifdef CONFIG_PREEMPT_RT |
31032 |
++ /* If the waiter interleaved with the requeue let it know */ |
31033 |
++ if (unlikely(old == Q_REQUEUE_PI_WAIT)) |
31034 |
++ rcuwait_wake_up(&q->requeue_wait); |
31035 |
++#endif |
31036 |
++} |
31037 |
++ |
31038 |
++static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q) |
31039 |
++{ |
31040 |
++ int old, new; |
31041 |
++ |
31042 |
++ old = atomic_read_acquire(&q->requeue_state); |
31043 |
++ do { |
31044 |
++ /* Is requeue done already? */ |
31045 |
++ if (old >= Q_REQUEUE_PI_DONE) |
31046 |
++ return old; |
31047 |
++ |
31048 |
++ /* |
31049 |
++ * If not done, then tell the requeue code to either ignore |
31050 |
++ * the waiter or to wake it up once the requeue is done. |
31051 |
++ */ |
31052 |
++ new = Q_REQUEUE_PI_WAIT; |
31053 |
++ if (old == Q_REQUEUE_PI_NONE) |
31054 |
++ new = Q_REQUEUE_PI_IGNORE; |
31055 |
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new)); |
31056 |
++ |
31057 |
++ /* If the requeue was in progress, wait for it to complete */ |
31058 |
++ if (old == Q_REQUEUE_PI_IN_PROGRESS) { |
31059 |
++#ifdef CONFIG_PREEMPT_RT |
31060 |
++ rcuwait_wait_event(&q->requeue_wait, |
31061 |
++ atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT, |
31062 |
++ TASK_UNINTERRUPTIBLE); |
31063 |
++#else |
31064 |
++ (void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT); |
31065 |
++#endif |
31066 |
++ } |
31067 |
++ |
31068 |
++ /* |
31069 |
++ * Requeue is now either prohibited or complete. Reread state |
31070 |
++ * because during the wait above it might have changed. Nothing |
31071 |
++ * will modify q->requeue_state after this point. |
31072 |
++ */ |
31073 |
++ return atomic_read(&q->requeue_state); |
31074 |
++} |
31075 |
++ |
31076 |
++/** |
31077 |
++ * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue |
31078 |
++ * @q: the futex_q |
31079 |
++ * @key: the key of the requeue target futex |
31080 |
++ * @hb: the hash_bucket of the requeue target futex |
31081 |
++ * |
31082 |
++ * During futex_requeue, with requeue_pi=1, it is possible to acquire the |
31083 |
++ * target futex if it is uncontended or via a lock steal. |
31084 |
++ * |
31085 |
++ * 1) Set @q::key to the requeue target futex key so the waiter can detect |
31086 |
++ * the wakeup on the right futex. |
31087 |
++ * |
31088 |
++ * 2) Dequeue @q from the hash bucket. |
31089 |
++ * |
31090 |
++ * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock |
31091 |
++ * acquisition. |
31092 |
++ * |
31093 |
++ * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that |
31094 |
++ * the waiter has to fixup the pi state. |
31095 |
++ * |
31096 |
++ * 5) Complete the requeue state so the waiter can make progress. After |
31097 |
++ * this point the waiter task can return from the syscall immediately in |
31098 |
++ * case that the pi state does not have to be fixed up. |
31099 |
++ * |
31100 |
++ * 6) Wake the waiter task. |
31101 |
++ * |
31102 |
++ * Must be called with both q->lock_ptr and hb->lock held. |
31103 |
++ */ |
31104 |
++static inline |
31105 |
++void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
31106 |
++ struct futex_hash_bucket *hb) |
31107 |
++{ |
31108 |
++ q->key = *key; |
31109 |
++ |
31110 |
++ __unqueue_futex(q); |
31111 |
++ |
31112 |
++ WARN_ON(!q->rt_waiter); |
31113 |
++ q->rt_waiter = NULL; |
31114 |
++ |
31115 |
++ q->lock_ptr = &hb->lock; |
31116 |
++ |
31117 |
++ /* Signal locked state to the waiter */ |
31118 |
++ futex_requeue_pi_complete(q, 1); |
31119 |
++ wake_up_state(q->task, TASK_NORMAL); |
31120 |
++} |
31121 |
++ |
31122 |
++/** |
31123 |
++ * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter |
31124 |
++ * @pifutex: the user address of the to futex |
31125 |
++ * @hb1: the from futex hash bucket, must be locked by the caller |
31126 |
++ * @hb2: the to futex hash bucket, must be locked by the caller |
31127 |
++ * @key1: the from futex key |
31128 |
++ * @key2: the to futex key |
31129 |
++ * @ps: address to store the pi_state pointer |
31130 |
++ * @exiting: Pointer to store the task pointer of the owner task |
31131 |
++ * which is in the middle of exiting |
31132 |
++ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
31133 |
++ * |
31134 |
++ * Try and get the lock on behalf of the top waiter if we can do it atomically. |
31135 |
++ * Wake the top waiter if we succeed. If the caller specified set_waiters, |
31136 |
++ * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. |
31137 |
++ * hb1 and hb2 must be held by the caller. |
31138 |
++ * |
31139 |
++ * @exiting is only set when the return value is -EBUSY. If so, this holds |
31140 |
++ * a refcount on the exiting task on return and the caller needs to drop it |
31141 |
++ * after waiting for the exit to complete. |
31142 |
++ * |
31143 |
++ * Return: |
31144 |
++ * - 0 - failed to acquire the lock atomically; |
31145 |
++ * - >0 - acquired the lock, return value is vpid of the top_waiter |
31146 |
++ * - <0 - error |
31147 |
++ */ |
31148 |
++static int |
31149 |
++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, |
31150 |
++ struct futex_hash_bucket *hb2, union futex_key *key1, |
31151 |
++ union futex_key *key2, struct futex_pi_state **ps, |
31152 |
++ struct task_struct **exiting, int set_waiters) |
31153 |
++{ |
31154 |
++ struct futex_q *top_waiter = NULL; |
31155 |
++ u32 curval; |
31156 |
++ int ret; |
31157 |
++ |
31158 |
++ if (get_futex_value_locked(&curval, pifutex)) |
31159 |
++ return -EFAULT; |
31160 |
++ |
31161 |
++ if (unlikely(should_fail_futex(true))) |
31162 |
++ return -EFAULT; |
31163 |
++ |
31164 |
++ /* |
31165 |
++ * Find the top_waiter and determine if there are additional waiters. |
31166 |
++ * If the caller intends to requeue more than 1 waiter to pifutex, |
31167 |
++ * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, |
31168 |
++ * as we have means to handle the possible fault. If not, don't set |
31169 |
++ * the bit unnecessarily as it will force the subsequent unlock to enter |
31170 |
++ * the kernel. |
31171 |
++ */ |
31172 |
++ top_waiter = futex_top_waiter(hb1, key1); |
31173 |
++ |
31174 |
++ /* There are no waiters, nothing for us to do. */ |
31175 |
++ if (!top_waiter) |
31176 |
++ return 0; |
31177 |
++ |
31178 |
++ /* |
31179 |
++ * Ensure that this is a waiter sitting in futex_wait_requeue_pi() |
31180 |
++ * and waiting on the 'waitqueue' futex which is always !PI. |
31181 |
++ */ |
31182 |
++ if (!top_waiter->rt_waiter || top_waiter->pi_state) |
31183 |
++ return -EINVAL; |
31184 |
++ |
31185 |
++ /* Ensure we requeue to the expected futex. */ |
31186 |
++ if (!match_futex(top_waiter->requeue_pi_key, key2)) |
31187 |
++ return -EINVAL; |
31188 |
++ |
31189 |
++ /* Ensure that this does not race against an early wakeup */ |
31190 |
++ if (!futex_requeue_pi_prepare(top_waiter, NULL)) |
31191 |
++ return -EAGAIN; |
31192 |
++ |
31193 |
++ /* |
31194 |
++ * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit |
31195 |
++ * in the contended case or if @set_waiters is true. |
31196 |
++ * |
31197 |
++ * In the contended case PI state is attached to the lock owner. If |
31198 |
++ * the user space lock can be acquired then PI state is attached to |
31199 |
++ * the new owner (@top_waiter->task) when @set_waiters is true. |
31200 |
++ */ |
31201 |
++ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, |
31202 |
++ exiting, set_waiters); |
31203 |
++ if (ret == 1) { |
31204 |
++ /* |
31205 |
++ * Lock was acquired in user space and PI state was |
31206 |
++ * attached to @top_waiter->task. That means state is fully |
31207 |
++ * consistent and the waiter can return to user space |
31208 |
++ * immediately after the wakeup. |
31209 |
++ */ |
31210 |
++ requeue_pi_wake_futex(top_waiter, key2, hb2); |
31211 |
++ } else if (ret < 0) { |
31212 |
++ /* Rewind top_waiter::requeue_state */ |
31213 |
++ futex_requeue_pi_complete(top_waiter, ret); |
31214 |
++ } else { |
31215 |
++ /* |
31216 |
++ * futex_lock_pi_atomic() did not acquire the user space |
31217 |
++ * futex, but managed to establish the proxy lock and pi |
31218 |
++ * state. top_waiter::requeue_state cannot be fixed up here |
31219 |
++ * because the waiter is not enqueued on the rtmutex |
31220 |
++ * yet. This is handled at the callsite depending on the |
31221 |
++ * result of rt_mutex_start_proxy_lock() which is |
31222 |
++ * guaranteed to be reached with this function returning 0. |
31223 |
++ */ |
31224 |
++ } |
31225 |
++ return ret; |
31226 |
++} |
31227 |
++ |
31228 |
++/** |
31229 |
++ * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 |
31230 |
++ * @uaddr1: source futex user address |
31231 |
++ * @flags: futex flags (FLAGS_SHARED, etc.) |
31232 |
++ * @uaddr2: target futex user address |
31233 |
++ * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) |
31234 |
++ * @nr_requeue: number of waiters to requeue (0-INT_MAX) |
31235 |
++ * @cmpval: @uaddr1 expected value (or %NULL) |
31236 |
++ * @requeue_pi: if we are attempting to requeue from a non-pi futex to a |
31237 |
++ * pi futex (pi to pi requeue is not supported) |
31238 |
++ * |
31239 |
++ * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire |
31240 |
++ * uaddr2 atomically on behalf of the top waiter. |
31241 |
++ * |
31242 |
++ * Return: |
31243 |
++ * - >=0 - on success, the number of tasks requeued or woken; |
31244 |
++ * - <0 - on error |
31245 |
++ */ |
31246 |
++static int futex_requeue(u32 __user *uaddr1, unsigned int flags, |
31247 |
++ u32 __user *uaddr2, int nr_wake, int nr_requeue, |
31248 |
++ u32 *cmpval, int requeue_pi) |
31249 |
++{ |
31250 |
++ union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
31251 |
++ int task_count = 0, ret; |
31252 |
++ struct futex_pi_state *pi_state = NULL; |
31253 |
++ struct futex_hash_bucket *hb1, *hb2; |
31254 |
++ struct futex_q *this, *next; |
31255 |
++ DEFINE_WAKE_Q(wake_q); |
31256 |
++ |
31257 |
++ if (nr_wake < 0 || nr_requeue < 0) |
31258 |
++ return -EINVAL; |
31259 |
++ |
31260 |
++ /* |
31261 |
++ * When PI not supported: return -ENOSYS if requeue_pi is true, |
31262 |
++ * consequently the compiler knows requeue_pi is always false past |
31263 |
++ * this point which will optimize away all the conditional code |
31264 |
++ * further down. |
31265 |
++ */ |
31266 |
++ if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi) |
31267 |
++ return -ENOSYS; |
31268 |
++ |
31269 |
++ if (requeue_pi) { |
31270 |
++ /* |
31271 |
++ * Requeue PI only works on two distinct uaddrs. This |
31272 |
++ * check is only valid for private futexes. See below. |
31273 |
++ */ |
31274 |
++ if (uaddr1 == uaddr2) |
31275 |
++ return -EINVAL; |
31276 |
++ |
31277 |
++ /* |
31278 |
++ * futex_requeue() allows the caller to define the number |
31279 |
++ * of waiters to wake up via the @nr_wake argument. With |
31280 |
++		 * more problems than it solves. Waking up a waiter only |
31282 |
++		 * makes sense if the PI futex @uaddr2 is uncontended as |
31282 |
++ * only sense if the PI futex @uaddr2 is uncontended as |
31283 |
++ * this allows the requeue code to acquire the futex |
31284 |
++ * @uaddr2 before waking the waiter. The waiter can then |
31285 |
++ * return to user space without further action. A secondary |
31286 |
++ * wakeup would just make the futex_wait_requeue_pi() |
31287 |
++ * handling more complex, because that code would have to |
31288 |
++ * look up pi_state and do more or less all the handling |
31289 |
++ * which the requeue code has to do for the to be requeued |
31290 |
++ * waiters. So restrict the number of waiters to wake to |
31291 |
++ * one, and only wake it up when the PI futex is |
31292 |
++ * uncontended. Otherwise requeue it and let the unlock of |
31293 |
++ * the PI futex handle the wakeup. |
31294 |
++ * |
31295 |
++ * All REQUEUE_PI users, e.g. pthread_cond_signal() and |
31296 |
++ * pthread_cond_broadcast() must use nr_wake=1. |
31297 |
++ */ |
31298 |
++ if (nr_wake != 1) |
31299 |
++ return -EINVAL; |
31300 |
++ |
31301 |
++ /* |
31302 |
++ * requeue_pi requires a pi_state, try to allocate it now |
31303 |
++ * without any locks in case it fails. |
31304 |
++ */ |
31305 |
++ if (refill_pi_state_cache()) |
31306 |
++ return -ENOMEM; |
31307 |
++ } |
31308 |
++ |
31309 |
++retry: |
31310 |
++ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
31311 |
++ if (unlikely(ret != 0)) |
31312 |
++ return ret; |
31313 |
++ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
31314 |
++ requeue_pi ? FUTEX_WRITE : FUTEX_READ); |
31315 |
++ if (unlikely(ret != 0)) |
31316 |
++ return ret; |
31317 |
++ |
31318 |
++ /* |
31319 |
++ * The check above which compares uaddrs is not sufficient for |
31320 |
++ * shared futexes. We need to compare the keys: |
31321 |
++ */ |
31322 |
++ if (requeue_pi && match_futex(&key1, &key2)) |
31323 |
++ return -EINVAL; |
31324 |
++ |
31325 |
++ hb1 = hash_futex(&key1); |
31326 |
++ hb2 = hash_futex(&key2); |
31327 |
++ |
31328 |
++retry_private: |
31329 |
++ hb_waiters_inc(hb2); |
31330 |
++ double_lock_hb(hb1, hb2); |
31331 |
++ |
31332 |
++ if (likely(cmpval != NULL)) { |
31333 |
++ u32 curval; |
31334 |
++ |
31335 |
++ ret = get_futex_value_locked(&curval, uaddr1); |
31336 |
++ |
31337 |
++ if (unlikely(ret)) { |
31338 |
++ double_unlock_hb(hb1, hb2); |
31339 |
++ hb_waiters_dec(hb2); |
31340 |
++ |
31341 |
++ ret = get_user(curval, uaddr1); |
31342 |
++ if (ret) |
31343 |
++ return ret; |
31344 |
++ |
31345 |
++ if (!(flags & FLAGS_SHARED)) |
31346 |
++ goto retry_private; |
31347 |
++ |
31348 |
++ goto retry; |
31349 |
++ } |
31350 |
++ if (curval != *cmpval) { |
31351 |
++ ret = -EAGAIN; |
31352 |
++ goto out_unlock; |
31353 |
++ } |
31354 |
++ } |
31355 |
++ |
31356 |
++ if (requeue_pi) { |
31357 |
++ struct task_struct *exiting = NULL; |
31358 |
++ |
31359 |
++ /* |
31360 |
++ * Attempt to acquire uaddr2 and wake the top waiter. If we |
31361 |
++ * intend to requeue waiters, force setting the FUTEX_WAITERS |
31362 |
++ * bit. We force this here where we are able to easily handle |
31363 |
++		 * faults rather than in the requeue loop below. |
31364 |
++ * |
31365 |
++ * Updates topwaiter::requeue_state if a top waiter exists. |
31366 |
++ */ |
31367 |
++ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, |
31368 |
++ &key2, &pi_state, |
31369 |
++ &exiting, nr_requeue); |
31370 |
++ |
31371 |
++ /* |
31372 |
++ * At this point the top_waiter has either taken uaddr2 or |
31373 |
++ * is waiting on it. In both cases pi_state has been |
31374 |
++		 * established and an initial refcount taken on it. In case of an |
31375 |
++ * error there's nothing. |
31376 |
++ * |
31377 |
++ * The top waiter's requeue_state is up to date: |
31378 |
++ * |
31379 |
++ * - If the lock was acquired atomically (ret == 1), then |
31380 |
++ * the state is Q_REQUEUE_PI_LOCKED. |
31381 |
++ * |
31382 |
++ * The top waiter has been dequeued and woken up and can |
31383 |
++ * return to user space immediately. The kernel/user |
31384 |
++ * space state is consistent. In case that there must be |
31385 |
++ * more waiters requeued the WAITERS bit in the user |
31386 |
++ * space futex is set so the top waiter task has to go |
31387 |
++ * into the syscall slowpath to unlock the futex. This |
31388 |
++ * will block until this requeue operation has been |
31389 |
++ * completed and the hash bucket locks have been |
31390 |
++ * dropped. |
31391 |
++ * |
31392 |
++ * - If the trylock failed with an error (ret < 0) then |
31393 |
++ * the state is either Q_REQUEUE_PI_NONE, i.e. "nothing |
31394 |
++ * happened", or Q_REQUEUE_PI_IGNORE when there was an |
31395 |
++ * interleaved early wakeup. |
31396 |
++ * |
31397 |
++ * - If the trylock did not succeed (ret == 0) then the |
31398 |
++ * state is either Q_REQUEUE_PI_IN_PROGRESS or |
31399 |
++ * Q_REQUEUE_PI_WAIT if an early wakeup interleaved. |
31400 |
++ * This will be cleaned up in the loop below, which |
31401 |
++ * cannot fail because futex_proxy_trylock_atomic() did |
31402 |
++ * the same sanity checks for requeue_pi as the loop |
31403 |
++ * below does. |
31404 |
++ */ |
31405 |
++ switch (ret) { |
31406 |
++ case 0: |
31407 |
++ /* We hold a reference on the pi state. */ |
31408 |
++ break; |
31409 |
++ |
31410 |
++ case 1: |
31411 |
++ /* |
31412 |
++ * futex_proxy_trylock_atomic() acquired the user space |
31413 |
++ * futex. Adjust task_count. |
31414 |
++ */ |
31415 |
++ task_count++; |
31416 |
++ ret = 0; |
31417 |
++ break; |
31418 |
++ |
31419 |
++ /* |
31420 |
++ * If the above failed, then pi_state is NULL and |
31421 |
++ * waiter::requeue_state is correct. |
31422 |
++ */ |
31423 |
++ case -EFAULT: |
31424 |
++ double_unlock_hb(hb1, hb2); |
31425 |
++ hb_waiters_dec(hb2); |
31426 |
++ ret = fault_in_user_writeable(uaddr2); |
31427 |
++ if (!ret) |
31428 |
++ goto retry; |
31429 |
++ return ret; |
31430 |
++ case -EBUSY: |
31431 |
++ case -EAGAIN: |
31432 |
++ /* |
31433 |
++ * Two reasons for this: |
31434 |
++ * - EBUSY: Owner is exiting and we just wait for the |
31435 |
++ * exit to complete. |
31436 |
++ * - EAGAIN: The user space value changed. |
31437 |
++ */ |
31438 |
++ double_unlock_hb(hb1, hb2); |
31439 |
++ hb_waiters_dec(hb2); |
31440 |
++ /* |
31441 |
++ * Handle the case where the owner is in the middle of |
31442 |
++ * exiting. Wait for the exit to complete otherwise |
31443 |
++ * this task might loop forever, aka. live lock. |
31444 |
++ */ |
31445 |
++ wait_for_owner_exiting(ret, exiting); |
31446 |
++ cond_resched(); |
31447 |
++ goto retry; |
31448 |
++ default: |
31449 |
++ goto out_unlock; |
31450 |
++ } |
31451 |
++ } |
31452 |
++ |
31453 |
++ plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
31454 |
++ if (task_count - nr_wake >= nr_requeue) |
31455 |
++ break; |
31456 |
++ |
31457 |
++ if (!match_futex(&this->key, &key1)) |
31458 |
++ continue; |
31459 |
++ |
31460 |
++ /* |
31461 |
++ * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always |
31462 |
++ * be paired with each other and no other futex ops. |
31463 |
++ * |
31464 |
++ * We should never be requeueing a futex_q with a pi_state, |
31465 |
++ * which is awaiting a futex_unlock_pi(). |
31466 |
++ */ |
31467 |
++ if ((requeue_pi && !this->rt_waiter) || |
31468 |
++ (!requeue_pi && this->rt_waiter) || |
31469 |
++ this->pi_state) { |
31470 |
++ ret = -EINVAL; |
31471 |
++ break; |
31472 |
++ } |
31473 |
++ |
31474 |
++ /* Plain futexes just wake or requeue and are done */ |
31475 |
++ if (!requeue_pi) { |
31476 |
++ if (++task_count <= nr_wake) |
31477 |
++ mark_wake_futex(&wake_q, this); |
31478 |
++ else |
31479 |
++ requeue_futex(this, hb1, hb2, &key2); |
31480 |
++ continue; |
31481 |
++ } |
31482 |
++ |
31483 |
++ /* Ensure we requeue to the expected futex for requeue_pi. */ |
31484 |
++ if (!match_futex(this->requeue_pi_key, &key2)) { |
31485 |
++ ret = -EINVAL; |
31486 |
++ break; |
31487 |
++ } |
31488 |
++ |
31489 |
++ /* |
31490 |
++ * Requeue nr_requeue waiters and possibly one more in the case |
31491 |
++ * of requeue_pi if we couldn't acquire the lock atomically. |
31492 |
++ * |
31493 |
++ * Prepare the waiter to take the rt_mutex. Take a refcount |
31494 |
++ * on the pi_state and store the pointer in the futex_q |
31495 |
++ * object of the waiter. |
31496 |
++ */ |
31497 |
++ get_pi_state(pi_state); |
31498 |
++ |
31499 |
++ /* Don't requeue when the waiter is already on the way out. */ |
31500 |
++ if (!futex_requeue_pi_prepare(this, pi_state)) { |
31501 |
++ /* |
31502 |
++ * Early woken waiter signaled that it is on the |
31503 |
++ * way out. Drop the pi_state reference and try the |
31504 |
++ * next waiter. @this->pi_state is still NULL. |
31505 |
++ */ |
31506 |
++ put_pi_state(pi_state); |
31507 |
++ continue; |
31508 |
++ } |
31509 |
++ |
31510 |
++ ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, |
31511 |
++ this->rt_waiter, |
31512 |
++ this->task); |
31513 |
++ |
31514 |
++ if (ret == 1) { |
31515 |
++ /* |
31516 |
++ * We got the lock. We do neither drop the refcount |
31517 |
++ * on pi_state nor clear this->pi_state because the |
31518 |
++ * waiter needs the pi_state for cleaning up the |
31519 |
++ * user space value. It will drop the refcount |
31520 |
++ * after doing so. this::requeue_state is updated |
31521 |
++ * in the wakeup as well. |
31522 |
++ */ |
31523 |
++ requeue_pi_wake_futex(this, &key2, hb2); |
31524 |
++ task_count++; |
31525 |
++ } else if (!ret) { |
31526 |
++ /* Waiter is queued, move it to hb2 */ |
31527 |
++ requeue_futex(this, hb1, hb2, &key2); |
31528 |
++ futex_requeue_pi_complete(this, 0); |
31529 |
++ task_count++; |
31530 |
++ } else { |
31531 |
++ /* |
31532 |
++ * rt_mutex_start_proxy_lock() detected a potential |
31533 |
++ * deadlock when we tried to queue that waiter. |
31534 |
++ * Drop the pi_state reference which we took above |
31535 |
++ * and remove the pointer to the state from the |
31536 |
++ * waiters futex_q object. |
31537 |
++ */ |
31538 |
++ this->pi_state = NULL; |
31539 |
++ put_pi_state(pi_state); |
31540 |
++ futex_requeue_pi_complete(this, ret); |
31541 |
++ /* |
31542 |
++ * We stop queueing more waiters and let user space |
31543 |
++ * deal with the mess. |
31544 |
++ */ |
31545 |
++ break; |
31546 |
++ } |
31547 |
++ } |
31548 |
++ |
31549 |
++ /* |
31550 |
++ * We took an extra initial reference to the pi_state in |
31551 |
++ * futex_proxy_trylock_atomic(). We need to drop it here again. |
31552 |
++ */ |
31553 |
++ put_pi_state(pi_state); |
31554 |
++ |
31555 |
++out_unlock: |
31556 |
++ double_unlock_hb(hb1, hb2); |
31557 |
++ wake_up_q(&wake_q); |
31558 |
++ hb_waiters_dec(hb2); |
31559 |
++ return ret ? ret : task_count; |
31560 |
++} |
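For the non-PI case, the loop above is what backs the FUTEX_CMP_REQUEUE operation. As an editorial sketch (not part of the patch; it assumes the raw futex(2) calling convention via syscall(2) and two hypothetical 32-bit futex words), a broadcast that wakes one waiter and requeues the rest onto a mutex word looks roughly like this:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>
#include <stdint.h>

/* Wake one waiter on 'cond' and requeue the remaining ones onto 'mutex',
 * but only if *cond still equals 'expected'; otherwise the call fails with
 * EAGAIN, mirroring the curval != *cmpval check in futex_requeue(). */
static long cond_broadcast_requeue(uint32_t *cond, uint32_t *mutex, uint32_t expected)
{
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE,
		       1,                      /* nr_wake */
		       (void *)(long)INT_MAX,  /* nr_requeue, passed in the timeout slot */
		       mutex,                  /* uaddr2 */
		       expected);              /* val3, compared against *cond */
}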
31561 |
++ |
31562 |
++/* The key must be already stored in q->key. */ |
31563 |
++static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) |
31564 |
++ __acquires(&hb->lock) |
31565 |
++{ |
31566 |
++ struct futex_hash_bucket *hb; |
31567 |
++ |
31568 |
++ hb = hash_futex(&q->key); |
31569 |
++ |
31570 |
++ /* |
31571 |
++ * Increment the counter before taking the lock so that |
31572 |
++ * a potential waker won't miss a to-be-slept task that is |
31573 |
++ * waiting for the spinlock. This is safe as all queue_lock() |
31574 |
++ * users end up calling queue_me(). Similarly, for housekeeping, |
31575 |
++ * decrement the counter at queue_unlock() when some error has |
31576 |
++ * occurred and we don't end up adding the task to the list. |
31577 |
++ */ |
31578 |
++ hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
31579 |
++ |
31580 |
++ q->lock_ptr = &hb->lock; |
31581 |
++ |
31582 |
++ spin_lock(&hb->lock); |
31583 |
++ return hb; |
31584 |
++} |
31585 |
++ |
31586 |
++static inline void |
31587 |
++queue_unlock(struct futex_hash_bucket *hb) |
31588 |
++ __releases(&hb->lock) |
31589 |
++{ |
31590 |
++ spin_unlock(&hb->lock); |
31591 |
++ hb_waiters_dec(hb); |
31592 |
++} |
31593 |
++ |
31594 |
++static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
31595 |
++{ |
31596 |
++ int prio; |
31597 |
++ |
31598 |
++ /* |
31599 |
++ * The priority used to register this element is |
31600 |
++ * - either the real thread-priority for the real-time threads |
31601 |
++ * (i.e. threads with a priority lower than MAX_RT_PRIO) |
31602 |
++ * - or MAX_RT_PRIO for non-RT threads. |
31603 |
++ * Thus, all RT-threads are woken first in priority order, and |
31604 |
++ * the others are woken last, in FIFO order. |
31605 |
++ */ |
31606 |
++ prio = min(current->normal_prio, MAX_RT_PRIO); |
31607 |
++ |
31608 |
++ plist_node_init(&q->list, prio); |
31609 |
++ plist_add(&q->list, &hb->chain); |
31610 |
++ q->task = current; |
31611 |
++} |
31612 |
++ |
31613 |
++/** |
31614 |
++ * queue_me() - Enqueue the futex_q on the futex_hash_bucket |
31615 |
++ * @q: The futex_q to enqueue |
31616 |
++ * @hb: The destination hash bucket |
31617 |
++ * |
31618 |
++ * The hb->lock must be held by the caller, and is released here. A call to |
31619 |
++ * queue_me() is typically paired with exactly one call to unqueue_me(). The |
31620 |
++ * exceptions involve the PI related operations, which may use unqueue_me_pi() |
31621 |
++ * or nothing if the unqueue is done as part of the wake process and the unqueue |
31622 |
++ * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
31623 |
++ * an example). |
31624 |
++ */ |
31625 |
++static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
31626 |
++ __releases(&hb->lock) |
31627 |
++{ |
31628 |
++ __queue_me(q, hb); |
31629 |
++ spin_unlock(&hb->lock); |
31630 |
++} |
31631 |
++ |
31632 |
++/** |
31633 |
++ * unqueue_me() - Remove the futex_q from its futex_hash_bucket |
31634 |
++ * @q: The futex_q to unqueue |
31635 |
++ * |
31636 |
++ * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must |
31637 |
++ * be paired with exactly one earlier call to queue_me(). |
31638 |
++ * |
31639 |
++ * Return: |
31640 |
++ * - 1 - if the futex_q was still queued (and we removed it);
31641 |
++ * - 0 - if the futex_q was already removed by the waking thread |
31642 |
++ */ |
31643 |
++static int unqueue_me(struct futex_q *q) |
31644 |
++{ |
31645 |
++ spinlock_t *lock_ptr; |
31646 |
++ int ret = 0; |
31647 |
++ |
31648 |
++ /* In the common case we don't take the spinlock, which is nice. */ |
31649 |
++retry: |
31650 |
++ /* |
31651 |
++ * q->lock_ptr can change between this read and the following spin_lock. |
31652 |
++ * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and |
31653 |
++ * optimizing lock_ptr out of the logic below. |
31654 |
++ */ |
31655 |
++ lock_ptr = READ_ONCE(q->lock_ptr); |
31656 |
++ if (lock_ptr != NULL) { |
31657 |
++ spin_lock(lock_ptr); |
31658 |
++ /* |
31659 |
++ * q->lock_ptr can change between reading it and |
31660 |
++ * spin_lock(), causing us to take the wrong lock. This |
31661 |
++ * corrects the race condition. |
31662 |
++ * |
31663 |
++ * Reasoning goes like this: if we have the wrong lock, |
31664 |
++ * q->lock_ptr must have changed (maybe several times) |
31665 |
++ * between reading it and the spin_lock(). It can |
31666 |
++ * change again after the spin_lock() but only if it was |
31667 |
++ * already changed before the spin_lock(). It cannot, |
31668 |
++ * however, change back to the original value. Therefore |
31669 |
++ * we can detect whether we acquired the correct lock. |
31670 |
++ */ |
31671 |
++ if (unlikely(lock_ptr != q->lock_ptr)) { |
31672 |
++ spin_unlock(lock_ptr); |
31673 |
++ goto retry; |
31674 |
++ } |
31675 |
++ __unqueue_futex(q); |
31676 |
++ |
31677 |
++ BUG_ON(q->pi_state); |
31678 |
++ |
31679 |
++ spin_unlock(lock_ptr); |
31680 |
++ ret = 1; |
31681 |
++ } |
31682 |
++ |
31683 |
++ return ret; |
31684 |
++} |
31685 |
++ |
31686 |
++/* |
31687 |
++ * PI futexes can not be requeued and must remove themselves from the |
31688 |
++ * hash bucket. The hash bucket lock (i.e. lock_ptr) is held. |
31689 |
++ */ |
31690 |
++static void unqueue_me_pi(struct futex_q *q) |
31691 |
++{ |
31692 |
++ __unqueue_futex(q); |
31693 |
++ |
31694 |
++ BUG_ON(!q->pi_state); |
31695 |
++ put_pi_state(q->pi_state); |
31696 |
++ q->pi_state = NULL; |
31697 |
++} |
31698 |
++ |
31699 |
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
31700 |
++ struct task_struct *argowner) |
31701 |
++{ |
31702 |
++ struct futex_pi_state *pi_state = q->pi_state; |
31703 |
++ struct task_struct *oldowner, *newowner; |
31704 |
++ u32 uval, curval, newval, newtid; |
31705 |
++ int err = 0; |
31706 |
++ |
31707 |
++ oldowner = pi_state->owner; |
31708 |
++ |
31709 |
++ /* |
31710 |
++ * We are here because either: |
31711 |
++ * |
31712 |
++ * - we stole the lock and pi_state->owner needs updating to reflect |
31713 |
++ * that (@argowner == current), |
31714 |
++ * |
31715 |
++ * or: |
31716 |
++ * |
31717 |
++ * - someone stole our lock and we need to fix things to point to the |
31718 |
++ * new owner (@argowner == NULL). |
31719 |
++ * |
31720 |
++ * Either way, we have to replace the TID in the user space variable. |
31721 |
++ * This must be atomic as we have to preserve the owner died bit here. |
31722 |
++ * |
31723 |
++ * Note: We write the user space value _before_ changing the pi_state |
31724 |
++ * because we can fault here. Imagine swapped out pages or a fork |
31725 |
++ * that marked all the anonymous memory readonly for cow. |
31726 |
++ * |
31727 |
++ * Modifying pi_state _before_ the user space value would leave the |
31728 |
++ * pi_state in an inconsistent state when we fault here, because we |
31729 |
++ * need to drop the locks to handle the fault. This might be observed |
31730 |
++	 * in the PID checks when attaching to PI state.
31731 |
++ */ |
31732 |
++retry: |
31733 |
++ if (!argowner) { |
31734 |
++ if (oldowner != current) { |
31735 |
++ /* |
31736 |
++ * We raced against a concurrent self; things are |
31737 |
++ * already fixed up. Nothing to do. |
31738 |
++ */ |
31739 |
++ return 0; |
31740 |
++ } |
31741 |
++ |
31742 |
++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { |
31743 |
++ /* We got the lock. pi_state is correct. Tell caller. */ |
31744 |
++ return 1; |
31745 |
++ } |
31746 |
++ |
31747 |
++ /* |
31748 |
++ * The trylock just failed, so either there is an owner or |
31749 |
++ * there is a higher priority waiter than this one. |
31750 |
++ */ |
31751 |
++ newowner = rt_mutex_owner(&pi_state->pi_mutex); |
31752 |
++ /* |
31753 |
++ * If the higher priority waiter has not yet taken over the |
31754 |
++ * rtmutex then newowner is NULL. We can't return here with |
31755 |
++ * that state because it's inconsistent vs. the user space |
31756 |
++ * state. So drop the locks and try again. It's a valid |
31757 |
++ * situation and not any different from the other retry |
31758 |
++ * conditions. |
31759 |
++ */ |
31760 |
++ if (unlikely(!newowner)) { |
31761 |
++ err = -EAGAIN; |
31762 |
++ goto handle_err; |
31763 |
++ } |
31764 |
++ } else { |
31765 |
++ WARN_ON_ONCE(argowner != current); |
31766 |
++ if (oldowner == current) { |
31767 |
++ /* |
31768 |
++ * We raced against a concurrent self; things are |
31769 |
++ * already fixed up. Nothing to do. |
31770 |
++ */ |
31771 |
++ return 1; |
31772 |
++ } |
31773 |
++ newowner = argowner; |
31774 |
++ } |
31775 |
++ |
31776 |
++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
31777 |
++ /* Owner died? */ |
31778 |
++ if (!pi_state->owner) |
31779 |
++ newtid |= FUTEX_OWNER_DIED; |
31780 |
++ |
31781 |
++ err = get_futex_value_locked(&uval, uaddr); |
31782 |
++ if (err) |
31783 |
++ goto handle_err; |
31784 |
++ |
31785 |
++ for (;;) { |
31786 |
++ newval = (uval & FUTEX_OWNER_DIED) | newtid; |
31787 |
++ |
31788 |
++ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); |
31789 |
++ if (err) |
31790 |
++ goto handle_err; |
31791 |
++ |
31792 |
++ if (curval == uval) |
31793 |
++ break; |
31794 |
++ uval = curval; |
31795 |
++ } |
31796 |
++ |
31797 |
++ /* |
31798 |
++ * We fixed up user space. Now we need to fix the pi_state |
31799 |
++ * itself. |
31800 |
++ */ |
31801 |
++ pi_state_update_owner(pi_state, newowner); |
31802 |
++ |
31803 |
++ return argowner == current; |
31804 |
++ |
31805 |
++ /* |
31806 |
++ * In order to reschedule or handle a page fault, we need to drop the |
31807 |
++ * locks here. In the case of a fault, this gives the other task |
31808 |
++ * (either the highest priority waiter itself or the task which stole |
31809 |
++ * the rtmutex) the chance to try the fixup of the pi_state. So once we |
31810 |
++ * are back from handling the fault we need to check the pi_state after |
31811 |
++ * reacquiring the locks and before trying to do another fixup. When |
31812 |
++ * the fixup has been done already we simply return. |
31813 |
++ * |
31814 |
++ * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely |
31815 |
++ * drop hb->lock since the caller owns the hb -> futex_q relation. |
31816 |
++ * Dropping the pi_mutex->wait_lock requires the state revalidate. |
31817 |
++ */ |
31818 |
++handle_err: |
31819 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
31820 |
++ spin_unlock(q->lock_ptr); |
31821 |
++ |
31822 |
++ switch (err) { |
31823 |
++ case -EFAULT: |
31824 |
++ err = fault_in_user_writeable(uaddr); |
31825 |
++ break; |
31826 |
++ |
31827 |
++ case -EAGAIN: |
31828 |
++ cond_resched(); |
31829 |
++ err = 0; |
31830 |
++ break; |
31831 |
++ |
31832 |
++ default: |
31833 |
++ WARN_ON_ONCE(1); |
31834 |
++ break; |
31835 |
++ } |
31836 |
++ |
31837 |
++ spin_lock(q->lock_ptr); |
31838 |
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
31839 |
++ |
31840 |
++ /* |
31841 |
++ * Check if someone else fixed it for us: |
31842 |
++ */ |
31843 |
++ if (pi_state->owner != oldowner) |
31844 |
++ return argowner == current; |
31845 |
++ |
31846 |
++	/* Retry if err was -EAGAIN or the fault-in succeeded */
31847 |
++ if (!err) |
31848 |
++ goto retry; |
31849 |
++ |
31850 |
++ /* |
31851 |
++ * fault_in_user_writeable() failed so user state is immutable. At |
31852 |
++ * best we can make the kernel state consistent but user state will |
31853 |
++ * be most likely hosed and any subsequent unlock operation will be |
31854 |
++ * rejected due to PI futex rule [10]. |
31855 |
++ * |
31856 |
++ * Ensure that the rtmutex owner is also the pi_state owner despite |
31857 |
++ * the user space value claiming something different. There is no |
31858 |
++ * point in unlocking the rtmutex if current is the owner as it |
31859 |
++ * would need to wait until the next waiter has taken the rtmutex |
31860 |
++ * to guarantee consistent state. Keep it simple. Userspace asked |
31861 |
++	 * for this wrecked state.
31862 |
++ * |
31863 |
++ * The rtmutex has an owner - either current or some other |
31864 |
++ * task. See the EAGAIN loop above. |
31865 |
++ */ |
31866 |
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); |
31867 |
++ |
31868 |
++ return err; |
31869 |
++} |
31870 |
++ |
31871 |
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
31872 |
++ struct task_struct *argowner) |
31873 |
++{ |
31874 |
++ struct futex_pi_state *pi_state = q->pi_state; |
31875 |
++ int ret; |
31876 |
++ |
31877 |
++ lockdep_assert_held(q->lock_ptr); |
31878 |
++ |
31879 |
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
31880 |
++ ret = __fixup_pi_state_owner(uaddr, q, argowner); |
31881 |
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
31882 |
++ return ret; |
31883 |
++} |
31884 |
++ |
31885 |
++static long futex_wait_restart(struct restart_block *restart); |
31886 |
++ |
31887 |
++/** |
31888 |
++ * fixup_owner() - Post lock pi_state and corner case management |
31889 |
++ * @uaddr: user address of the futex |
31890 |
++ * @q: futex_q (contains pi_state and access to the rt_mutex) |
31891 |
++ * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) |
31892 |
++ * |
31893 |
++ * After attempting to lock an rt_mutex, this function is called to cleanup |
31894 |
++ * the pi_state owner as well as handle race conditions that may allow us to |
31895 |
++ * acquire the lock. Must be called with the hb lock held. |
31896 |
++ * |
31897 |
++ * Return: |
31898 |
++ * - 1 - success, lock taken; |
31899 |
++ * - 0 - success, lock not taken; |
31900 |
++ * - <0 - on error (-EFAULT) |
31901 |
++ */ |
31902 |
++static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
31903 |
++{ |
31904 |
++ if (locked) { |
31905 |
++ /* |
31906 |
++ * Got the lock. We might not be the anticipated owner if we |
31907 |
++ * did a lock-steal - fix up the PI-state in that case: |
31908 |
++ * |
31909 |
++ * Speculative pi_state->owner read (we don't hold wait_lock); |
31910 |
++ * since we own the lock pi_state->owner == current is the |
31911 |
++ * stable state, anything else needs more attention. |
31912 |
++ */ |
31913 |
++ if (q->pi_state->owner != current) |
31914 |
++ return fixup_pi_state_owner(uaddr, q, current); |
31915 |
++ return 1; |
31916 |
++ } |
31917 |
++ |
31918 |
++ /* |
31919 |
++ * If we didn't get the lock; check if anybody stole it from us. In |
31920 |
++ * that case, we need to fix up the uval to point to them instead of |
31921 |
++ * us, otherwise bad things happen. [10] |
31922 |
++ * |
31923 |
++ * Another speculative read; pi_state->owner == current is unstable |
31924 |
++ * but needs our attention. |
31925 |
++ */ |
31926 |
++ if (q->pi_state->owner == current) |
31927 |
++ return fixup_pi_state_owner(uaddr, q, NULL); |
31928 |
++ |
31929 |
++ /* |
31930 |
++ * Paranoia check. If we did not take the lock, then we should not be |
31931 |
++ * the owner of the rt_mutex. Warn and establish consistent state. |
31932 |
++ */ |
31933 |
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) |
31934 |
++ return fixup_pi_state_owner(uaddr, q, current); |
31935 |
++ |
31936 |
++ return 0; |
31937 |
++} |
31938 |
++ |
31939 |
++/** |
31940 |
++ * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal |
31941 |
++ * @hb: the futex hash bucket, must be locked by the caller |
31942 |
++ * @q: the futex_q to queue up on |
31943 |
++ * @timeout: the prepared hrtimer_sleeper, or null for no timeout |
31944 |
++ */ |
31945 |
++static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, |
31946 |
++ struct hrtimer_sleeper *timeout) |
31947 |
++{ |
31948 |
++ /* |
31949 |
++ * The task state is guaranteed to be set before another task can |
31950 |
++ * wake it. set_current_state() is implemented using smp_store_mb() and |
31951 |
++ * queue_me() calls spin_unlock() upon completion, both serializing |
31952 |
++ * access to the hash list and forcing another memory barrier. |
31953 |
++ */ |
31954 |
++ set_current_state(TASK_INTERRUPTIBLE); |
31955 |
++ queue_me(q, hb); |
31956 |
++ |
31957 |
++ /* Arm the timer */ |
31958 |
++ if (timeout) |
31959 |
++ hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS); |
31960 |
++ |
31961 |
++ /* |
31962 |
++ * If we have been removed from the hash list, then another task |
31963 |
++ * has tried to wake us, and we can skip the call to schedule(). |
31964 |
++ */ |
31965 |
++ if (likely(!plist_node_empty(&q->list))) { |
31966 |
++ /* |
31967 |
++ * If the timer has already expired, current will already be |
31968 |
++ * flagged for rescheduling. Only call schedule if there |
31969 |
++ * is no timeout, or if it has yet to expire. |
31970 |
++ */ |
31971 |
++ if (!timeout || timeout->task) |
31972 |
++ freezable_schedule(); |
31973 |
++ } |
31974 |
++ __set_current_state(TASK_RUNNING); |
31975 |
++} |
31976 |
++ |
31977 |
++/** |
31978 |
++ * futex_wait_setup() - Prepare to wait on a futex |
31979 |
++ * @uaddr: the futex userspace address |
31980 |
++ * @val: the expected value |
31981 |
++ * @flags: futex flags (FLAGS_SHARED, etc.) |
31982 |
++ * @q: the associated futex_q |
31983 |
++ * @hb: storage for hash_bucket pointer to be returned to caller |
31984 |
++ * |
31985 |
++ * Setup the futex_q and locate the hash_bucket. Get the futex value and |
31986 |
++ * compare it with the expected value. Handle atomic faults internally. |
31987 |
++ * Return with the hb lock held on success, and unlocked on failure. |
31988 |
++ * |
31989 |
++ * Return: |
31990 |
++ * - 0 - uaddr contains val and hb has been locked; |
31991 |
++ * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
31992 |
++ */ |
31993 |
++static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
31994 |
++ struct futex_q *q, struct futex_hash_bucket **hb) |
31995 |
++{ |
31996 |
++ u32 uval; |
31997 |
++ int ret; |
31998 |
++ |
31999 |
++ /* |
32000 |
++ * Access the page AFTER the hash-bucket is locked. |
32001 |
++ * Order is important: |
32002 |
++ * |
32003 |
++ * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); |
32004 |
++ * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } |
32005 |
++ * |
32006 |
++ * The basic logical guarantee of a futex is that it blocks ONLY |
32007 |
++ * if cond(var) is known to be true at the time of blocking, for |
32008 |
++ * any cond. If we locked the hash-bucket after testing *uaddr, that |
32009 |
++ * would open a race condition where we could block indefinitely with |
32010 |
++ * cond(var) false, which would violate the guarantee. |
32011 |
++ * |
32012 |
++ * On the other hand, we insert q and release the hash-bucket only |
32013 |
++ * after testing *uaddr. This guarantees that futex_wait() will NOT |
32014 |
++ * absorb a wakeup if *uaddr does not match the desired values |
32015 |
++ * while the syscall executes. |
32016 |
++ */ |
32017 |
++retry: |
32018 |
++ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); |
32019 |
++ if (unlikely(ret != 0)) |
32020 |
++ return ret; |
32021 |
++ |
32022 |
++retry_private: |
32023 |
++ *hb = queue_lock(q); |
32024 |
++ |
32025 |
++ ret = get_futex_value_locked(&uval, uaddr); |
32026 |
++ |
32027 |
++ if (ret) { |
32028 |
++ queue_unlock(*hb); |
32029 |
++ |
32030 |
++ ret = get_user(uval, uaddr); |
32031 |
++ if (ret) |
32032 |
++ return ret; |
32033 |
++ |
32034 |
++ if (!(flags & FLAGS_SHARED)) |
32035 |
++ goto retry_private; |
32036 |
++ |
32037 |
++ goto retry; |
32038 |
++ } |
32039 |
++ |
32040 |
++ if (uval != val) { |
32041 |
++ queue_unlock(*hb); |
32042 |
++ ret = -EWOULDBLOCK; |
32043 |
++ } |
32044 |
++ |
32045 |
++ return ret; |
32046 |
++} |
32047 |
++ |
32048 |
++static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, |
32049 |
++ ktime_t *abs_time, u32 bitset) |
32050 |
++{ |
32051 |
++ struct hrtimer_sleeper timeout, *to; |
32052 |
++ struct restart_block *restart; |
32053 |
++ struct futex_hash_bucket *hb; |
32054 |
++ struct futex_q q = futex_q_init; |
32055 |
++ int ret; |
32056 |
++ |
32057 |
++ if (!bitset) |
32058 |
++ return -EINVAL; |
32059 |
++ q.bitset = bitset; |
32060 |
++ |
32061 |
++ to = futex_setup_timer(abs_time, &timeout, flags, |
32062 |
++ current->timer_slack_ns); |
32063 |
++retry: |
32064 |
++ /* |
32065 |
++ * Prepare to wait on uaddr. On success, it holds hb->lock and q |
32066 |
++ * is initialized. |
32067 |
++ */ |
32068 |
++ ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
32069 |
++ if (ret) |
32070 |
++ goto out; |
32071 |
++ |
32072 |
++ /* queue_me and wait for wakeup, timeout, or a signal. */ |
32073 |
++ futex_wait_queue_me(hb, &q, to); |
32074 |
++ |
32075 |
++ /* If we were woken (and unqueued), we succeeded, whatever. */ |
32076 |
++ ret = 0; |
32077 |
++ if (!unqueue_me(&q)) |
32078 |
++ goto out; |
32079 |
++ ret = -ETIMEDOUT; |
32080 |
++ if (to && !to->task) |
32081 |
++ goto out; |
32082 |
++ |
32083 |
++ /* |
32084 |
++ * We expect signal_pending(current), but we might be the |
32085 |
++ * victim of a spurious wakeup as well. |
32086 |
++ */ |
32087 |
++ if (!signal_pending(current)) |
32088 |
++ goto retry; |
32089 |
++ |
32090 |
++ ret = -ERESTARTSYS; |
32091 |
++ if (!abs_time) |
32092 |
++ goto out; |
32093 |
++ |
32094 |
++	restart = &current->restart_block;
32095 |
++ restart->futex.uaddr = uaddr; |
32096 |
++ restart->futex.val = val; |
32097 |
++ restart->futex.time = *abs_time; |
32098 |
++ restart->futex.bitset = bitset; |
32099 |
++ restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
32100 |
++ |
32101 |
++ ret = set_restart_fn(restart, futex_wait_restart); |
32102 |
++ |
32103 |
++out: |
32104 |
++ if (to) { |
32105 |
++ hrtimer_cancel(&to->timer); |
32106 |
++ destroy_hrtimer_on_stack(&to->timer); |
32107 |
++ } |
32108 |
++ return ret; |
32109 |
++} |
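The ordering argument spelled out in futex_wait_setup() above corresponds to the classic userspace wait/wake pattern. A minimal sketch (editorial illustration, not from the patch; assumes a process-private futex word):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint32_t flag;   /* 0 = not ready, 1 = ready */

static void waiter(void)
{
	while (atomic_load(&flag) == 0) {
		/* Sleeps only if *uaddr still equals 0; a concurrent store
		 * plus FUTEX_WAKE makes this return (or fail with EAGAIN). */
		syscall(SYS_futex, &flag, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	}
}

static void waker(void)
{
	atomic_store(&flag, 1);                                           /* var = new */
	syscall(SYS_futex, &flag, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);  /* futex_wake */
}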
32110 |
++ |
32111 |
++ |
32112 |
++static long futex_wait_restart(struct restart_block *restart) |
32113 |
++{ |
32114 |
++ u32 __user *uaddr = restart->futex.uaddr; |
32115 |
++ ktime_t t, *tp = NULL; |
32116 |
++ |
32117 |
++ if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
32118 |
++ t = restart->futex.time; |
32119 |
++ tp = &t; |
32120 |
++ } |
32121 |
++ restart->fn = do_no_restart_syscall; |
32122 |
++ |
32123 |
++ return (long)futex_wait(uaddr, restart->futex.flags, |
32124 |
++ restart->futex.val, tp, restart->futex.bitset); |
32125 |
++} |
32126 |
++ |
32127 |
++ |
32128 |
++/* |
32129 |
++ * Userspace tried a 0 -> TID atomic transition of the futex value |
32130 |
++ * and failed. The kernel side here does the whole locking operation: |
32131 |
++ * if there are waiters then it will block as a consequence of relying |
32132 |
++ * on rt-mutexes, it does PI, etc. (Due to races the kernel might see |
32133 |
++ * a 0 value of the futex too.)
32134 |
++ * |
32135 |
++ * Also serves as the futex trylock_pi() slow path, with the corresponding semantics.
32136 |
++ */ |
32137 |
++static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
32138 |
++ ktime_t *time, int trylock) |
32139 |
++{ |
32140 |
++ struct hrtimer_sleeper timeout, *to; |
32141 |
++ struct task_struct *exiting = NULL; |
32142 |
++ struct rt_mutex_waiter rt_waiter; |
32143 |
++ struct futex_hash_bucket *hb; |
32144 |
++ struct futex_q q = futex_q_init; |
32145 |
++ int res, ret; |
32146 |
++ |
32147 |
++ if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
32148 |
++ return -ENOSYS; |
32149 |
++ |
32150 |
++ if (refill_pi_state_cache()) |
32151 |
++ return -ENOMEM; |
32152 |
++ |
32153 |
++ to = futex_setup_timer(time, &timeout, flags, 0); |
32154 |
++ |
32155 |
++retry: |
32156 |
++ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); |
32157 |
++ if (unlikely(ret != 0)) |
32158 |
++ goto out; |
32159 |
++ |
32160 |
++retry_private: |
32161 |
++ hb = queue_lock(&q); |
32162 |
++ |
32163 |
++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, |
32164 |
++ &exiting, 0); |
32165 |
++ if (unlikely(ret)) { |
32166 |
++ /* |
32167 |
++ * Atomic work succeeded and we got the lock, |
32168 |
++ * or failed. Either way, we do _not_ block. |
32169 |
++ */ |
32170 |
++ switch (ret) { |
32171 |
++ case 1: |
32172 |
++ /* We got the lock. */ |
32173 |
++ ret = 0; |
32174 |
++ goto out_unlock_put_key; |
32175 |
++ case -EFAULT: |
32176 |
++ goto uaddr_faulted; |
32177 |
++ case -EBUSY: |
32178 |
++ case -EAGAIN: |
32179 |
++ /* |
32180 |
++ * Two reasons for this: |
32181 |
++ * - EBUSY: Task is exiting and we just wait for the |
32182 |
++ * exit to complete. |
32183 |
++ * - EAGAIN: The user space value changed. |
32184 |
++ */ |
32185 |
++ queue_unlock(hb); |
32186 |
++ /* |
32187 |
++ * Handle the case where the owner is in the middle of |
32188 |
++ * exiting. Wait for the exit to complete otherwise |
32189 |
++ * this task might loop forever, aka. live lock. |
32190 |
++ */ |
32191 |
++ wait_for_owner_exiting(ret, exiting); |
32192 |
++ cond_resched(); |
32193 |
++ goto retry; |
32194 |
++ default: |
32195 |
++ goto out_unlock_put_key; |
32196 |
++ } |
32197 |
++ } |
32198 |
++ |
32199 |
++ WARN_ON(!q.pi_state); |
32200 |
++ |
32201 |
++ /* |
32202 |
++ * Only actually queue now that the atomic ops are done: |
32203 |
++ */ |
32204 |
++ __queue_me(&q, hb); |
32205 |
++ |
32206 |
++ if (trylock) { |
32207 |
++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); |
32208 |
++ /* Fixup the trylock return value: */ |
32209 |
++ ret = ret ? 0 : -EWOULDBLOCK; |
32210 |
++ goto no_block; |
32211 |
++ } |
32212 |
++ |
32213 |
++ rt_mutex_init_waiter(&rt_waiter); |
32214 |
++ |
32215 |
++ /* |
32216 |
++ * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not |
32217 |
++ * hold it while doing rt_mutex_start_proxy(), because then it will |
32218 |
++	 * include hb->lock in the blocking chain, even though we'll not in
32219 |
++ * fact hold it while blocking. This will lead it to report -EDEADLK |
32220 |
++ * and BUG when futex_unlock_pi() interleaves with this. |
32221 |
++ * |
32222 |
++ * Therefore acquire wait_lock while holding hb->lock, but drop the |
32223 |
++ * latter before calling __rt_mutex_start_proxy_lock(). This |
32224 |
++ * interleaves with futex_unlock_pi() -- which does a similar lock |
32225 |
++ * handoff -- such that the latter can observe the futex_q::pi_state |
32226 |
++ * before __rt_mutex_start_proxy_lock() is done. |
32227 |
++ */ |
32228 |
++ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); |
32229 |
++ spin_unlock(q.lock_ptr); |
32230 |
++ /* |
32231 |
++ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter |
32232 |
++ * such that futex_unlock_pi() is guaranteed to observe the waiter when |
32233 |
++ * it sees the futex_q::pi_state. |
32234 |
++ */ |
32235 |
++ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); |
32236 |
++ raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); |
32237 |
++ |
32238 |
++ if (ret) { |
32239 |
++ if (ret == 1) |
32240 |
++ ret = 0; |
32241 |
++ goto cleanup; |
32242 |
++ } |
32243 |
++ |
32244 |
++ if (unlikely(to)) |
32245 |
++ hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); |
32246 |
++ |
32247 |
++ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
32248 |
++ |
32249 |
++cleanup: |
32250 |
++ spin_lock(q.lock_ptr); |
32251 |
++ /* |
32252 |
++ * If we failed to acquire the lock (deadlock/signal/timeout), we must |
32253 |
++ * first acquire the hb->lock before removing the lock from the |
32254 |
++ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait |
32255 |
++ * lists consistent. |
32256 |
++ * |
32257 |
++ * In particular; it is important that futex_unlock_pi() can not |
32258 |
++ * observe this inconsistency. |
32259 |
++ */ |
32260 |
++ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) |
32261 |
++ ret = 0; |
32262 |
++ |
32263 |
++no_block: |
32264 |
++ /* |
32265 |
++ * Fixup the pi_state owner and possibly acquire the lock if we |
32266 |
++ * haven't already. |
32267 |
++ */ |
32268 |
++ res = fixup_owner(uaddr, &q, !ret); |
32269 |
++ /* |
32270 |
++ * If fixup_owner() returned an error, propagate that. If it acquired |
32271 |
++ * the lock, clear our -ETIMEDOUT or -EINTR. |
32272 |
++ */ |
32273 |
++ if (res) |
32274 |
++ ret = (res < 0) ? res : 0; |
32275 |
++ |
32276 |
++ unqueue_me_pi(&q); |
32277 |
++ spin_unlock(q.lock_ptr); |
32278 |
++ goto out; |
32279 |
++ |
32280 |
++out_unlock_put_key: |
32281 |
++ queue_unlock(hb); |
32282 |
++ |
32283 |
++out: |
32284 |
++ if (to) { |
32285 |
++ hrtimer_cancel(&to->timer); |
32286 |
++ destroy_hrtimer_on_stack(&to->timer); |
32287 |
++ } |
32288 |
++ return ret != -EINTR ? ret : -ERESTARTNOINTR; |
32289 |
++ |
32290 |
++uaddr_faulted: |
32291 |
++ queue_unlock(hb); |
32292 |
++ |
32293 |
++ ret = fault_in_user_writeable(uaddr); |
32294 |
++ if (ret) |
32295 |
++ goto out; |
32296 |
++ |
32297 |
++ if (!(flags & FLAGS_SHARED)) |
32298 |
++ goto retry_private; |
32299 |
++ |
32300 |
++ goto retry; |
32301 |
++} |
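The "0 -> TID" transition mentioned above happens in userspace; only the contended path reaches futex_lock_pi(). A hedged sketch of how a caller might combine the two (illustrative only; assumes a private PI futex word and SYS_gettid, not code from this patch):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdatomic.h>

static int pi_mutex_lock(_Atomic uint32_t *futex)
{
	uint32_t zero = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fast path: 0 -> TID means we now own an uncontended lock. */
	if (atomic_compare_exchange_strong(futex, &zero, tid))
		return 0;

	/* Contended (or stale value): let the kernel sort out ownership,
	 * waiters and priority inheritance. */
	return (int)syscall(SYS_futex, futex, FUTEX_LOCK_PI_PRIVATE, 0, NULL, NULL, 0);
}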
32302 |
++ |
32303 |
++/* |
32304 |
++ * Userspace attempted a TID -> 0 atomic transition, and failed. |
32305 |
++ * This is the in-kernel slowpath: we look up the PI state (if any), |
32306 |
++ * and do the rt-mutex unlock. |
32307 |
++ */ |
32308 |
++static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
32309 |
++{ |
32310 |
++ u32 curval, uval, vpid = task_pid_vnr(current); |
32311 |
++ union futex_key key = FUTEX_KEY_INIT; |
32312 |
++ struct futex_hash_bucket *hb; |
32313 |
++ struct futex_q *top_waiter; |
32314 |
++ int ret; |
32315 |
++ |
32316 |
++ if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
32317 |
++ return -ENOSYS; |
32318 |
++ |
32319 |
++retry: |
32320 |
++ if (get_user(uval, uaddr)) |
32321 |
++ return -EFAULT; |
32322 |
++ /* |
32323 |
++ * We release only a lock we actually own: |
32324 |
++ */ |
32325 |
++ if ((uval & FUTEX_TID_MASK) != vpid) |
32326 |
++ return -EPERM; |
32327 |
++ |
32328 |
++ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE); |
32329 |
++ if (ret) |
32330 |
++ return ret; |
32331 |
++ |
32332 |
++ hb = hash_futex(&key); |
32333 |
++ spin_lock(&hb->lock); |
32334 |
++ |
32335 |
++ /* |
32336 |
++ * Check waiters first. We do not trust user space values at |
32337 |
++ * all and we at least want to know if user space fiddled |
32338 |
++ * with the futex value instead of blindly unlocking. |
32339 |
++ */ |
32340 |
++ top_waiter = futex_top_waiter(hb, &key); |
32341 |
++ if (top_waiter) { |
32342 |
++ struct futex_pi_state *pi_state = top_waiter->pi_state; |
32343 |
++ |
32344 |
++ ret = -EINVAL; |
32345 |
++ if (!pi_state) |
32346 |
++ goto out_unlock; |
32347 |
++ |
32348 |
++ /* |
32349 |
++ * If current does not own the pi_state then the futex is |
32350 |
++ * inconsistent and user space fiddled with the futex value. |
32351 |
++ */ |
32352 |
++ if (pi_state->owner != current) |
32353 |
++ goto out_unlock; |
32354 |
++ |
32355 |
++ get_pi_state(pi_state); |
32356 |
++ /* |
32357 |
++ * By taking wait_lock while still holding hb->lock, we ensure |
32358 |
++ * there is no point where we hold neither; and therefore |
32359 |
++ * wake_futex_pi() must observe a state consistent with what we |
32360 |
++ * observed. |
32361 |
++ * |
32362 |
++ * In particular; this forces __rt_mutex_start_proxy() to |
32363 |
++ * complete such that we're guaranteed to observe the |
32364 |
++ * rt_waiter. Also see the WARN in wake_futex_pi(). |
32365 |
++ */ |
32366 |
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
32367 |
++ spin_unlock(&hb->lock); |
32368 |
++ |
32369 |
++ /* drops pi_state->pi_mutex.wait_lock */ |
32370 |
++ ret = wake_futex_pi(uaddr, uval, pi_state); |
32371 |
++ |
32372 |
++ put_pi_state(pi_state); |
32373 |
++ |
32374 |
++ /* |
32375 |
++ * Success, we're done! No tricky corner cases. |
32376 |
++ */ |
32377 |
++ if (!ret) |
32378 |
++ return ret; |
32379 |
++ /* |
32380 |
++ * The atomic access to the futex value generated a |
32381 |
++ * pagefault, so retry the user-access and the wakeup: |
32382 |
++ */ |
32383 |
++ if (ret == -EFAULT) |
32384 |
++ goto pi_faulted; |
32385 |
++ /* |
32386 |
++		 * An unconditional UNLOCK_PI op raced against a waiter
32387 |
++ * setting the FUTEX_WAITERS bit. Try again. |
32388 |
++ */ |
32389 |
++ if (ret == -EAGAIN) |
32390 |
++ goto pi_retry; |
32391 |
++ /* |
32392 |
++ * wake_futex_pi has detected invalid state. Tell user |
32393 |
++ * space. |
32394 |
++ */ |
32395 |
++ return ret; |
32396 |
++ } |
32397 |
++ |
32398 |
++ /* |
32399 |
++ * We have no kernel internal state, i.e. no waiters in the |
32400 |
++ * kernel. Waiters which are about to queue themselves are stuck |
32401 |
++ * on hb->lock. So we can safely ignore them. We do neither |
32402 |
++	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
32403 |
++ * owner. |
32404 |
++ */ |
32405 |
++ if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { |
32406 |
++ spin_unlock(&hb->lock); |
32407 |
++ switch (ret) { |
32408 |
++ case -EFAULT: |
32409 |
++ goto pi_faulted; |
32410 |
++ |
32411 |
++ case -EAGAIN: |
32412 |
++ goto pi_retry; |
32413 |
++ |
32414 |
++ default: |
32415 |
++ WARN_ON_ONCE(1); |
32416 |
++ return ret; |
32417 |
++ } |
32418 |
++ } |
32419 |
++ |
32420 |
++ /* |
32421 |
++ * If uval has changed, let user space handle it. |
32422 |
++ */ |
32423 |
++ ret = (curval == uval) ? 0 : -EAGAIN; |
32424 |
++ |
32425 |
++out_unlock: |
32426 |
++ spin_unlock(&hb->lock); |
32427 |
++ return ret; |
32428 |
++ |
32429 |
++pi_retry: |
32430 |
++ cond_resched(); |
32431 |
++ goto retry; |
32432 |
++ |
32433 |
++pi_faulted: |
32434 |
++ |
32435 |
++ ret = fault_in_user_writeable(uaddr); |
32436 |
++ if (!ret) |
32437 |
++ goto retry; |
32438 |
++ |
32439 |
++ return ret; |
32440 |
++} |
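The unlock side is the mirror image: userspace tries the TID -> 0 transition first and only enters futex_unlock_pi() when waiter or state bits are set. A rough sketch under the same assumptions as the lock example above (again an editorial illustration, not part of the patch):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdatomic.h>

static int pi_mutex_unlock(_Atomic uint32_t *futex)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fast path: we own it and no waiter/state bits are set. */
	if (atomic_compare_exchange_strong(futex, &tid, 0))
		return 0;

	/* Slow path: the kernel wakes the top waiter and fixes up the value. */
	return (int)syscall(SYS_futex, futex, FUTEX_UNLOCK_PI_PRIVATE, 0, NULL, NULL, 0);
}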
32441 |
++ |
32442 |
++/** |
32443 |
++ * handle_early_requeue_pi_wakeup() - Handle early wakeup on the initial futex |
32444 |
++ * @hb: the hash_bucket futex_q was originally enqueued on
32445 |
++ * @q: the futex_q woken while waiting to be requeued |
32446 |
++ * @timeout: the timeout associated with the wait (NULL if none) |
32447 |
++ * |
32448 |
++ * Determine the cause for the early wakeup. |
32449 |
++ * |
32450 |
++ * Return: |
32451 |
++ * -EWOULDBLOCK or -ETIMEDOUT or -ERESTARTNOINTR |
32452 |
++ */ |
32453 |
++static inline |
32454 |
++int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, |
32455 |
++ struct futex_q *q, |
32456 |
++ struct hrtimer_sleeper *timeout) |
32457 |
++{ |
32458 |
++ int ret; |
32459 |
++ |
32460 |
++ /* |
32461 |
++ * With the hb lock held, we avoid races while we process the wakeup. |
32462 |
++ * We only need to hold hb (and not hb2) to ensure atomicity as the |
32463 |
++ * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. |
32464 |
++ * It can't be requeued from uaddr2 to something else since we don't |
32465 |
++ * support a PI aware source futex for requeue. |
32466 |
++ */ |
32467 |
++ WARN_ON_ONCE(&hb->lock != q->lock_ptr); |
32468 |
++ |
32469 |
++ /* |
32470 |
++ * We were woken prior to requeue by a timeout or a signal. |
32471 |
++ * Unqueue the futex_q and determine which it was. |
32472 |
++ */ |
32473 |
++ plist_del(&q->list, &hb->chain); |
32474 |
++ hb_waiters_dec(hb); |
32475 |
++ |
32476 |
++ /* Handle spurious wakeups gracefully */ |
32477 |
++ ret = -EWOULDBLOCK; |
32478 |
++ if (timeout && !timeout->task) |
32479 |
++ ret = -ETIMEDOUT; |
32480 |
++ else if (signal_pending(current)) |
32481 |
++ ret = -ERESTARTNOINTR; |
32482 |
++ return ret; |
32483 |
++} |
32484 |
++ |
32485 |
++/** |
32486 |
++ * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
32487 |
++ * @uaddr: the futex we initially wait on (non-pi) |
32488 |
++ * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be |
32489 |
++ * the same type, no requeueing from private to shared, etc. |
32490 |
++ * @val: the expected value of uaddr |
32491 |
++ * @abs_time: absolute timeout |
32492 |
++ * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
32493 |
++ * @uaddr2: the pi futex we will take prior to returning to user-space |
32494 |
++ * |
32495 |
++ * The caller will wait on uaddr and will be requeued by futex_requeue() to |
32496 |
++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake |
32497 |
++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to |
32498 |
++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters; |
32499 |
++ * without one, the pi logic would not know which task to boost/deboost, if |
32500 |
++ * there was a need to. |
32501 |
++ * |
32502 |
++ * We call schedule in futex_wait_queue_me() when we enqueue and return there |
32503 |
++ * via the following-- |
32504 |
++ * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
32505 |
++ * 2) wakeup on uaddr2 after a requeue |
32506 |
++ * 3) signal |
32507 |
++ * 4) timeout |
32508 |
++ * |
32509 |
++ * If 3, cleanup and return -ERESTARTNOINTR. |
32510 |
++ * |
32511 |
++ * If 2, we may then block on trying to take the rt_mutex and return via: |
32512 |
++ * 5) successful lock |
32513 |
++ * 6) signal |
32514 |
++ * 7) timeout |
32515 |
++ * 8) other lock acquisition failure |
32516 |
++ * |
32517 |
++ * If 6, return -EWOULDBLOCK (restarting the syscall would do the same). |
32518 |
++ * |
32519 |
++ * If 4 or 7, we cleanup and return with -ETIMEDOUT. |
32520 |
++ * |
32521 |
++ * Return: |
32522 |
++ * - 0 - On success; |
32523 |
++ * - <0 - On error |
32524 |
++ */ |
32525 |
++static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
32526 |
++ u32 val, ktime_t *abs_time, u32 bitset, |
32527 |
++ u32 __user *uaddr2) |
32528 |
++{ |
32529 |
++ struct hrtimer_sleeper timeout, *to; |
32530 |
++ struct rt_mutex_waiter rt_waiter; |
32531 |
++ struct futex_hash_bucket *hb; |
32532 |
++ union futex_key key2 = FUTEX_KEY_INIT; |
32533 |
++ struct futex_q q = futex_q_init; |
32534 |
++ struct rt_mutex_base *pi_mutex; |
32535 |
++ int res, ret; |
32536 |
++ |
32537 |
++ if (!IS_ENABLED(CONFIG_FUTEX_PI)) |
32538 |
++ return -ENOSYS; |
32539 |
++ |
32540 |
++ if (uaddr == uaddr2) |
32541 |
++ return -EINVAL; |
32542 |
++ |
32543 |
++ if (!bitset) |
32544 |
++ return -EINVAL; |
32545 |
++ |
32546 |
++ to = futex_setup_timer(abs_time, &timeout, flags, |
32547 |
++ current->timer_slack_ns); |
32548 |
++ |
32549 |
++ /* |
32550 |
++ * The waiter is allocated on our stack, manipulated by the requeue |
32551 |
++ * code while we sleep on uaddr. |
32552 |
++ */ |
32553 |
++ rt_mutex_init_waiter(&rt_waiter); |
32554 |
++ |
32555 |
++ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
32556 |
++ if (unlikely(ret != 0)) |
32557 |
++ goto out; |
32558 |
++ |
32559 |
++ q.bitset = bitset; |
32560 |
++ q.rt_waiter = &rt_waiter; |
32561 |
++ q.requeue_pi_key = &key2; |
32562 |
++ |
32563 |
++ /* |
32564 |
++ * Prepare to wait on uaddr. On success, it holds hb->lock and q |
32565 |
++ * is initialized. |
32566 |
++ */ |
32567 |
++ ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
32568 |
++ if (ret) |
32569 |
++ goto out; |
32570 |
++ |
32571 |
++ /* |
32572 |
++ * The check above which compares uaddrs is not sufficient for |
32573 |
++ * shared futexes. We need to compare the keys: |
32574 |
++ */ |
32575 |
++ if (match_futex(&q.key, &key2)) { |
32576 |
++ queue_unlock(hb); |
32577 |
++ ret = -EINVAL; |
32578 |
++ goto out; |
32579 |
++ } |
32580 |
++ |
32581 |
++ /* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
32582 |
++ futex_wait_queue_me(hb, &q, to); |
32583 |
++ |
32584 |
++ switch (futex_requeue_pi_wakeup_sync(&q)) { |
32585 |
++ case Q_REQUEUE_PI_IGNORE: |
32586 |
++ /* The waiter is still on uaddr1 */ |
32587 |
++ spin_lock(&hb->lock); |
32588 |
++ ret = handle_early_requeue_pi_wakeup(hb, &q, to); |
32589 |
++ spin_unlock(&hb->lock); |
32590 |
++ break; |
32591 |
++ |
32592 |
++ case Q_REQUEUE_PI_LOCKED: |
32593 |
++ /* The requeue acquired the lock */ |
32594 |
++ if (q.pi_state && (q.pi_state->owner != current)) { |
32595 |
++ spin_lock(q.lock_ptr); |
32596 |
++ ret = fixup_owner(uaddr2, &q, true); |
32597 |
++ /* |
32598 |
++ * Drop the reference to the pi state which the |
32599 |
++ * requeue_pi() code acquired for us. |
32600 |
++ */ |
32601 |
++ put_pi_state(q.pi_state); |
32602 |
++ spin_unlock(q.lock_ptr); |
32603 |
++ /* |
32604 |
++ * Adjust the return value. It's either -EFAULT or |
32605 |
++ * success (1) but the caller expects 0 for success. |
32606 |
++ */ |
32607 |
++ ret = ret < 0 ? ret : 0; |
32608 |
++ } |
32609 |
++ break; |
32610 |
++ |
32611 |
++ case Q_REQUEUE_PI_DONE: |
32612 |
++ /* Requeue completed. Current is 'pi_blocked_on' the rtmutex */ |
32613 |
++ pi_mutex = &q.pi_state->pi_mutex; |
32614 |
++ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); |
32615 |
++ |
32616 |
++		/* Current is no longer pi_blocked_on */
32617 |
++ spin_lock(q.lock_ptr); |
32618 |
++ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) |
32619 |
++ ret = 0; |
32620 |
++ |
32621 |
++ debug_rt_mutex_free_waiter(&rt_waiter); |
32622 |
++ /* |
32623 |
++ * Fixup the pi_state owner and possibly acquire the lock if we |
32624 |
++ * haven't already. |
32625 |
++ */ |
32626 |
++ res = fixup_owner(uaddr2, &q, !ret); |
32627 |
++ /* |
32628 |
++ * If fixup_owner() returned an error, propagate that. If it |
32629 |
++ * acquired the lock, clear -ETIMEDOUT or -EINTR. |
32630 |
++ */ |
32631 |
++ if (res) |
32632 |
++ ret = (res < 0) ? res : 0; |
32633 |
++ |
32634 |
++ unqueue_me_pi(&q); |
32635 |
++ spin_unlock(q.lock_ptr); |
32636 |
++ |
32637 |
++ if (ret == -EINTR) { |
32638 |
++ /* |
32639 |
++ * We've already been requeued, but cannot restart |
32640 |
++ * by calling futex_lock_pi() directly. We could |
32641 |
++ * restart this syscall, but it would detect that |
32642 |
++ * the user space "val" changed and return |
32643 |
++ * -EWOULDBLOCK. Save the overhead of the restart |
32644 |
++ * and return -EWOULDBLOCK directly. |
32645 |
++ */ |
32646 |
++ ret = -EWOULDBLOCK; |
32647 |
++ } |
32648 |
++ break; |
32649 |
++ default: |
32650 |
++ BUG(); |
32651 |
++ } |
32652 |
++ |
32653 |
++out: |
32654 |
++ if (to) { |
32655 |
++ hrtimer_cancel(&to->timer); |
32656 |
++ destroy_hrtimer_on_stack(&to->timer); |
32657 |
++ } |
32658 |
++ return ret; |
32659 |
++} |
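FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI are the raw operations behind PI-aware condition variables. A hedged sketch of the pairing the comments above describe (illustrative only; 'cond' is a plain futex word, 'mutex' a PI futex word as in the lock example, raw syscall(2) usage assumed):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>
#include <stdint.h>

/* Waiter: block on 'cond' (expecting 'val'); after a requeue it returns
 * owning, or having waited on, the PI futex 'mutex'. */
static long cond_wait_requeue_pi(uint32_t *cond, uint32_t val, uint32_t *mutex)
{
	return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI_PRIVATE,
		       val, NULL /* no timeout */, mutex, 0);
}

/* Broadcaster: wake at most one waiter (required for requeue-PI) and
 * requeue the rest onto 'mutex', provided *cond still equals 'expected'. */
static long cond_broadcast_requeue_pi(uint32_t *cond, uint32_t expected, uint32_t *mutex)
{
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI_PRIVATE,
		       1, (void *)(long)INT_MAX, mutex, expected);
}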
32660 |
++ |
32661 |
++/* |
32662 |
++ * Support for robust futexes: the kernel cleans up held futexes at |
32663 |
++ * thread exit time. |
32664 |
++ * |
32665 |
++ * Implementation: user-space maintains a per-thread list of locks it |
32666 |
++ * is holding. Upon do_exit(), the kernel carefully walks this list, |
32667 |
++ * and marks all locks that are owned by this thread with the |
32668 |
++ * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
32669 |
++ * always manipulated with the lock held, so the list is private and |
32670 |
++ * per-thread. Userspace also maintains a per-thread 'list_op_pending' |
32671 |
++ * field, to allow the kernel to clean up if the thread dies after |
32672 |
++ * acquiring the lock, but just before it could have added itself to |
32673 |
++ * the list. There can only be one such pending lock. |
32674 |
++ */ |
32675 |
++ |
32676 |
++/** |
32677 |
++ * sys_set_robust_list() - Set the robust-futex list head of a task |
32678 |
++ * @head: pointer to the list-head |
32679 |
++ * @len: length of the list-head, as userspace expects |
32680 |
++ */ |
32681 |
++SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, |
32682 |
++ size_t, len) |
32683 |
++{ |
32684 |
++ if (!futex_cmpxchg_enabled) |
32685 |
++ return -ENOSYS; |
32686 |
++ /* |
32687 |
++ * The kernel knows only one size for now: |
32688 |
++ */ |
32689 |
++ if (unlikely(len != sizeof(*head))) |
32690 |
++ return -EINVAL; |
32691 |
++ |
32692 |
++ current->robust_list = head; |
32693 |
++ |
32694 |
++ return 0; |
32695 |
++} |
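On the userspace side, each thread registers one list head and threads its held locks through it; futex_offset tells the kernel where the futex word lives relative to each list node. A minimal, hedged sketch of the registration (glibc normally does this itself; the struct layouts come from <linux/futex.h>, the lock layout here is hypothetical):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>
#include <stdint.h>

struct robust_mutex {                    /* hypothetical lock layout */
	struct robust_list list;         /* node linked into the per-thread list */
	uint32_t futex;                  /* TID of the owner, 0 when free */
};

static struct robust_list_head robust_head;   /* one per thread; sketch is single-threaded */

static int register_robust_list(void)
{
	robust_head.list.next = &robust_head.list;   /* empty circular list */
	robust_head.futex_offset = offsetof(struct robust_mutex, futex)
				 - offsetof(struct robust_mutex, list);
	robust_head.list_op_pending = NULL;

	return (int)syscall(SYS_set_robust_list, &robust_head, sizeof(robust_head));
}

A real implementation would additionally set list_op_pending around each lock/unlock and link or unlink the node while holding the lock, which is exactly the state that exit_robust_list() below walks at thread exit.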
32696 |
++ |
32697 |
++/** |
32698 |
++ * sys_get_robust_list() - Get the robust-futex list head of a task |
32699 |
++ * @pid: pid of the process [zero for current task] |
32700 |
++ * @head_ptr: pointer to a list-head pointer, the kernel fills it in |
32701 |
++ * @len_ptr: pointer to a length field, the kernel fills in the header size |
32702 |
++ */ |
32703 |
++SYSCALL_DEFINE3(get_robust_list, int, pid, |
32704 |
++ struct robust_list_head __user * __user *, head_ptr, |
32705 |
++ size_t __user *, len_ptr) |
32706 |
++{ |
32707 |
++ struct robust_list_head __user *head; |
32708 |
++ unsigned long ret; |
32709 |
++ struct task_struct *p; |
32710 |
++ |
32711 |
++ if (!futex_cmpxchg_enabled) |
32712 |
++ return -ENOSYS; |
32713 |
++ |
32714 |
++ rcu_read_lock(); |
32715 |
++ |
32716 |
++ ret = -ESRCH; |
32717 |
++ if (!pid) |
32718 |
++ p = current; |
32719 |
++ else { |
32720 |
++ p = find_task_by_vpid(pid); |
32721 |
++ if (!p) |
32722 |
++ goto err_unlock; |
32723 |
++ } |
32724 |
++ |
32725 |
++ ret = -EPERM; |
32726 |
++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
32727 |
++ goto err_unlock; |
32728 |
++ |
32729 |
++ head = p->robust_list; |
32730 |
++ rcu_read_unlock(); |
32731 |
++ |
32732 |
++ if (put_user(sizeof(*head), len_ptr)) |
32733 |
++ return -EFAULT; |
32734 |
++ return put_user(head, head_ptr); |
32735 |
++ |
32736 |
++err_unlock: |
32737 |
++ rcu_read_unlock(); |
32738 |
++ |
32739 |
++ return ret; |
32740 |
++} |
32741 |
++ |
32742 |
++/* Constants for the pending_op argument of handle_futex_death */ |
32743 |
++#define HANDLE_DEATH_PENDING true |
32744 |
++#define HANDLE_DEATH_LIST false |
32745 |
++ |
32746 |
++/* |
32747 |
++ * Process a futex-list entry, check whether it's owned by the |
32748 |
++ * dying task, and do notification if so: |
32749 |
++ */ |
32750 |
++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, |
32751 |
++ bool pi, bool pending_op) |
32752 |
++{ |
32753 |
++ u32 uval, nval, mval; |
32754 |
++ pid_t owner; |
32755 |
++ int err; |
32756 |
++ |
32757 |
++ /* Futex address must be 32bit aligned */ |
32758 |
++ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) |
32759 |
++ return -1; |
32760 |
++ |
32761 |
++retry: |
32762 |
++ if (get_user(uval, uaddr)) |
32763 |
++ return -1; |
32764 |
++ |
32765 |
++ /* |
32766 |
++ * Special case for regular (non PI) futexes. The unlock path in |
32767 |
++ * user space has two race scenarios: |
32768 |
++ * |
32769 |
++ * 1. The unlock path releases the user space futex value and |
32770 |
++ * before it can execute the futex() syscall to wake up |
32771 |
++ * waiters it is killed. |
32772 |
++ * |
32773 |
++ * 2. A woken up waiter is killed before it can acquire the |
32774 |
++ * futex in user space. |
32775 |
++ * |
32776 |
++ * In the second case, the wake up notification could be generated |
32777 |
++ * by the unlock path in user space after setting the futex value |
32778 |
++ * to zero or by the kernel after setting the OWNER_DIED bit below. |
32779 |
++ * |
32780 |
++ * In both cases the TID validation below prevents a wakeup of |
32781 |
++ * potential waiters which can cause these waiters to block |
32782 |
++ * forever. |
32783 |
++ * |
32784 |
++ * In both cases the following conditions are met: |
32785 |
++ * |
32786 |
++ * 1) task->robust_list->list_op_pending != NULL |
32787 |
++ * @pending_op == true |
32788 |
++ * 2) The owner part of user space futex value == 0 |
32789 |
++ * 3) Regular futex: @pi == false |
32790 |
++ * |
32791 |
++ * If these conditions are met, it is safe to attempt waking up a |
32792 |
++ * potential waiter without touching the user space futex value and |
32793 |
++ * trying to set the OWNER_DIED bit. If the futex value is zero, |
32794 |
++ * the rest of the user space mutex state is consistent, so a woken |
32795 |
++ * waiter will just take over the uncontended futex. Setting the |
32796 |
++ * OWNER_DIED bit would create inconsistent state and malfunction |
32797 |
++ * of the user space owner died handling. Otherwise, the OWNER_DIED |
32798 |
++ * bit is already set, and the woken waiter is expected to deal with |
32799 |
++ * this. |
32800 |
++ */ |
32801 |
++ owner = uval & FUTEX_TID_MASK; |
32802 |
++ |
32803 |
++ if (pending_op && !pi && !owner) { |
32804 |
++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
32805 |
++ return 0; |
32806 |
++ } |
32807 |
++ |
32808 |
++ if (owner != task_pid_vnr(curr)) |
32809 |
++ return 0; |
32810 |
++ |
32811 |
++ /* |
32812 |
++ * Ok, this dying thread is truly holding a futex |
32813 |
++ * of interest. Set the OWNER_DIED bit atomically |
32814 |
++ * via cmpxchg, and if the value had FUTEX_WAITERS |
32815 |
++ * set, wake up a waiter (if any). (We have to do a |
32816 |
++ * futex_wake() even if OWNER_DIED is already set - |
32817 |
++ * to handle the rare but possible case of recursive |
32818 |
++ * thread-death.) The rest of the cleanup is done in |
32819 |
++ * userspace. |
32820 |
++ */ |
32821 |
++ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
32822 |
++ |
32823 |
++ /* |
32824 |
++ * We are not holding a lock here, but we want to have |
32825 |
++ * the pagefault_disable/enable() protection because |
32826 |
++ * we want to handle the fault gracefully. If the |
32827 |
++ * access fails we try to fault in the futex with R/W |
32828 |
++ * verification via get_user_pages. get_user() above |
32829 |
++ * does not guarantee R/W access. If that fails we |
32830 |
++ * give up and leave the futex locked. |
32831 |
++ */ |
32832 |
++ if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) { |
32833 |
++ switch (err) { |
32834 |
++ case -EFAULT: |
32835 |
++ if (fault_in_user_writeable(uaddr)) |
32836 |
++ return -1; |
32837 |
++ goto retry; |
32838 |
++ |
32839 |
++ case -EAGAIN: |
32840 |
++ cond_resched(); |
32841 |
++ goto retry; |
32842 |
++ |
32843 |
++ default: |
32844 |
++ WARN_ON_ONCE(1); |
32845 |
++ return err; |
32846 |
++ } |
32847 |
++ } |
32848 |
++ |
32849 |
++ if (nval != uval) |
32850 |
++ goto retry; |
32851 |
++ |
32852 |
++ /* |
32853 |
++ * Wake robust non-PI futexes here. The wakeup of |
32854 |
++ * PI futexes happens in exit_pi_state(): |
32855 |
++ */ |
32856 |
++ if (!pi && (uval & FUTEX_WAITERS)) |
32857 |
++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
32858 |
++ |
32859 |
++ return 0; |
32860 |
++} |
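On the waiter side, taking over a lock whose owner died then looks roughly like the sketch below (hedged illustration only; real implementations such as glibc's robust mutexes also manage list_op_pending and report EOWNERDEAD to the caller):

#include <linux/futex.h>
#include <stdint.h>
#include <stdatomic.h>

/* 'tid' is the caller's thread id; returns 1 if we inherited the dead
 * owner's lock, 0 otherwise. */
static int robust_try_takeover(_Atomic uint32_t *futex, uint32_t tid)
{
	uint32_t old = atomic_load(futex);

	/* handle_futex_death() cleared the TID and set FUTEX_OWNER_DIED. */
	if (!(old & FUTEX_OWNER_DIED) || (old & FUTEX_TID_MASK))
		return 0;

	/* Keep OWNER_DIED (and any WAITERS bit) so the new owner knows the
	 * protected state may be inconsistent. */
	return atomic_compare_exchange_strong(futex, &old,
			tid | FUTEX_OWNER_DIED | (old & FUTEX_WAITERS));
}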
32861 |
++ |
32862 |
++/* |
32863 |
++ * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
32864 |
++ */ |
32865 |
++static inline int fetch_robust_entry(struct robust_list __user **entry, |
32866 |
++ struct robust_list __user * __user *head, |
32867 |
++ unsigned int *pi) |
32868 |
++{ |
32869 |
++ unsigned long uentry; |
32870 |
++ |
32871 |
++ if (get_user(uentry, (unsigned long __user *)head)) |
32872 |
++ return -EFAULT; |
32873 |
++ |
32874 |
++ *entry = (void __user *)(uentry & ~1UL); |
32875 |
++ *pi = uentry & 1; |
32876 |
++ |
32877 |
++ return 0; |
32878 |
++} |
32879 |
++ |
32880 |
++/* |
32881 |
++ * Walk curr->robust_list (very carefully, it's a userspace list!) |
32882 |
++ * and mark any locks found there dead, and notify any waiters. |
32883 |
++ * |
32884 |
++ * We silently return on any sign of list-walking problem. |
32885 |
++ */ |
32886 |
++static void exit_robust_list(struct task_struct *curr) |
32887 |
++{ |
32888 |
++ struct robust_list_head __user *head = curr->robust_list; |
32889 |
++ struct robust_list __user *entry, *next_entry, *pending; |
32890 |
++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
32891 |
++ unsigned int next_pi; |
32892 |
++ unsigned long futex_offset; |
32893 |
++ int rc; |
32894 |
++ |
32895 |
++ if (!futex_cmpxchg_enabled) |
32896 |
++ return; |
32897 |
++ |
32898 |
++ /* |
32899 |
++ * Fetch the list head (which was registered earlier, via |
32900 |
++ * sys_set_robust_list()): |
32901 |
++ */ |
32902 |
++ if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
32903 |
++ return; |
32904 |
++ /* |
32905 |
++ * Fetch the relative futex offset: |
32906 |
++ */ |
32907 |
++ if (get_user(futex_offset, &head->futex_offset)) |
32908 |
++ return; |
32909 |
++ /* |
32910 |
++ * Fetch any possibly pending lock-add first, and handle it |
32911 |
++ * if it exists: |
32912 |
++ */ |
32913 |
++ if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
32914 |
++ return; |
32915 |
++ |
32916 |
++ next_entry = NULL; /* avoid warning with gcc */ |
32917 |
++ while (entry != &head->list) { |
32918 |
++ /* |
32919 |
++ * Fetch the next entry in the list before calling |
32920 |
++ * handle_futex_death: |
32921 |
++ */ |
32922 |
++ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); |
32923 |
++ /* |
32924 |
++ * A pending lock might already be on the list, so |
32925 |
++ * don't process it twice: |
32926 |
++ */ |
32927 |
++ if (entry != pending) { |
32928 |
++ if (handle_futex_death((void __user *)entry + futex_offset, |
32929 |
++ curr, pi, HANDLE_DEATH_LIST)) |
32930 |
++ return; |
32931 |
++ } |
32932 |
++ if (rc) |
32933 |
++ return; |
32934 |
++ entry = next_entry; |
32935 |
++ pi = next_pi; |
32936 |
++ /* |
32937 |
++ * Avoid excessively long or circular lists: |
32938 |
++ */ |
32939 |
++ if (!--limit) |
32940 |
++ break; |
32941 |
++ |
32942 |
++ cond_resched(); |
32943 |
++ } |
32944 |
++ |
32945 |
++ if (pending) { |
32946 |
++ handle_futex_death((void __user *)pending + futex_offset, |
32947 |
++ curr, pip, HANDLE_DEATH_PENDING); |
32948 |
++ } |
32949 |
++} |
32950 |
++ |
32951 |
++static void futex_cleanup(struct task_struct *tsk) |
32952 |
++{ |
32953 |
++ if (unlikely(tsk->robust_list)) { |
32954 |
++ exit_robust_list(tsk); |
32955 |
++ tsk->robust_list = NULL; |
32956 |
++ } |
32957 |
++ |
32958 |
++#ifdef CONFIG_COMPAT |
32959 |
++ if (unlikely(tsk->compat_robust_list)) { |
32960 |
++ compat_exit_robust_list(tsk); |
32961 |
++ tsk->compat_robust_list = NULL; |
32962 |
++ } |
32963 |
++#endif |
32964 |
++ |
32965 |
++ if (unlikely(!list_empty(&tsk->pi_state_list))) |
32966 |
++ exit_pi_state_list(tsk); |
32967 |
++} |
32968 |
++ |
32969 |
++/** |
32970 |
++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD |
32971 |
++ * @tsk: task to set the state on |
32972 |
++ * |
32973 |
++ * Set the futex exit state of the task lockless. The futex waiter code |
32974 |
++ * observes that state when a task is exiting and loops until the task has |
32975 |
++ * actually finished the futex cleanup. The worst case for this is that the |
32976 |
++ * waiter runs through the wait loop until the state becomes visible. |
32977 |
++ * |
32978 |
++ * This is called from the recursive fault handling path in do_exit(). |
32979 |
++ * |
32980 |
++ * This is best effort. Either the futex exit code has run already or |
32981 |
++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can |
32982 |
++ * take it over. If not, the problem is pushed back to user space. If the |
32983 |
++ * futex exit code did not run yet, then an already queued waiter might |
32984 |
++ * block forever, but there is nothing which can be done about that. |
32985 |
++ */ |
32986 |
++void futex_exit_recursive(struct task_struct *tsk) |
32987 |
++{ |
32988 |
++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ |
32989 |
++ if (tsk->futex_state == FUTEX_STATE_EXITING) |
32990 |
++ mutex_unlock(&tsk->futex_exit_mutex); |
32991 |
++ tsk->futex_state = FUTEX_STATE_DEAD; |
32992 |
++} |
32993 |
++ |
32994 |
++static void futex_cleanup_begin(struct task_struct *tsk) |
32995 |
++{ |
32996 |
++ /* |
32997 |
++ * Prevent various race issues against a concurrent incoming waiter |
32998 |
++ * including live locks by forcing the waiter to block on |
32999 |
++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in |
33000 |
++ * attach_to_pi_owner(). |
33001 |
++ */ |
33002 |
++ mutex_lock(&tsk->futex_exit_mutex); |
33003 |
++ |
33004 |
++ /* |
33005 |
++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. |
33006 |
++ * |
33007 |
++ * This ensures that all subsequent checks of tsk->futex_state in |
33008 |
++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with |
33009 |
++ * tsk->pi_lock held. |
33010 |
++ * |
33011 |
++ * It guarantees also that a pi_state which was queued right before |
33012 |
++ * the state change under tsk->pi_lock by a concurrent waiter must |
33013 |
++ * be observed in exit_pi_state_list(). |
33014 |
++ */ |
33015 |
++ raw_spin_lock_irq(&tsk->pi_lock); |
33016 |
++ tsk->futex_state = FUTEX_STATE_EXITING; |
33017 |
++ raw_spin_unlock_irq(&tsk->pi_lock); |
33018 |
++} |
33019 |
++ |
33020 |
++static void futex_cleanup_end(struct task_struct *tsk, int state) |
33021 |
++{ |
33022 |
++ /* |
33023 |
++ * Lockless store. The only side effect is that an observer might |
33024 |
++ * take another loop until it becomes visible. |
33025 |
++ */ |
33026 |
++ tsk->futex_state = state; |
33027 |
++ /* |
33028 |
++ * Drop the exit protection. This unblocks waiters which observed |
33029 |
++ * FUTEX_STATE_EXITING to reevaluate the state. |
33030 |
++ */ |
33031 |
++ mutex_unlock(&tsk->futex_exit_mutex); |
33032 |
++} |
33033 |
++ |
33034 |
++void futex_exec_release(struct task_struct *tsk) |
33035 |
++{ |
33036 |
++ /* |
33037 |
++ * The state handling is done for consistency, but in the case of |
33038 |
++ * exec() there is no way to prevent further damage as the PID stays |
33039 |
++ * the same. But for the unlikely and arguably buggy case that a |
33040 |
++ * futex is held on exec(), this provides at least as much state |
33041 |
++ * consistency protection which is possible. |
33042 |
++ */ |
33043 |
++ futex_cleanup_begin(tsk); |
33044 |
++ futex_cleanup(tsk); |
33045 |
++ /* |
33046 |
++ * Reset the state to FUTEX_STATE_OK. The task is alive and about |
33047 |
++ * exec a new binary. |
33048 |
++ */ |
33049 |
++ futex_cleanup_end(tsk, FUTEX_STATE_OK); |
33050 |
++} |
33051 |
++ |
33052 |
++void futex_exit_release(struct task_struct *tsk) |
33053 |
++{ |
33054 |
++ futex_cleanup_begin(tsk); |
33055 |
++ futex_cleanup(tsk); |
33056 |
++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD); |
33057 |
++} |
33058 |
++ |
33059 |
++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
33060 |
++ u32 __user *uaddr2, u32 val2, u32 val3) |
33061 |
++{ |
33062 |
++ int cmd = op & FUTEX_CMD_MASK; |
33063 |
++ unsigned int flags = 0; |
33064 |
++ |
33065 |
++ if (!(op & FUTEX_PRIVATE_FLAG)) |
33066 |
++ flags |= FLAGS_SHARED; |
33067 |
++ |
33068 |
++ if (op & FUTEX_CLOCK_REALTIME) { |
33069 |
++ flags |= FLAGS_CLOCKRT; |
33070 |
++ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && |
33071 |
++ cmd != FUTEX_LOCK_PI2) |
33072 |
++ return -ENOSYS; |
33073 |
++ } |
33074 |
++ |
33075 |
++ switch (cmd) { |
33076 |
++ case FUTEX_LOCK_PI: |
33077 |
++ case FUTEX_LOCK_PI2: |
33078 |
++ case FUTEX_UNLOCK_PI: |
33079 |
++ case FUTEX_TRYLOCK_PI: |
33080 |
++ case FUTEX_WAIT_REQUEUE_PI: |
33081 |
++ case FUTEX_CMP_REQUEUE_PI: |
33082 |
++ if (!futex_cmpxchg_enabled) |
33083 |
++ return -ENOSYS; |
33084 |
++ } |
33085 |
++ |
33086 |
++ switch (cmd) { |
33087 |
++ case FUTEX_WAIT: |
33088 |
++ val3 = FUTEX_BITSET_MATCH_ANY; |
33089 |
++ fallthrough; |
33090 |
++ case FUTEX_WAIT_BITSET: |
33091 |
++ return futex_wait(uaddr, flags, val, timeout, val3); |
33092 |
++ case FUTEX_WAKE: |
33093 |
++ val3 = FUTEX_BITSET_MATCH_ANY; |
33094 |
++ fallthrough; |
33095 |
++ case FUTEX_WAKE_BITSET: |
33096 |
++ return futex_wake(uaddr, flags, val, val3); |
33097 |
++ case FUTEX_REQUEUE: |
33098 |
++ return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); |
33099 |
++ case FUTEX_CMP_REQUEUE: |
33100 |
++ return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); |
33101 |
++ case FUTEX_WAKE_OP: |
33102 |
++ return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
33103 |
++ case FUTEX_LOCK_PI: |
33104 |
++ flags |= FLAGS_CLOCKRT; |
33105 |
++ fallthrough; |
33106 |
++ case FUTEX_LOCK_PI2: |
33107 |
++ return futex_lock_pi(uaddr, flags, timeout, 0); |
33108 |
++ case FUTEX_UNLOCK_PI: |
33109 |
++ return futex_unlock_pi(uaddr, flags); |
33110 |
++ case FUTEX_TRYLOCK_PI: |
33111 |
++ return futex_lock_pi(uaddr, flags, NULL, 1); |
33112 |
++ case FUTEX_WAIT_REQUEUE_PI: |
33113 |
++ val3 = FUTEX_BITSET_MATCH_ANY; |
33114 |
++ return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, |
33115 |
++ uaddr2); |
33116 |
++ case FUTEX_CMP_REQUEUE_PI: |
33117 |
++ return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); |
33118 |
++ } |
33119 |
++ return -ENOSYS; |
33120 |
++} |
33121 |
++ |
33122 |
++static __always_inline bool futex_cmd_has_timeout(u32 cmd) |
33123 |
++{ |
33124 |
++ switch (cmd) { |
33125 |
++ case FUTEX_WAIT: |
33126 |
++ case FUTEX_LOCK_PI: |
33127 |
++ case FUTEX_LOCK_PI2: |
33128 |
++ case FUTEX_WAIT_BITSET: |
33129 |
++ case FUTEX_WAIT_REQUEUE_PI: |
33130 |
++ return true; |
33131 |
++ } |
33132 |
++ return false; |
33133 |
++} |
33134 |
++ |
33135 |
++static __always_inline int |
33136 |
++futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) |
33137 |
++{ |
33138 |
++ if (!timespec64_valid(ts)) |
33139 |
++ return -EINVAL; |
33140 |
++ |
33141 |
++ *t = timespec64_to_ktime(*ts); |
33142 |
++ if (cmd == FUTEX_WAIT) |
33143 |
++ *t = ktime_add_safe(ktime_get(), *t); |
33144 |
++ else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) |
33145 |
++ *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t); |
33146 |
++ return 0; |
33147 |
++} |
33148 |
++ |
33149 |
++SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
33150 |
++ const struct __kernel_timespec __user *, utime, |
33151 |
++ u32 __user *, uaddr2, u32, val3) |
33152 |
++{ |
33153 |
++ int ret, cmd = op & FUTEX_CMD_MASK; |
33154 |
++ ktime_t t, *tp = NULL; |
33155 |
++ struct timespec64 ts; |
33156 |
++ |
33157 |
++ if (utime && futex_cmd_has_timeout(cmd)) { |
33158 |
++ if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) |
33159 |
++ return -EFAULT; |
33160 |
++ if (get_timespec64(&ts, utime)) |
33161 |
++ return -EFAULT; |
33162 |
++ ret = futex_init_timeout(cmd, op, &ts, &t); |
33163 |
++ if (ret) |
33164 |
++ return ret; |
33165 |
++ tp = &t; |
33166 |
++ } |
33167 |
++ |
33168 |
++ return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); |
33169 |
++} |
33170 |
++ |
33171 |
++#ifdef CONFIG_COMPAT |
33172 |
++/* |
33173 |
++ * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
33174 |
++ */ |
33175 |
++static inline int |
33176 |
++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, |
33177 |
++ compat_uptr_t __user *head, unsigned int *pi) |
33178 |
++{ |
33179 |
++ if (get_user(*uentry, head)) |
33180 |
++ return -EFAULT; |
33181 |
++ |
33182 |
++ *entry = compat_ptr((*uentry) & ~1); |
33183 |
++ *pi = (unsigned int)(*uentry) & 1; |
33184 |
++ |
33185 |
++ return 0; |
33186 |
++} |
33187 |
++ |
33188 |
++static void __user *futex_uaddr(struct robust_list __user *entry, |
33189 |
++ compat_long_t futex_offset) |
33190 |
++{ |
33191 |
++ compat_uptr_t base = ptr_to_compat(entry); |
33192 |
++ void __user *uaddr = compat_ptr(base + futex_offset); |
33193 |
++ |
33194 |
++ return uaddr; |
33195 |
++} |
33196 |
++ |
33197 |
++/* |
33198 |
++ * Walk curr->robust_list (very carefully, it's a userspace list!) |
33199 |
++ * and mark any locks found there dead, and notify any waiters. |
33200 |
++ * |
33201 |
++ * We silently return on any sign of list-walking problem. |
33202 |
++ */ |
33203 |
++static void compat_exit_robust_list(struct task_struct *curr) |
33204 |
++{ |
33205 |
++ struct compat_robust_list_head __user *head = curr->compat_robust_list; |
33206 |
++ struct robust_list __user *entry, *next_entry, *pending; |
33207 |
++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
33208 |
++ unsigned int next_pi; |
33209 |
++ compat_uptr_t uentry, next_uentry, upending; |
33210 |
++ compat_long_t futex_offset; |
33211 |
++ int rc; |
33212 |
++ |
33213 |
++ if (!futex_cmpxchg_enabled) |
33214 |
++ return; |
33215 |
++ |
33216 |
++ /* |
33217 |
++ * Fetch the list head (which was registered earlier, via |
33218 |
++ * sys_set_robust_list()): |
33219 |
++ */ |
33220 |
++ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
33221 |
++ return; |
33222 |
++ /* |
33223 |
++ * Fetch the relative futex offset: |
33224 |
++ */ |
33225 |
++ if (get_user(futex_offset, &head->futex_offset)) |
33226 |
++ return; |
33227 |
++ /* |
33228 |
++ * Fetch any possibly pending lock-add first, and handle it |
33229 |
++ * if it exists: |
33230 |
++ */ |
33231 |
++ if (compat_fetch_robust_entry(&upending, &pending, |
33232 |
++ &head->list_op_pending, &pip)) |
33233 |
++ return; |
33234 |
++ |
33235 |
++ next_entry = NULL; /* avoid warning with gcc */ |
33236 |
++ while (entry != (struct robust_list __user *) &head->list) { |
33237 |
++ /* |
33238 |
++ * Fetch the next entry in the list before calling |
33239 |
++ * handle_futex_death: |
33240 |
++ */ |
33241 |
++ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, |
33242 |
++ (compat_uptr_t __user *)&entry->next, &next_pi); |
33243 |
++ /* |
33244 |
++ * A pending lock might already be on the list, so |
33245 |
++ * dont process it twice: |
33246 |
++ */ |
33247 |
++ if (entry != pending) { |
33248 |
++ void __user *uaddr = futex_uaddr(entry, futex_offset); |
33249 |
++ |
33250 |
++ if (handle_futex_death(uaddr, curr, pi, |
33251 |
++ HANDLE_DEATH_LIST)) |
33252 |
++ return; |
33253 |
++ } |
33254 |
++ if (rc) |
33255 |
++ return; |
33256 |
++ uentry = next_uentry; |
33257 |
++ entry = next_entry; |
33258 |
++ pi = next_pi; |
33259 |
++ /* |
33260 |
++ * Avoid excessively long or circular lists: |
33261 |
++ */ |
33262 |
++ if (!--limit) |
33263 |
++ break; |
33264 |
++ |
33265 |
++ cond_resched(); |
33266 |
++ } |
33267 |
++ if (pending) { |
33268 |
++ void __user *uaddr = futex_uaddr(pending, futex_offset); |
33269 |
++ |
33270 |
++ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); |
33271 |
++ } |
33272 |
++} |
33273 |
++ |
33274 |
++COMPAT_SYSCALL_DEFINE2(set_robust_list, |
33275 |
++ struct compat_robust_list_head __user *, head, |
33276 |
++ compat_size_t, len) |
33277 |
++{ |
33278 |
++ if (!futex_cmpxchg_enabled) |
33279 |
++ return -ENOSYS; |
33280 |
++ |
33281 |
++ if (unlikely(len != sizeof(*head))) |
33282 |
++ return -EINVAL; |
33283 |
++ |
33284 |
++ current->compat_robust_list = head; |
33285 |
++ |
33286 |
++ return 0; |
33287 |
++} |
33288 |
++ |
33289 |
++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, |
33290 |
++ compat_uptr_t __user *, head_ptr, |
33291 |
++ compat_size_t __user *, len_ptr) |
33292 |
++{ |
33293 |
++ struct compat_robust_list_head __user *head; |
33294 |
++ unsigned long ret; |
33295 |
++ struct task_struct *p; |
33296 |
++ |
33297 |
++ if (!futex_cmpxchg_enabled) |
33298 |
++ return -ENOSYS; |
33299 |
++ |
33300 |
++ rcu_read_lock(); |
33301 |
++ |
33302 |
++ ret = -ESRCH; |
33303 |
++ if (!pid) |
33304 |
++ p = current; |
33305 |
++ else { |
33306 |
++ p = find_task_by_vpid(pid); |
33307 |
++ if (!p) |
33308 |
++ goto err_unlock; |
33309 |
++ } |
33310 |
++ |
33311 |
++ ret = -EPERM; |
33312 |
++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
33313 |
++ goto err_unlock; |
33314 |
++ |
33315 |
++ head = p->compat_robust_list; |
33316 |
++ rcu_read_unlock(); |
33317 |
++ |
33318 |
++ if (put_user(sizeof(*head), len_ptr)) |
33319 |
++ return -EFAULT; |
33320 |
++ return put_user(ptr_to_compat(head), head_ptr); |
33321 |
++ |
33322 |
++err_unlock: |
33323 |
++ rcu_read_unlock(); |
33324 |
++ |
33325 |
++ return ret; |
33326 |
++} |
33327 |
++#endif /* CONFIG_COMPAT */ |
33328 |
++ |
33329 |
++#ifdef CONFIG_COMPAT_32BIT_TIME |
33330 |
++SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, |
33331 |
++ const struct old_timespec32 __user *, utime, u32 __user *, uaddr2, |
33332 |
++ u32, val3) |
33333 |
++{ |
33334 |
++ int ret, cmd = op & FUTEX_CMD_MASK; |
33335 |
++ ktime_t t, *tp = NULL; |
33336 |
++ struct timespec64 ts; |
33337 |
++ |
33338 |
++ if (utime && futex_cmd_has_timeout(cmd)) { |
33339 |
++ if (get_old_timespec32(&ts, utime)) |
33340 |
++ return -EFAULT; |
33341 |
++ ret = futex_init_timeout(cmd, op, &ts, &t); |
33342 |
++ if (ret) |
33343 |
++ return ret; |
33344 |
++ tp = &t; |
33345 |
++ } |
33346 |
++ |
33347 |
++ return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); |
33348 |
++} |
33349 |
++#endif /* CONFIG_COMPAT_32BIT_TIME */ |
33350 |
++ |
33351 |
++static void __init futex_detect_cmpxchg(void) |
33352 |
++{ |
33353 |
++#ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
33354 |
++ u32 curval; |
33355 |
++ |
33356 |
++ /* |
33357 |
++ * This will fail and we want it. Some arch implementations do |
33358 |
++ * runtime detection of the futex_atomic_cmpxchg_inatomic() |
33359 |
++ * functionality. We want to know that before we call in any |
33360 |
++ * of the complex code paths. Also we want to prevent |
33361 |
++ * registration of robust lists in that case. NULL is |
33362 |
++ * guaranteed to fault and we get -EFAULT on functional |
33363 |
++ * implementation, the non-functional ones will return |
33364 |
++ * -ENOSYS. |
33365 |
++ */ |
33366 |
++ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) |
33367 |
++ futex_cmpxchg_enabled = 1; |
33368 |
++#endif |
33369 |
++} |
33370 |
++ |
33371 |
++static int __init futex_init(void) |
33372 |
++{ |
33373 |
++ unsigned int futex_shift; |
33374 |
++ unsigned long i; |
33375 |
++ |
33376 |
++#if CONFIG_BASE_SMALL |
33377 |
++ futex_hashsize = 16; |
33378 |
++#else |
33379 |
++ futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); |
33380 |
++#endif |
33381 |
++ |
33382 |
++ futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), |
33383 |
++ futex_hashsize, 0, |
33384 |
++ futex_hashsize < 256 ? HASH_SMALL : 0, |
33385 |
++ &futex_shift, NULL, |
33386 |
++ futex_hashsize, futex_hashsize); |
33387 |
++ futex_hashsize = 1UL << futex_shift; |
33388 |
++ |
33389 |
++ futex_detect_cmpxchg(); |
33390 |
++ |
33391 |
++ for (i = 0; i < futex_hashsize; i++) { |
33392 |
++ atomic_set(&futex_queues[i].waiters, 0); |
33393 |
++ plist_head_init(&futex_queues[i].chain); |
33394 |
++ spin_lock_init(&futex_queues[i].lock); |
33395 |
++ } |
33396 |
++ |
33397 |
++ return 0; |
33398 |
++} |
33399 |
++core_initcall(futex_init); |
33400 |
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c |
33401 |
+index 7971e989e425b..74a4ef1da9ad7 100644 |
33402 |
+--- a/kernel/gcov/gcc_4_7.c |
33403 |
++++ b/kernel/gcov/gcc_4_7.c |
33404 |
+@@ -82,6 +82,7 @@ struct gcov_fn_info { |
33405 |
+ * @version: gcov version magic indicating the gcc version used for compilation |
33406 |
+ * @next: list head for a singly-linked list |
33407 |
+ * @stamp: uniquifying time stamp |
33408 |
++ * @checksum: unique object checksum |
33409 |
+ * @filename: name of the associated gcov data file |
33410 |
+ * @merge: merge functions (null for unused counter type) |
33411 |
+ * @n_functions: number of instrumented functions |
33412 |
+@@ -94,6 +95,10 @@ struct gcov_info { |
33413 |
+ unsigned int version; |
33414 |
+ struct gcov_info *next; |
33415 |
+ unsigned int stamp; |
33416 |
++ /* Since GCC 12.1 a checksum field is added. */ |
33417 |
++#if (__GNUC__ >= 12) |
33418 |
++ unsigned int checksum; |
33419 |
++#endif |
33420 |
+ const char *filename; |
33421 |
+ void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int); |
33422 |
+ unsigned int n_functions; |
33423 |
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h |
33424 |
+index e58342ace11f2..f1d83a8b44171 100644 |
33425 |
+--- a/kernel/irq/internals.h |
33426 |
++++ b/kernel/irq/internals.h |
33427 |
+@@ -52,6 +52,7 @@ enum { |
33428 |
+ * IRQS_PENDING - irq is pending and replayed later |
33429 |
+ * IRQS_SUSPENDED - irq is suspended |
33430 |
+ * IRQS_NMI - irq line is used to deliver NMIs |
33431 |
++ * IRQS_SYSFS - descriptor has been added to sysfs |
33432 |
+ */ |
33433 |
+ enum { |
33434 |
+ IRQS_AUTODETECT = 0x00000001, |
33435 |
+@@ -64,6 +65,7 @@ enum { |
33436 |
+ IRQS_SUSPENDED = 0x00000800, |
33437 |
+ IRQS_TIMINGS = 0x00001000, |
33438 |
+ IRQS_NMI = 0x00002000, |
33439 |
++ IRQS_SYSFS = 0x00004000, |
33440 |
+ }; |
33441 |
+ |
33442 |
+ #include "debug.h" |
33443 |
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c |
33444 |
+index 21b3ac2a29d20..7a45fd5932454 100644 |
33445 |
+--- a/kernel/irq/irqdesc.c |
33446 |
++++ b/kernel/irq/irqdesc.c |
33447 |
+@@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc) |
33448 |
+ if (irq_kobj_base) { |
33449 |
+ /* |
33450 |
+ * Continue even in case of failure as this is nothing |
33451 |
+- * crucial. |
33452 |
++ * crucial and failures in the late irq_sysfs_init() |
33453 |
++ * cannot be rolled back. |
33454 |
+ */ |
33455 |
+ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) |
33456 |
+ pr_warn("Failed to add kobject for irq %d\n", irq); |
33457 |
++ else |
33458 |
++ desc->istate |= IRQS_SYSFS; |
33459 |
+ } |
33460 |
+ } |
33461 |
+ |
33462 |
+ static void irq_sysfs_del(struct irq_desc *desc) |
33463 |
+ { |
33464 |
+ /* |
33465 |
+- * If irq_sysfs_init() has not yet been invoked (early boot), then |
33466 |
+- * irq_kobj_base is NULL and the descriptor was never added. |
33467 |
+- * kobject_del() complains about a object with no parent, so make |
33468 |
+- * it conditional. |
33469 |
++ * Only invoke kobject_del() when kobject_add() was successfully |
33470 |
++ * invoked for the descriptor. This covers both early boot, where |
33471 |
++ * sysfs is not initialized yet, and the case of a failed |
33472 |
++ * kobject_add() invocation. |
33473 |
+ */ |
33474 |
+- if (irq_kobj_base) |
33475 |
++ if (desc->istate & IRQS_SYSFS) |
33476 |
+ kobject_del(&desc->kobj); |
33477 |
+ } |
33478 |
+ |
33479 |
+diff --git a/kernel/padata.c b/kernel/padata.c |
33480 |
+index 18d3a5c699d84..c17f772cc315a 100644 |
33481 |
+--- a/kernel/padata.c |
33482 |
++++ b/kernel/padata.c |
33483 |
+@@ -207,14 +207,16 @@ int padata_do_parallel(struct padata_shell *ps, |
33484 |
+ pw = padata_work_alloc(); |
33485 |
+ spin_unlock(&padata_works_lock); |
33486 |
+ |
33487 |
++ if (!pw) { |
33488 |
++ /* Maximum works limit exceeded, run in the current task. */ |
33489 |
++ padata->parallel(padata); |
33490 |
++ } |
33491 |
++ |
33492 |
+ rcu_read_unlock_bh(); |
33493 |
+ |
33494 |
+ if (pw) { |
33495 |
+ padata_work_init(pw, padata_parallel_worker, padata, 0); |
33496 |
+ queue_work(pinst->parallel_wq, &pw->pw_work); |
33497 |
+- } else { |
33498 |
+- /* Maximum works limit exceeded, run in the current task. */ |
33499 |
+- padata->parallel(padata); |
33500 |
+ } |
33501 |
+ |
33502 |
+ return 0; |
33503 |
+@@ -388,13 +390,16 @@ void padata_do_serial(struct padata_priv *padata) |
33504 |
+ int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); |
33505 |
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); |
33506 |
+ struct padata_priv *cur; |
33507 |
++ struct list_head *pos; |
33508 |
+ |
33509 |
+ spin_lock(&reorder->lock); |
33510 |
+ /* Sort in ascending order of sequence number. */ |
33511 |
+- list_for_each_entry_reverse(cur, &reorder->list, list) |
33512 |
++ list_for_each_prev(pos, &reorder->list) { |
33513 |
++ cur = list_entry(pos, struct padata_priv, list); |
33514 |
+ if (cur->seq_nr < padata->seq_nr) |
33515 |
+ break; |
33516 |
+- list_add(&padata->list, &cur->list); |
33517 |
++ } |
33518 |
++ list_add(&padata->list, pos); |
33519 |
+ spin_unlock(&reorder->lock); |
33520 |
+ |
33521 |
+ /* |
33522 |
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c |
33523 |
+index 330d499376924..475d630e650f1 100644 |
33524 |
+--- a/kernel/power/snapshot.c |
33525 |
++++ b/kernel/power/snapshot.c |
33526 |
+@@ -1719,8 +1719,8 @@ static unsigned long minimum_image_size(unsigned long saveable) |
33527 |
+ * /sys/power/reserved_size, respectively). To make this happen, we compute the |
33528 |
+ * total number of available page frames and allocate at least |
33529 |
+ * |
33530 |
+- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 |
33531 |
+- * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) |
33532 |
++ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2 |
33533 |
++ * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) |
33534 |
+ * |
33535 |
+ * of them, which corresponds to the maximum size of a hibernation image. |
33536 |
+ * |
33537 |
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c |
33538 |
+index 63f7ce228cc35..cf101da389b00 100644 |
33539 |
+--- a/kernel/rcu/tree.c |
33540 |
++++ b/kernel/rcu/tree.c |
33541 |
+@@ -2674,7 +2674,7 @@ void rcu_force_quiescent_state(void) |
33542 |
+ struct rcu_node *rnp_old = NULL; |
33543 |
+ |
33544 |
+ /* Funnel through hierarchy to reduce memory contention. */ |
33545 |
+- rnp = __this_cpu_read(rcu_data.mynode); |
33546 |
++ rnp = raw_cpu_read(rcu_data.mynode); |
33547 |
+ for (; rnp != NULL; rnp = rnp->parent) { |
33548 |
+ ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || |
33549 |
+ !raw_spin_trylock(&rnp->fqslock); |
33550 |
+diff --git a/kernel/relay.c b/kernel/relay.c |
33551 |
+index d1a67fbb819d3..6825b84038776 100644 |
33552 |
+--- a/kernel/relay.c |
33553 |
++++ b/kernel/relay.c |
33554 |
+@@ -151,13 +151,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) |
33555 |
+ { |
33556 |
+ struct rchan_buf *buf; |
33557 |
+ |
33558 |
+- if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) |
33559 |
++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t)) |
33560 |
+ return NULL; |
33561 |
+ |
33562 |
+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); |
33563 |
+ if (!buf) |
33564 |
+ return NULL; |
33565 |
+- buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *), |
33566 |
++ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t), |
33567 |
+ GFP_KERNEL); |
33568 |
+ if (!buf->padding) |
33569 |
+ goto free_buf; |
33570 |
+diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c |
33571 |
+index ceb03d76c0ccd..221ca10505738 100644 |
33572 |
+--- a/kernel/sched/cpudeadline.c |
33573 |
++++ b/kernel/sched/cpudeadline.c |
33574 |
+@@ -124,7 +124,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, |
33575 |
+ unsigned long cap, max_cap = 0; |
33576 |
+ int cpu, max_cpu = -1; |
33577 |
+ |
33578 |
+- if (!static_branch_unlikely(&sched_asym_cpucapacity)) |
33579 |
++ if (!sched_asym_cpucap_active()) |
33580 |
+ return 1; |
33581 |
+ |
33582 |
+ /* Ensure the capacity of the CPUs fits the task. */ |
33583 |
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
33584 |
+index 147b757d162b9..2a2f32eaffccd 100644 |
33585 |
+--- a/kernel/sched/deadline.c |
33586 |
++++ b/kernel/sched/deadline.c |
33587 |
+@@ -112,7 +112,7 @@ static inline unsigned long __dl_bw_capacity(int i) |
33588 |
+ */ |
33589 |
+ static inline unsigned long dl_bw_capacity(int i) |
33590 |
+ { |
33591 |
+- if (!static_branch_unlikely(&sched_asym_cpucapacity) && |
33592 |
++ if (!sched_asym_cpucap_active() && |
33593 |
+ capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { |
33594 |
+ return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; |
33595 |
+ } else { |
33596 |
+@@ -1703,7 +1703,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags) |
33597 |
+ * Take the capacity of the CPU into account to |
33598 |
+ * ensure it fits the requirement of the task. |
33599 |
+ */ |
33600 |
+- if (static_branch_unlikely(&sched_asym_cpucapacity)) |
33601 |
++ if (sched_asym_cpucap_active()) |
33602 |
+ select_rq |= !dl_task_fits_capacity(p, cpu); |
33603 |
+ |
33604 |
+ if (select_rq) { |
33605 |
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
33606 |
+index a853e4e9e3c36..6648683cd9644 100644 |
33607 |
+--- a/kernel/sched/fair.c |
33608 |
++++ b/kernel/sched/fair.c |
33609 |
+@@ -4120,14 +4120,140 @@ done: |
33610 |
+ trace_sched_util_est_se_tp(&p->se); |
33611 |
+ } |
33612 |
+ |
33613 |
+-static inline int task_fits_capacity(struct task_struct *p, long capacity) |
33614 |
++static inline int util_fits_cpu(unsigned long util, |
33615 |
++ unsigned long uclamp_min, |
33616 |
++ unsigned long uclamp_max, |
33617 |
++ int cpu) |
33618 |
+ { |
33619 |
+- return fits_capacity(uclamp_task_util(p), capacity); |
33620 |
++ unsigned long capacity_orig, capacity_orig_thermal; |
33621 |
++ unsigned long capacity = capacity_of(cpu); |
33622 |
++ bool fits, uclamp_max_fits; |
33623 |
++ |
33624 |
++ /* |
33625 |
++ * Check if the real util fits without any uclamp boost/cap applied. |
33626 |
++ */ |
33627 |
++ fits = fits_capacity(util, capacity); |
33628 |
++ |
33629 |
++ if (!uclamp_is_used()) |
33630 |
++ return fits; |
33631 |
++ |
33632 |
++ /* |
33633 |
++ * We must use capacity_orig_of() for comparing against uclamp_min and |
33634 |
++ * uclamp_max. We only care about capacity pressure (by using |
33635 |
++ * capacity_of()) for comparing against the real util. |
33636 |
++ * |
33637 |
++ * If a task is boosted to 1024 for example, we don't want a tiny |
33638 |
++ * pressure to skew the check whether it fits a CPU or not. |
33639 |
++ * |
33640 |
++ * Similarly if a task is capped to capacity_orig_of(little_cpu), it |
33641 |
++ * should fit a little cpu even if there's some pressure. |
33642 |
++ * |
33643 |
++ * Only exception is for thermal pressure since it has a direct impact |
33644 |
++ * on available OPP of the system. |
33645 |
++ * |
33646 |
++ * We honour it for uclamp_min only as a drop in performance level |
33647 |
++ * could result in not getting the requested minimum performance level. |
33648 |
++ * |
33649 |
++ * For uclamp_max, we can tolerate a drop in performance level as the |
33650 |
++ * goal is to cap the task. So it's okay if it's getting less. |
33651 |
++ * |
33652 |
++ * In case of capacity inversion, which is not handled yet, we should |
33653 |
++ * honour the inverted capacity for both uclamp_min and uclamp_max all |
33654 |
++ * the time. |
33655 |
++ */ |
33656 |
++ capacity_orig = capacity_orig_of(cpu); |
33657 |
++ capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); |
33658 |
++ |
33659 |
++ /* |
33660 |
++ * We want to force a task to fit a cpu as implied by uclamp_max. |
33661 |
++ * But we do have some corner cases to cater for.. |
33662 |
++ * |
33663 |
++ * |
33664 |
++ * C=z |
33665 |
++ * | ___ |
33666 |
++ * | C=y | | |
33667 |
++ * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max |
33668 |
++ * | C=x | | | | |
33669 |
++ * | ___ | | | | |
33670 |
++ * | | | | | | | (util somewhere in this region) |
33671 |
++ * | | | | | | | |
33672 |
++ * | | | | | | | |
33673 |
++ * +---------------------------------------- |
33674 |
++ * cpu0 cpu1 cpu2 |
33675 |
++ * |
33676 |
++ * In the above example if a task is capped to a specific performance |
33677 |
++ * point, y, then when: |
33678 |
++ * |
33679 |
++ * * util = 80% of x then it does not fit on cpu0 and should migrate |
33680 |
++ * to cpu1 |
33681 |
++ * * util = 80% of y then it is forced to fit on cpu1 to honour |
33682 |
++ * uclamp_max request. |
33683 |
++ * |
33684 |
++ * which is what we're enforcing here. A task always fits if |
33685 |
++ * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig, |
33686 |
++ * the normal upmigration rules should withhold still. |
33687 |
++ * |
33688 |
++ * Only exception is when we are on max capacity, then we need to be |
33689 |
++ * careful not to block overutilized state. This is so because: |
33690 |
++ * |
33691 |
++ * 1. There's no concept of capping at max_capacity! We can't go |
33692 |
++ * beyond this performance level anyway. |
33693 |
++ * 2. The system is being saturated when we're operating near |
33694 |
++ * max capacity, it doesn't make sense to block overutilized. |
33695 |
++ */ |
33696 |
++ uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE); |
33697 |
++ uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig); |
33698 |
++ fits = fits || uclamp_max_fits; |
33699 |
++ |
33700 |
++ /* |
33701 |
++ * |
33702 |
++ * C=z |
33703 |
++ * | ___ (region a, capped, util >= uclamp_max) |
33704 |
++ * | C=y | | |
33705 |
++ * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max |
33706 |
++ * | C=x | | | | |
33707 |
++ * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max) |
33708 |
++ * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min |
33709 |
++ * | | | | | | | |
33710 |
++ * | | | | | | | (region c, boosted, util < uclamp_min) |
33711 |
++ * +---------------------------------------- |
33712 |
++ * cpu0 cpu1 cpu2 |
33713 |
++ * |
33714 |
++ * a) If util > uclamp_max, then we're capped, we don't care about |
33715 |
++ * actual fitness value here. We only care if uclamp_max fits |
33716 |
++ * capacity without taking margin/pressure into account. |
33717 |
++ * See comment above. |
33718 |
++ * |
33719 |
++ * b) If uclamp_min <= util <= uclamp_max, then the normal |
33720 |
++ * fits_capacity() rules apply. Except we need to ensure that we |
33721 |
++ * enforce we remain within uclamp_max, see comment above. |
33722 |
++ * |
33723 |
++ * c) If util < uclamp_min, then we are boosted. Same as (b) but we |
33724 |
++ * need to take into account the boosted value fits the CPU without |
33725 |
++ * taking margin/pressure into account. |
33726 |
++ * |
33727 |
++ * Cases (a) and (b) are handled in the 'fits' variable already. We |
33728 |
++ * just need to consider an extra check for case (c) after ensuring we |
33729 |
++ * handle the case uclamp_min > uclamp_max. |
33730 |
++ */ |
33731 |
++ uclamp_min = min(uclamp_min, uclamp_max); |
33732 |
++ if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE) |
33733 |
++ fits = fits && (uclamp_min <= capacity_orig_thermal); |
33734 |
++ |
33735 |
++ return fits; |
33736 |
++} |
33737 |
++ |
33738 |
++static inline int task_fits_cpu(struct task_struct *p, int cpu) |
33739 |
++{ |
33740 |
++ unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); |
33741 |
++ unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); |
33742 |
++ unsigned long util = task_util_est(p); |
33743 |
++ return util_fits_cpu(util, uclamp_min, uclamp_max, cpu); |
33744 |
+ } |
33745 |
+ |
33746 |
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) |
33747 |
+ { |
33748 |
+- if (!static_branch_unlikely(&sched_asym_cpucapacity)) |
33749 |
++ if (!sched_asym_cpucap_active()) |
33750 |
+ return; |
33751 |
+ |
33752 |
+ if (!p || p->nr_cpus_allowed == 1) { |
33753 |
+@@ -4135,7 +4261,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) |
33754 |
+ return; |
33755 |
+ } |
33756 |
+ |
33757 |
+- if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { |
33758 |
++ if (task_fits_cpu(p, cpu_of(rq))) { |
33759 |
+ rq->misfit_task_load = 0; |
33760 |
+ return; |
33761 |
+ } |
33762 |
+@@ -6372,21 +6498,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool |
33763 |
+ static int |
33764 |
+ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) |
33765 |
+ { |
33766 |
+- unsigned long task_util, best_cap = 0; |
33767 |
++ unsigned long task_util, util_min, util_max, best_cap = 0; |
33768 |
+ int cpu, best_cpu = -1; |
33769 |
+ struct cpumask *cpus; |
33770 |
+ |
33771 |
+ cpus = this_cpu_cpumask_var_ptr(select_idle_mask); |
33772 |
+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); |
33773 |
+ |
33774 |
+- task_util = uclamp_task_util(p); |
33775 |
++ task_util = task_util_est(p); |
33776 |
++ util_min = uclamp_eff_value(p, UCLAMP_MIN); |
33777 |
++ util_max = uclamp_eff_value(p, UCLAMP_MAX); |
33778 |
+ |
33779 |
+ for_each_cpu_wrap(cpu, cpus, target) { |
33780 |
+ unsigned long cpu_cap = capacity_of(cpu); |
33781 |
+ |
33782 |
+ if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) |
33783 |
+ continue; |
33784 |
+- if (fits_capacity(task_util, cpu_cap)) |
33785 |
++ if (util_fits_cpu(task_util, util_min, util_max, cpu)) |
33786 |
+ return cpu; |
33787 |
+ |
33788 |
+ if (cpu_cap > best_cap) { |
33789 |
+@@ -6398,10 +6526,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) |
33790 |
+ return best_cpu; |
33791 |
+ } |
33792 |
+ |
33793 |
+-static inline bool asym_fits_capacity(int task_util, int cpu) |
33794 |
++static inline bool asym_fits_cpu(unsigned long util, |
33795 |
++ unsigned long util_min, |
33796 |
++ unsigned long util_max, |
33797 |
++ int cpu) |
33798 |
+ { |
33799 |
+- if (static_branch_unlikely(&sched_asym_cpucapacity)) |
33800 |
+- return fits_capacity(task_util, capacity_of(cpu)); |
33801 |
++ if (sched_asym_cpucap_active()) |
33802 |
++ return util_fits_cpu(util, util_min, util_max, cpu); |
33803 |
+ |
33804 |
+ return true; |
33805 |
+ } |
33806 |
+@@ -6413,16 +6544,18 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33807 |
+ { |
33808 |
+ bool has_idle_core = false; |
33809 |
+ struct sched_domain *sd; |
33810 |
+- unsigned long task_util; |
33811 |
++ unsigned long task_util, util_min, util_max; |
33812 |
+ int i, recent_used_cpu; |
33813 |
+ |
33814 |
+ /* |
33815 |
+ * On asymmetric system, update task utilization because we will check |
33816 |
+ * that the task fits with cpu's capacity. |
33817 |
+ */ |
33818 |
+- if (static_branch_unlikely(&sched_asym_cpucapacity)) { |
33819 |
++ if (sched_asym_cpucap_active()) { |
33820 |
+ sync_entity_load_avg(&p->se); |
33821 |
+- task_util = uclamp_task_util(p); |
33822 |
++ task_util = task_util_est(p); |
33823 |
++ util_min = uclamp_eff_value(p, UCLAMP_MIN); |
33824 |
++ util_max = uclamp_eff_value(p, UCLAMP_MAX); |
33825 |
+ } |
33826 |
+ |
33827 |
+ /* |
33828 |
+@@ -6431,7 +6564,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33829 |
+ lockdep_assert_irqs_disabled(); |
33830 |
+ |
33831 |
+ if ((available_idle_cpu(target) || sched_idle_cpu(target)) && |
33832 |
+- asym_fits_capacity(task_util, target)) |
33833 |
++ asym_fits_cpu(task_util, util_min, util_max, target)) |
33834 |
+ return target; |
33835 |
+ |
33836 |
+ /* |
33837 |
+@@ -6439,7 +6572,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33838 |
+ */ |
33839 |
+ if (prev != target && cpus_share_cache(prev, target) && |
33840 |
+ (available_idle_cpu(prev) || sched_idle_cpu(prev)) && |
33841 |
+- asym_fits_capacity(task_util, prev)) |
33842 |
++ asym_fits_cpu(task_util, util_min, util_max, prev)) |
33843 |
+ return prev; |
33844 |
+ |
33845 |
+ /* |
33846 |
+@@ -6454,7 +6587,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33847 |
+ in_task() && |
33848 |
+ prev == smp_processor_id() && |
33849 |
+ this_rq()->nr_running <= 1 && |
33850 |
+- asym_fits_capacity(task_util, prev)) { |
33851 |
++ asym_fits_cpu(task_util, util_min, util_max, prev)) { |
33852 |
+ return prev; |
33853 |
+ } |
33854 |
+ |
33855 |
+@@ -6466,12 +6599,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33856 |
+ cpus_share_cache(recent_used_cpu, target) && |
33857 |
+ (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && |
33858 |
+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && |
33859 |
+- asym_fits_capacity(task_util, recent_used_cpu)) { |
33860 |
+- /* |
33861 |
+- * Replace recent_used_cpu with prev as it is a potential |
33862 |
+- * candidate for the next wake: |
33863 |
+- */ |
33864 |
+- p->recent_used_cpu = prev; |
33865 |
++ asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { |
33866 |
+ return recent_used_cpu; |
33867 |
+ } |
33868 |
+ |
33869 |
+@@ -6479,7 +6607,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) |
33870 |
+ * For asymmetric CPU capacity systems, our domain of interest is |
33871 |
+ * sd_asym_cpucapacity rather than sd_llc. |
33872 |
+ */ |
33873 |
+- if (static_branch_unlikely(&sched_asym_cpucapacity)) { |
33874 |
++ if (sched_asym_cpucap_active()) { |
33875 |
+ sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); |
33876 |
+ /* |
33877 |
+ * On an asymmetric CPU capacity system where an exclusive |
33878 |
+@@ -8033,7 +8161,7 @@ static int detach_tasks(struct lb_env *env) |
33879 |
+ |
33880 |
+ case migrate_misfit: |
33881 |
+ /* This is not a misfit task */ |
33882 |
+- if (task_fits_capacity(p, capacity_of(env->src_cpu))) |
33883 |
++ if (task_fits_cpu(p, env->src_cpu)) |
33884 |
+ goto next; |
33885 |
+ |
33886 |
+ env->imbalance = 0; |
33887 |
+@@ -8918,6 +9046,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, |
33888 |
+ |
33889 |
+ memset(sgs, 0, sizeof(*sgs)); |
33890 |
+ |
33891 |
++ /* Assume that task can't fit any CPU of the group */ |
33892 |
++ if (sd->flags & SD_ASYM_CPUCAPACITY) |
33893 |
++ sgs->group_misfit_task_load = 1; |
33894 |
++ |
33895 |
+ for_each_cpu(i, sched_group_span(group)) { |
33896 |
+ struct rq *rq = cpu_rq(i); |
33897 |
+ unsigned int local; |
33898 |
+@@ -8937,12 +9069,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, |
33899 |
+ if (!nr_running && idle_cpu_without(i, p)) |
33900 |
+ sgs->idle_cpus++; |
33901 |
+ |
33902 |
+- } |
33903 |
++ /* Check if task fits in the CPU */ |
33904 |
++ if (sd->flags & SD_ASYM_CPUCAPACITY && |
33905 |
++ sgs->group_misfit_task_load && |
33906 |
++ task_fits_cpu(p, i)) |
33907 |
++ sgs->group_misfit_task_load = 0; |
33908 |
+ |
33909 |
+- /* Check if task fits in the group */ |
33910 |
+- if (sd->flags & SD_ASYM_CPUCAPACITY && |
33911 |
+- !task_fits_capacity(p, group->sgc->max_capacity)) { |
33912 |
+- sgs->group_misfit_task_load = 1; |
33913 |
+ } |
33914 |
+ |
33915 |
+ sgs->group_capacity = group->sgc->capacity; |
33916 |
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c |
33917 |
+index f75dcd3537b84..add67f811e004 100644 |
33918 |
+--- a/kernel/sched/rt.c |
33919 |
++++ b/kernel/sched/rt.c |
33920 |
+@@ -473,7 +473,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) |
33921 |
+ unsigned int cpu_cap; |
33922 |
+ |
33923 |
+ /* Only heterogeneous systems can benefit from this check */ |
33924 |
+- if (!static_branch_unlikely(&sched_asym_cpucapacity)) |
33925 |
++ if (!sched_asym_cpucap_active()) |
33926 |
+ return true; |
33927 |
+ |
33928 |
+ min_cap = uclamp_eff_value(p, UCLAMP_MIN); |
33929 |
+@@ -1736,7 +1736,7 @@ static int find_lowest_rq(struct task_struct *task) |
33930 |
+ * If we're on asym system ensure we consider the different capacities |
33931 |
+ * of the CPUs when searching for the lowest_mask. |
33932 |
+ */ |
33933 |
+- if (static_branch_unlikely(&sched_asym_cpucapacity)) { |
33934 |
++ if (sched_asym_cpucap_active()) { |
33935 |
+ |
33936 |
+ ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, |
33937 |
+ task, lowest_mask, |
33938 |
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h |
33939 |
+index 7a3fcd70aa868..e1f46ed412bce 100644 |
33940 |
+--- a/kernel/sched/sched.h |
33941 |
++++ b/kernel/sched/sched.h |
33942 |
+@@ -1788,6 +1788,11 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); |
33943 |
+ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); |
33944 |
+ extern struct static_key_false sched_asym_cpucapacity; |
33945 |
+ |
33946 |
++static __always_inline bool sched_asym_cpucap_active(void) |
33947 |
++{ |
33948 |
++ return static_branch_unlikely(&sched_asym_cpucapacity); |
33949 |
++} |
33950 |
++ |
33951 |
+ struct sched_group_capacity { |
33952 |
+ atomic_t ref; |
33953 |
+ /* |
33954 |
+@@ -2916,6 +2921,15 @@ static inline bool uclamp_is_used(void) |
33955 |
+ return static_branch_likely(&sched_uclamp_used); |
33956 |
+ } |
33957 |
+ #else /* CONFIG_UCLAMP_TASK */ |
33958 |
++static inline unsigned long uclamp_eff_value(struct task_struct *p, |
33959 |
++ enum uclamp_id clamp_id) |
33960 |
++{ |
33961 |
++ if (clamp_id == UCLAMP_MIN) |
33962 |
++ return 0; |
33963 |
++ |
33964 |
++ return SCHED_CAPACITY_SCALE; |
33965 |
++} |
33966 |
++ |
33967 |
+ static inline |
33968 |
+ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, |
33969 |
+ struct task_struct *p) |
33970 |
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c |
33971 |
+index eaa98e2b468fc..16b0d3fa56e00 100644 |
33972 |
+--- a/kernel/trace/blktrace.c |
33973 |
++++ b/kernel/trace/blktrace.c |
33974 |
+@@ -1547,7 +1547,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags, |
33975 |
+ |
33976 |
+ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) |
33977 |
+ { |
33978 |
+- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) |
33979 |
++ if ((iter->ent->type != TRACE_BLK) || |
33980 |
++ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) |
33981 |
+ return TRACE_TYPE_UNHANDLED; |
33982 |
+ |
33983 |
+ return print_one_line(iter, true); |
33984 |
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c |
33985 |
+index 31e3e0bbd1293..6397285883fa0 100644 |
33986 |
+--- a/kernel/trace/trace_events_hist.c |
33987 |
++++ b/kernel/trace/trace_events_hist.c |
33988 |
+@@ -5954,7 +5954,7 @@ enable: |
33989 |
+ /* Just return zero, not the number of registered triggers */ |
33990 |
+ ret = 0; |
33991 |
+ out: |
33992 |
+- if (ret == 0) |
33993 |
++ if (ret == 0 && glob[0]) |
33994 |
+ hist_err_clear(); |
33995 |
+ |
33996 |
+ return ret; |
33997 |
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c |
33998 |
+index 6946f8e204e39..793c31b7e417d 100644 |
33999 |
+--- a/lib/debugobjects.c |
34000 |
++++ b/lib/debugobjects.c |
34001 |
+@@ -440,6 +440,7 @@ static int object_cpu_offline(unsigned int cpu) |
34002 |
+ struct debug_percpu_free *percpu_pool; |
34003 |
+ struct hlist_node *tmp; |
34004 |
+ struct debug_obj *obj; |
34005 |
++ unsigned long flags; |
34006 |
+ |
34007 |
+ /* Remote access is safe as the CPU is dead already */ |
34008 |
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); |
34009 |
+@@ -447,6 +448,12 @@ static int object_cpu_offline(unsigned int cpu) |
34010 |
+ hlist_del(&obj->node); |
34011 |
+ kmem_cache_free(obj_cache, obj); |
34012 |
+ } |
34013 |
++ |
34014 |
++ raw_spin_lock_irqsave(&pool_lock, flags); |
34015 |
++ obj_pool_used -= percpu_pool->obj_free; |
34016 |
++ debug_objects_freed += percpu_pool->obj_free; |
34017 |
++ raw_spin_unlock_irqrestore(&pool_lock, flags); |
34018 |
++ |
34019 |
+ percpu_pool->obj_free = 0; |
34020 |
+ |
34021 |
+ return 0; |
34022 |
+@@ -1321,6 +1328,8 @@ static int __init debug_objects_replace_static_objects(void) |
34023 |
+ hlist_add_head(&obj->node, &objects); |
34024 |
+ } |
34025 |
+ |
34026 |
++ debug_objects_allocated += i; |
34027 |
++ |
34028 |
+ /* |
34029 |
+ * debug_objects_mem_init() is now called early that only one CPU is up |
34030 |
+ * and interrupts have been disabled, so it is safe to replace the |
34031 |
+@@ -1389,6 +1398,7 @@ void __init debug_objects_mem_init(void) |
34032 |
+ debug_objects_enabled = 0; |
34033 |
+ kmem_cache_destroy(obj_cache); |
34034 |
+ pr_warn("out of memory.\n"); |
34035 |
++ return; |
34036 |
+ } else |
34037 |
+ debug_objects_selftest(); |
34038 |
+ |
34039 |
+diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c |
34040 |
+index 5f4b07b56cd9c..9738664386088 100644 |
34041 |
+--- a/lib/fonts/fonts.c |
34042 |
++++ b/lib/fonts/fonts.c |
34043 |
+@@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, |
34044 |
+ if (res > 20) |
34045 |
+ c += 20 - res; |
34046 |
+ |
34047 |
+- if ((font_w & (1 << (f->width - 1))) && |
34048 |
+- (font_h & (1 << (f->height - 1)))) |
34049 |
++ if ((font_w & (1U << (f->width - 1))) && |
34050 |
++ (font_h & (1U << (f->height - 1)))) |
34051 |
+ c += 1000; |
34052 |
+ |
34053 |
+ if (c > cc) { |
34054 |
+diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c |
34055 |
+index 21016b32d3131..2b24ea6c94979 100644 |
34056 |
+--- a/lib/notifier-error-inject.c |
34057 |
++++ b/lib/notifier-error-inject.c |
34058 |
+@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val) |
34059 |
+ return 0; |
34060 |
+ } |
34061 |
+ |
34062 |
+-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, |
34063 |
++DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set, |
34064 |
+ "%lld\n"); |
34065 |
+ |
34066 |
+ static struct dentry *debugfs_create_errno(const char *name, umode_t mode, |
34067 |
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c |
34068 |
+index 1bccd6cd5f482..e68be7aba7d16 100644 |
34069 |
+--- a/lib/test_firmware.c |
34070 |
++++ b/lib/test_firmware.c |
34071 |
+@@ -1111,6 +1111,7 @@ static int __init test_firmware_init(void) |
34072 |
+ |
34073 |
+ rc = misc_register(&test_fw_misc_device); |
34074 |
+ if (rc) { |
34075 |
++ __test_firmware_config_free(); |
34076 |
+ kfree(test_fw_config); |
34077 |
+ pr_err("could not register misc device: %d\n", rc); |
34078 |
+ return rc; |
34079 |
+diff --git a/lib/test_overflow.c b/lib/test_overflow.c |
34080 |
+index 7a4b6f6c5473c..7a5a5738d2d21 100644 |
34081 |
+--- a/lib/test_overflow.c |
34082 |
++++ b/lib/test_overflow.c |
34083 |
+@@ -588,12 +588,110 @@ static int __init test_overflow_allocation(void) |
34084 |
+ return err; |
34085 |
+ } |
34086 |
+ |
34087 |
++struct __test_flex_array { |
34088 |
++ unsigned long flags; |
34089 |
++ size_t count; |
34090 |
++ unsigned long data[]; |
34091 |
++}; |
34092 |
++ |
34093 |
++static int __init test_overflow_size_helpers(void) |
34094 |
++{ |
34095 |
++ struct __test_flex_array *obj; |
34096 |
++ int count = 0; |
34097 |
++ int err = 0; |
34098 |
++ int var; |
34099 |
++ |
34100 |
++#define check_one_size_helper(expected, func, args...) ({ \ |
34101 |
++ bool __failure = false; \ |
34102 |
++ size_t _r; \ |
34103 |
++ \ |
34104 |
++ _r = func(args); \ |
34105 |
++ if (_r != (expected)) { \ |
34106 |
++ pr_warn("expected " #func "(" #args ") " \ |
34107 |
++ "to return %zu but got %zu instead\n", \ |
34108 |
++ (size_t)(expected), _r); \ |
34109 |
++ __failure = true; \ |
34110 |
++ } \ |
34111 |
++ count++; \ |
34112 |
++ __failure; \ |
34113 |
++}) |
34114 |
++ |
34115 |
++ var = 4; |
34116 |
++ err |= check_one_size_helper(20, size_mul, var++, 5); |
34117 |
++ err |= check_one_size_helper(20, size_mul, 4, var++); |
34118 |
++ err |= check_one_size_helper(0, size_mul, 0, 3); |
34119 |
++ err |= check_one_size_helper(0, size_mul, 3, 0); |
34120 |
++ err |= check_one_size_helper(6, size_mul, 2, 3); |
34121 |
++ err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); |
34122 |
++ err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); |
34123 |
++ err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); |
34124 |
++ |
34125 |
++ var = 4; |
34126 |
++ err |= check_one_size_helper(9, size_add, var++, 5); |
34127 |
++ err |= check_one_size_helper(9, size_add, 4, var++); |
34128 |
++ err |= check_one_size_helper(9, size_add, 9, 0); |
34129 |
++ err |= check_one_size_helper(9, size_add, 0, 9); |
34130 |
++ err |= check_one_size_helper(5, size_add, 2, 3); |
34131 |
++ err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); |
34132 |
++ err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); |
34133 |
++ err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); |
34134 |
++ |
34135 |
++ var = 4; |
34136 |
++ err |= check_one_size_helper(1, size_sub, var--, 3); |
34137 |
++ err |= check_one_size_helper(1, size_sub, 4, var--); |
34138 |
++ err |= check_one_size_helper(1, size_sub, 3, 2); |
34139 |
++ err |= check_one_size_helper(9, size_sub, 9, 0); |
34140 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3); |
34141 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9); |
34142 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3); |
34143 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); |
34144 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); |
34145 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); |
34146 |
++ err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); |
34147 |
++ err |= check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); |
34148 |
++ err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); |
34149 |
++ err |= check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); |
34150 |
++ |
34151 |
++ var = 4; |
34152 |
++ err |= check_one_size_helper(4 * sizeof(*obj->data), |
34153 |
++ flex_array_size, obj, data, var++); |
34154 |
++ err |= check_one_size_helper(5 * sizeof(*obj->data), |
34155 |
++ flex_array_size, obj, data, var++); |
34156 |
++ err |= check_one_size_helper(0, flex_array_size, obj, data, 0); |
34157 |
++ err |= check_one_size_helper(sizeof(*obj->data), |
34158 |
++ flex_array_size, obj, data, 1); |
34159 |
++ err |= check_one_size_helper(7 * sizeof(*obj->data), |
34160 |
++ flex_array_size, obj, data, 7); |
34161 |
++ err |= check_one_size_helper(SIZE_MAX, |
34162 |
++ flex_array_size, obj, data, -1); |
34163 |
++ err |= check_one_size_helper(SIZE_MAX, |
34164 |
++ flex_array_size, obj, data, SIZE_MAX - 4); |
34165 |
++ |
34166 |
++ var = 4; |
34167 |
++ err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), |
34168 |
++ struct_size, obj, data, var++); |
34169 |
++ err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), |
34170 |
++ struct_size, obj, data, var++); |
34171 |
++ err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); |
34172 |
++ err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), |
34173 |
++ struct_size, obj, data, 1); |
34174 |
++ err |= check_one_size_helper(SIZE_MAX, |
34175 |
++ struct_size, obj, data, -3); |
34176 |
++ err |= check_one_size_helper(SIZE_MAX, |
34177 |
++ struct_size, obj, data, SIZE_MAX - 3); |
34178 |
++ |
34179 |
++ pr_info("%d overflow size helper tests finished\n", count); |
34180 |
++ |
34181 |
++ return err; |
34182 |
++} |
34183 |
++ |
34184 |
+ static int __init test_module_init(void) |
34185 |
+ { |
34186 |
+ int err = 0; |
34187 |
+ |
34188 |
+ err |= test_overflow_calculation(); |
34189 |
+ err |= test_overflow_shift(); |
34190 |
++ err |= test_overflow_size_helpers(); |
34191 |
+ err |= test_overflow_allocation(); |
34192 |
+ |
34193 |
+ if (err) { |
34194 |
+diff --git a/net/802/mrp.c b/net/802/mrp.c |
34195 |
+index 35e04cc5390c4..c10a432a5b435 100644 |
34196 |
+--- a/net/802/mrp.c |
34197 |
++++ b/net/802/mrp.c |
34198 |
+@@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t) |
34199 |
+ spin_unlock(&app->lock); |
34200 |
+ |
34201 |
+ mrp_queue_xmit(app); |
34202 |
+- mrp_join_timer_arm(app); |
34203 |
++ spin_lock(&app->lock); |
34204 |
++ if (likely(app->active)) |
34205 |
++ mrp_join_timer_arm(app); |
34206 |
++ spin_unlock(&app->lock); |
34207 |
+ } |
34208 |
+ |
34209 |
+ static void mrp_periodic_timer_arm(struct mrp_applicant *app) |
34210 |
+@@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t) |
34211 |
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer); |
34212 |
+ |
34213 |
+ spin_lock(&app->lock); |
34214 |
+- mrp_mad_event(app, MRP_EVENT_PERIODIC); |
34215 |
+- mrp_pdu_queue(app); |
34216 |
++ if (likely(app->active)) { |
34217 |
++ mrp_mad_event(app, MRP_EVENT_PERIODIC); |
34218 |
++ mrp_pdu_queue(app); |
34219 |
++ mrp_periodic_timer_arm(app); |
34220 |
++ } |
34221 |
+ spin_unlock(&app->lock); |
34222 |
+- |
34223 |
+- mrp_periodic_timer_arm(app); |
34224 |
+ } |
34225 |
+ |
34226 |
+ static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) |
34227 |
+@@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl) |
34228 |
+ app->dev = dev; |
34229 |
+ app->app = appl; |
34230 |
+ app->mad = RB_ROOT; |
34231 |
++ app->active = true; |
34232 |
+ spin_lock_init(&app->lock); |
34233 |
+ skb_queue_head_init(&app->queue); |
34234 |
+ rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); |
34235 |
+@@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) |
34236 |
+ |
34237 |
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL); |
34238 |
+ |
34239 |
++ spin_lock_bh(&app->lock); |
34240 |
++ app->active = false; |
34241 |
++ spin_unlock_bh(&app->lock); |
34242 |
+ /* Delete timer and generate a final TX event to flush out |
34243 |
+ * all pending messages before the applicant is gone. |
34244 |
+ */ |
34245 |
+diff --git a/net/9p/client.c b/net/9p/client.c |
34246 |
+index 565aee6dfcc66..08e0c9990af06 100644 |
34247 |
+--- a/net/9p/client.c |
34248 |
++++ b/net/9p/client.c |
34249 |
+@@ -281,6 +281,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size) |
34250 |
+ p9pdu_reset(&req->rc); |
34251 |
+ req->t_err = 0; |
34252 |
+ req->status = REQ_STATUS_ALLOC; |
34253 |
++ /* refcount needs to be set to 0 before inserting into the idr |
34254 |
++ * so p9_tag_lookup does not accept a request that is not fully |
34255 |
++ * initialized. refcount_set to 2 below will mark request ready. |
34256 |
++ */ |
34257 |
++ refcount_set(&req->refcount, 0); |
34258 |
+ init_waitqueue_head(&req->wq); |
34259 |
+ INIT_LIST_HEAD(&req->req_list); |
34260 |
+ |
34261 |
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c |
34262 |
+index bb84ff5fb98a2..a41b4dcf1a7a8 100644 |
34263 |
+--- a/net/bluetooth/hci_core.c |
34264 |
++++ b/net/bluetooth/hci_core.c |
34265 |
+@@ -5101,7 +5101,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, |
34266 |
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; |
34267 |
+ else |
34268 |
+ *req_complete = bt_cb(skb)->hci.req_complete; |
34269 |
+- kfree_skb(skb); |
34270 |
++ dev_kfree_skb_irq(skb); |
34271 |
+ } |
34272 |
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); |
34273 |
+ } |
34274 |
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c |
34275 |
+index f09f0a78eb7be..04000499f4a21 100644 |
34276 |
+--- a/net/bluetooth/mgmt.c |
34277 |
++++ b/net/bluetooth/mgmt.c |
34278 |
+@@ -7971,7 +7971,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, |
34279 |
+ * extra parameters we don't know about will be ignored in this request. |
34280 |
+ */ |
34281 |
+ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) |
34282 |
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, |
34283 |
++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, |
34284 |
+ MGMT_STATUS_INVALID_PARAMS); |
34285 |
+ |
34286 |
+ flags = __le32_to_cpu(cp->flags); |
34287 |
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c |
34288 |
+index 7324764384b67..8d6fce9005bdd 100644 |
34289 |
+--- a/net/bluetooth/rfcomm/core.c |
34290 |
++++ b/net/bluetooth/rfcomm/core.c |
34291 |
+@@ -590,7 +590,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) |
34292 |
+ |
34293 |
+ ret = rfcomm_dlc_send_frag(d, frag); |
34294 |
+ if (ret < 0) { |
34295 |
+- kfree_skb(frag); |
34296 |
++ dev_kfree_skb_irq(frag); |
34297 |
+ goto unlock; |
34298 |
+ } |
34299 |
+ |
34300 |
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c |
34301 |
+index 7583ee98c35b5..11d254ce3581c 100644 |
34302 |
+--- a/net/bpf/test_run.c |
34303 |
++++ b/net/bpf/test_run.c |
34304 |
+@@ -470,9 +470,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) |
34305 |
+ { |
34306 |
+ struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb; |
34307 |
+ |
34308 |
+- if (!skb->len) |
34309 |
+- return -EINVAL; |
34310 |
+- |
34311 |
+ if (!__skb) |
34312 |
+ return 0; |
34313 |
+ |
34314 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
34315 |
+index be51644e95dae..33d6b691e15ea 100644 |
34316 |
+--- a/net/core/dev.c |
34317 |
++++ b/net/core/dev.c |
34318 |
+@@ -10640,24 +10640,16 @@ void netdev_run_todo(void) |
34319 |
+ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, |
34320 |
+ const struct net_device_stats *netdev_stats) |
34321 |
+ { |
34322 |
+-#if BITS_PER_LONG == 64 |
34323 |
+- BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); |
34324 |
+- memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); |
34325 |
+- /* zero out counters that only exist in rtnl_link_stats64 */ |
34326 |
+- memset((char *)stats64 + sizeof(*netdev_stats), 0, |
34327 |
+- sizeof(*stats64) - sizeof(*netdev_stats)); |
34328 |
+-#else |
34329 |
+- size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); |
34330 |
+- const unsigned long *src = (const unsigned long *)netdev_stats; |
34331 |
++ size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); |
34332 |
++ const atomic_long_t *src = (atomic_long_t *)netdev_stats; |
34333 |
+ u64 *dst = (u64 *)stats64; |
34334 |
+ |
34335 |
+ BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); |
34336 |
+ for (i = 0; i < n; i++) |
34337 |
+- dst[i] = src[i]; |
34338 |
++ dst[i] = atomic_long_read(&src[i]); |
34339 |
+ /* zero out counters that only exist in rtnl_link_stats64 */ |
34340 |
+ memset((char *)stats64 + n * sizeof(u64), 0, |
34341 |
+ sizeof(*stats64) - n * sizeof(u64)); |
34342 |
+-#endif |
34343 |
+ } |
34344 |
+ EXPORT_SYMBOL(netdev_stats_to_stats64); |
34345 |
+ |
34346 |
+diff --git a/net/core/filter.c b/net/core/filter.c |
34347 |
+index fb5b9dbf3bc08..2da05622afbe8 100644 |
34348 |
+--- a/net/core/filter.c |
34349 |
++++ b/net/core/filter.c |
34350 |
+@@ -2123,8 +2123,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
34351 |
+ { |
34352 |
+ unsigned int mlen = skb_network_offset(skb); |
34353 |
+ |
34354 |
++ if (unlikely(skb->len <= mlen)) { |
34355 |
++ kfree_skb(skb); |
34356 |
++ return -ERANGE; |
34357 |
++ } |
34358 |
++ |
34359 |
+ if (mlen) { |
34360 |
+ __skb_pull(skb, mlen); |
34361 |
++ if (unlikely(!skb->len)) { |
34362 |
++ kfree_skb(skb); |
34363 |
++ return -ERANGE; |
34364 |
++ } |
34365 |
+ |
34366 |
+ /* At ingress, the mac header has already been pulled once. |
34367 |
+ * At egress, skb_pospull_rcsum has to be done in case that |
34368 |
+@@ -2144,7 +2153,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, |
34369 |
+ u32 flags) |
34370 |
+ { |
34371 |
+ /* Verify that a link layer header is carried */ |
34372 |
+- if (unlikely(skb->mac_header >= skb->network_header)) { |
34373 |
++ if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) { |
34374 |
+ kfree_skb(skb); |
34375 |
+ return -ERANGE; |
34376 |
+ } |
34377 |
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
34378 |
+index 6706bd3c8e9ca..058ec2f17da68 100644 |
34379 |
+--- a/net/core/skbuff.c |
34380 |
++++ b/net/core/skbuff.c |
34381 |
+@@ -2263,6 +2263,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) |
34382 |
+ insp = list; |
34383 |
+ } else { |
34384 |
+ /* Eaten partially. */ |
34385 |
++ if (skb_is_gso(skb) && !list->head_frag && |
34386 |
++ skb_headlen(list)) |
34387 |
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
34388 |
+ |
34389 |
+ if (skb_shared(list)) { |
34390 |
+ /* Sucks! We need to fork list. :-( */ |
34391 |
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c |
34392 |
+index f562f7e2bdc72..dc9b93d8f0d3e 100644 |
34393 |
+--- a/net/core/skmsg.c |
34394 |
++++ b/net/core/skmsg.c |
34395 |
+@@ -880,13 +880,16 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, |
34396 |
+ ret = sk_psock_map_verd(ret, msg->sk_redir); |
34397 |
+ psock->apply_bytes = msg->apply_bytes; |
34398 |
+ if (ret == __SK_REDIRECT) { |
34399 |
+- if (psock->sk_redir) |
34400 |
++ if (psock->sk_redir) { |
34401 |
+ sock_put(psock->sk_redir); |
34402 |
+- psock->sk_redir = msg->sk_redir; |
34403 |
+- if (!psock->sk_redir) { |
34404 |
++ psock->sk_redir = NULL; |
34405 |
++ } |
34406 |
++ if (!msg->sk_redir) { |
34407 |
+ ret = __SK_DROP; |
34408 |
+ goto out; |
34409 |
+ } |
34410 |
++ psock->redir_ingress = sk_msg_to_ingress(msg); |
34411 |
++ psock->sk_redir = msg->sk_redir; |
34412 |
+ sock_hold(psock->sk_redir); |
34413 |
+ } |
34414 |
+ out: |
34415 |
+diff --git a/net/core/sock.c b/net/core/sock.c |
34416 |
+index 9bcffe1d5332a..b7ac53e72d1ad 100644 |
34417 |
+--- a/net/core/sock.c |
34418 |
++++ b/net/core/sock.c |
34419 |
+@@ -1302,7 +1302,7 @@ set_sndbuf: |
34420 |
+ break; |
34421 |
+ } |
34422 |
+ case SO_INCOMING_CPU: |
34423 |
+- WRITE_ONCE(sk->sk_incoming_cpu, val); |
34424 |
++ reuseport_update_incoming_cpu(sk, val); |
34425 |
+ break; |
34426 |
+ |
34427 |
+ case SO_CNX_ADVICE: |
34428 |
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c |
34429 |
+index 4f4bc163a223a..ae6013a8bce53 100644 |
34430 |
+--- a/net/core/sock_map.c |
34431 |
++++ b/net/core/sock_map.c |
34432 |
+@@ -349,11 +349,13 @@ static void sock_map_free(struct bpf_map *map) |
34433 |
+ |
34434 |
+ sk = xchg(psk, NULL); |
34435 |
+ if (sk) { |
34436 |
++ sock_hold(sk); |
34437 |
+ lock_sock(sk); |
34438 |
+ rcu_read_lock(); |
34439 |
+ sock_map_unref(sk, psk); |
34440 |
+ rcu_read_unlock(); |
34441 |
+ release_sock(sk); |
34442 |
++ sock_put(sk); |
34443 |
+ } |
34444 |
+ } |
34445 |
+ |
34446 |
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c |
34447 |
+index fb90e1e00773b..5a165286e4d8e 100644 |
34448 |
+--- a/net/core/sock_reuseport.c |
34449 |
++++ b/net/core/sock_reuseport.c |
34450 |
+@@ -37,6 +37,70 @@ void reuseport_has_conns_set(struct sock *sk) |
34451 |
+ } |
34452 |
+ EXPORT_SYMBOL(reuseport_has_conns_set); |
34453 |
+ |
34454 |
++static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse) |
34455 |
++{ |
34456 |
++ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ |
34457 |
++ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1); |
34458 |
++} |
34459 |
++ |
34460 |
++static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse) |
34461 |
++{ |
34462 |
++ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */ |
34463 |
++ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1); |
34464 |
++} |
34465 |
++ |
34466 |
++static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) |
34467 |
++{ |
34468 |
++ if (sk->sk_incoming_cpu >= 0) |
34469 |
++ __reuseport_get_incoming_cpu(reuse); |
34470 |
++} |
34471 |
++ |
34472 |
++static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse) |
34473 |
++{ |
34474 |
++ if (sk->sk_incoming_cpu >= 0) |
34475 |
++ __reuseport_put_incoming_cpu(reuse); |
34476 |
++} |
34477 |
++ |
34478 |
++void reuseport_update_incoming_cpu(struct sock *sk, int val) |
34479 |
++{ |
34480 |
++ struct sock_reuseport *reuse; |
34481 |
++ int old_sk_incoming_cpu; |
34482 |
++ |
34483 |
++ if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) { |
34484 |
++ /* Paired with READ_ONCE() in sk_incoming_cpu_update() |
34485 |
++ * and compute_score(). |
34486 |
++ */ |
34487 |
++ WRITE_ONCE(sk->sk_incoming_cpu, val); |
34488 |
++ return; |
34489 |
++ } |
34490 |
++ |
34491 |
++ spin_lock_bh(&reuseport_lock); |
34492 |
++ |
34493 |
++ /* This must be done under reuseport_lock to avoid a race with |
34494 |
++ * reuseport_grow(), which accesses sk->sk_incoming_cpu without |
34495 |
++ * lock_sock() when detaching a shutdown()ed sk. |
34496 |
++ * |
34497 |
++ * Paired with READ_ONCE() in reuseport_select_sock_by_hash(). |
34498 |
++ */ |
34499 |
++ old_sk_incoming_cpu = sk->sk_incoming_cpu; |
34500 |
++ WRITE_ONCE(sk->sk_incoming_cpu, val); |
34501 |
++ |
34502 |
++ reuse = rcu_dereference_protected(sk->sk_reuseport_cb, |
34503 |
++ lockdep_is_held(&reuseport_lock)); |
34504 |
++ |
34505 |
++ /* reuseport_grow() has detached a closed sk. */ |
34506 |
++ if (!reuse) |
34507 |
++ goto out; |
34508 |
++ |
34509 |
++ if (old_sk_incoming_cpu < 0 && val >= 0) |
34510 |
++ __reuseport_get_incoming_cpu(reuse); |
34511 |
++ else if (old_sk_incoming_cpu >= 0 && val < 0) |
34512 |
++ __reuseport_put_incoming_cpu(reuse); |
34513 |
++ |
34514 |
++out: |
34515 |
++ spin_unlock_bh(&reuseport_lock); |
34516 |
++} |
34517 |
++ |
34518 |
+ static int reuseport_sock_index(struct sock *sk, |
34519 |
+ const struct sock_reuseport *reuse, |
34520 |
+ bool closed) |
34521 |
+@@ -64,6 +128,7 @@ static void __reuseport_add_sock(struct sock *sk, |
34522 |
+ /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */ |
34523 |
+ smp_wmb(); |
34524 |
+ reuse->num_socks++; |
34525 |
++ reuseport_get_incoming_cpu(sk, reuse); |
34526 |
+ } |
34527 |
+ |
34528 |
+ static bool __reuseport_detach_sock(struct sock *sk, |
34529 |
+@@ -76,6 +141,7 @@ static bool __reuseport_detach_sock(struct sock *sk, |
34530 |
+ |
34531 |
+ reuse->socks[i] = reuse->socks[reuse->num_socks - 1]; |
34532 |
+ reuse->num_socks--; |
34533 |
++ reuseport_put_incoming_cpu(sk, reuse); |
34534 |
+ |
34535 |
+ return true; |
34536 |
+ } |
34537 |
+@@ -86,6 +152,7 @@ static void __reuseport_add_closed_sock(struct sock *sk, |
34538 |
+ reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk; |
34539 |
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */ |
34540 |
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1); |
34541 |
++ reuseport_get_incoming_cpu(sk, reuse); |
34542 |
+ } |
34543 |
+ |
34544 |
+ static bool __reuseport_detach_closed_sock(struct sock *sk, |
34545 |
+@@ -99,6 +166,7 @@ static bool __reuseport_detach_closed_sock(struct sock *sk, |
34546 |
+ reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks]; |
34547 |
+ /* paired with READ_ONCE() in inet_csk_bind_conflict() */ |
34548 |
+ WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1); |
34549 |
++ reuseport_put_incoming_cpu(sk, reuse); |
34550 |
+ |
34551 |
+ return true; |
34552 |
+ } |
34553 |
+@@ -166,6 +234,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany) |
34554 |
+ reuse->bind_inany = bind_inany; |
34555 |
+ reuse->socks[0] = sk; |
34556 |
+ reuse->num_socks = 1; |
34557 |
++ reuseport_get_incoming_cpu(sk, reuse); |
34558 |
+ rcu_assign_pointer(sk->sk_reuseport_cb, reuse); |
34559 |
+ |
34560 |
+ out: |
34561 |
+@@ -209,6 +278,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) |
34562 |
+ more_reuse->reuseport_id = reuse->reuseport_id; |
34563 |
+ more_reuse->bind_inany = reuse->bind_inany; |
34564 |
+ more_reuse->has_conns = reuse->has_conns; |
34565 |
++ more_reuse->incoming_cpu = reuse->incoming_cpu; |
34566 |
+ |
34567 |
+ memcpy(more_reuse->socks, reuse->socks, |
34568 |
+ reuse->num_socks * sizeof(struct sock *)); |
34569 |
+@@ -458,18 +528,32 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks, |
34570 |
+ static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse, |
34571 |
+ u32 hash, u16 num_socks) |
34572 |
+ { |
34573 |
++ struct sock *first_valid_sk = NULL; |
34574 |
+ int i, j; |
34575 |
+ |
34576 |
+ i = j = reciprocal_scale(hash, num_socks); |
34577 |
+- while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) { |
34578 |
++ do { |
34579 |
++ struct sock *sk = reuse->socks[i]; |
34580 |
++ |
34581 |
++ if (sk->sk_state != TCP_ESTABLISHED) { |
34582 |
++ /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */ |
34583 |
++ if (!READ_ONCE(reuse->incoming_cpu)) |
34584 |
++ return sk; |
34585 |
++ |
34586 |
++ /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */ |
34587 |
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) |
34588 |
++ return sk; |
34589 |
++ |
34590 |
++ if (!first_valid_sk) |
34591 |
++ first_valid_sk = sk; |
34592 |
++ } |
34593 |
++ |
34594 |
+ i++; |
34595 |
+ if (i >= num_socks) |
34596 |
+ i = 0; |
34597 |
+- if (i == j) |
34598 |
+- return NULL; |
34599 |
+- } |
34600 |
++ } while (i != j); |
34601 |
+ |
34602 |
+- return reuse->socks[i]; |
34603 |
++ return first_valid_sk; |
34604 |
+ } |
34605 |
+ |
34606 |
+ /** |
34607 |
+diff --git a/net/core/stream.c b/net/core/stream.c |
34608 |
+index a61130504827a..d7c5413d16d57 100644 |
34609 |
+--- a/net/core/stream.c |
34610 |
++++ b/net/core/stream.c |
34611 |
+@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk) |
34612 |
+ /* First the read buffer. */ |
34613 |
+ __skb_queue_purge(&sk->sk_receive_queue); |
34614 |
+ |
34615 |
++ /* Next, the error queue. |
34616 |
++ * We need to use queue lock, because other threads might |
34617 |
++ * add packets to the queue without socket lock being held. |
34618 |
++ */ |
34619 |
++ skb_queue_purge(&sk->sk_error_queue); |
34620 |
++ |
34621 |
+ /* Next, the write queue. */ |
34622 |
+ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); |
34623 |
+ |
34624 |
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c |
34625 |
+index f8f7b7c34e7da..e443088ab0f65 100644 |
34626 |
+--- a/net/dsa/tag_8021q.c |
34627 |
++++ b/net/dsa/tag_8021q.c |
34628 |
+@@ -529,6 +529,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds) |
34629 |
+ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto) |
34630 |
+ { |
34631 |
+ struct dsa_8021q_context *ctx; |
34632 |
++ int err; |
34633 |
+ |
34634 |
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
34635 |
+ if (!ctx) |
34636 |
+@@ -541,7 +542,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto) |
34637 |
+ |
34638 |
+ ds->tag_8021q_ctx = ctx; |
34639 |
+ |
34640 |
+- return dsa_tag_8021q_setup(ds); |
34641 |
++ err = dsa_tag_8021q_setup(ds); |
34642 |
++ if (err) |
34643 |
++ goto err_free; |
34644 |
++ |
34645 |
++ return 0; |
34646 |
++ |
34647 |
++err_free: |
34648 |
++ kfree(ctx); |
34649 |
++ return err; |
34650 |
+ } |
34651 |
+ EXPORT_SYMBOL_GPL(dsa_tag_8021q_register); |
34652 |
+ |
34653 |
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c |
34654 |
+index e4983f473a3c5..6991d77dcb2e2 100644 |
34655 |
+--- a/net/ethtool/ioctl.c |
34656 |
++++ b/net/ethtool/ioctl.c |
34657 |
+@@ -1988,7 +1988,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) |
34658 |
+ } else { |
34659 |
+ /* Driver expects to be called at twice the frequency in rc */ |
34660 |
+ int n = rc * 2, interval = HZ / n; |
34661 |
+- u64 count = n * id.data, i = 0; |
34662 |
++ u64 count = mul_u32_u32(n, id.data); |
34663 |
++ u64 i = 0; |
34664 |
+ |
34665 |
+ do { |
34666 |
+ rtnl_lock(); |
34667 |
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c |
34668 |
+index a1045c3d71b4f..7ce40b49c9560 100644 |
34669 |
+--- a/net/hsr/hsr_device.c |
34670 |
++++ b/net/hsr/hsr_device.c |
34671 |
+@@ -219,7 +219,9 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
34672 |
+ skb->dev = master->dev; |
34673 |
+ skb_reset_mac_header(skb); |
34674 |
+ skb_reset_mac_len(skb); |
34675 |
++ spin_lock_bh(&hsr->seqnr_lock); |
34676 |
+ hsr_forward_skb(skb, master); |
34677 |
++ spin_unlock_bh(&hsr->seqnr_lock); |
34678 |
+ } else { |
34679 |
+ atomic_long_inc(&dev->tx_dropped); |
34680 |
+ dev_kfree_skb_any(skb); |
34681 |
+@@ -278,7 +280,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
34682 |
+ __u8 type = HSR_TLV_LIFE_CHECK; |
34683 |
+ struct hsr_sup_payload *hsr_sp; |
34684 |
+ struct hsr_sup_tag *hsr_stag; |
34685 |
+- unsigned long irqflags; |
34686 |
+ struct sk_buff *skb; |
34687 |
+ |
34688 |
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); |
34689 |
+@@ -299,7 +300,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
34690 |
+ set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version); |
34691 |
+ |
34692 |
+ /* From HSRv1 on we have separate supervision sequence numbers. */ |
34693 |
+- spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); |
34694 |
++ spin_lock_bh(&hsr->seqnr_lock); |
34695 |
+ if (hsr->prot_version > 0) { |
34696 |
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); |
34697 |
+ hsr->sup_sequence_nr++; |
34698 |
+@@ -307,7 +308,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
34699 |
+ hsr_stag->sequence_nr = htons(hsr->sequence_nr); |
34700 |
+ hsr->sequence_nr++; |
34701 |
+ } |
34702 |
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); |
34703 |
+ |
34704 |
+ hsr_stag->HSR_TLV_type = type; |
34705 |
+ /* TODO: Why 12 in HSRv0? */ |
34706 |
+@@ -318,11 +318,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
34707 |
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); |
34708 |
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); |
34709 |
+ |
34710 |
+- if (skb_put_padto(skb, ETH_ZLEN)) |
34711 |
++ if (skb_put_padto(skb, ETH_ZLEN)) { |
34712 |
++ spin_unlock_bh(&hsr->seqnr_lock); |
34713 |
+ return; |
34714 |
++ } |
34715 |
+ |
34716 |
+ hsr_forward_skb(skb, master); |
34717 |
+- |
34718 |
++ spin_unlock_bh(&hsr->seqnr_lock); |
34719 |
+ return; |
34720 |
+ } |
34721 |
+ |
34722 |
+@@ -332,7 +334,6 @@ static void send_prp_supervision_frame(struct hsr_port *master, |
34723 |
+ struct hsr_priv *hsr = master->hsr; |
34724 |
+ struct hsr_sup_payload *hsr_sp; |
34725 |
+ struct hsr_sup_tag *hsr_stag; |
34726 |
+- unsigned long irqflags; |
34727 |
+ struct sk_buff *skb; |
34728 |
+ |
34729 |
+ skb = hsr_init_skb(master); |
34730 |
+@@ -347,7 +348,7 @@ static void send_prp_supervision_frame(struct hsr_port *master, |
34731 |
+ set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0)); |
34732 |
+ |
34733 |
+ /* From HSRv1 on we have separate supervision sequence numbers. */ |
34734 |
+- spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); |
34735 |
++ spin_lock_bh(&hsr->seqnr_lock); |
34736 |
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); |
34737 |
+ hsr->sup_sequence_nr++; |
34738 |
+ hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; |
34739 |
+@@ -358,13 +359,12 @@ static void send_prp_supervision_frame(struct hsr_port *master, |
34740 |
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); |
34741 |
+ |
34742 |
+ if (skb_put_padto(skb, ETH_ZLEN)) { |
34743 |
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); |
34744 |
++ spin_unlock_bh(&hsr->seqnr_lock); |
34745 |
+ return; |
34746 |
+ } |
34747 |
+ |
34748 |
+- spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); |
34749 |
+- |
34750 |
+ hsr_forward_skb(skb, master); |
34751 |
++ spin_unlock_bh(&hsr->seqnr_lock); |
34752 |
+ } |
34753 |
+ |
34754 |
+ /* Announce (supervision frame) timer function |
34755 |
+@@ -444,7 +444,7 @@ void hsr_dev_setup(struct net_device *dev) |
34756 |
+ dev->header_ops = &hsr_header_ops; |
34757 |
+ dev->netdev_ops = &hsr_device_ops; |
34758 |
+ SET_NETDEV_DEVTYPE(dev, &hsr_type); |
34759 |
+- dev->priv_flags |= IFF_NO_QUEUE; |
34760 |
++ dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; |
34761 |
+ |
34762 |
+ dev->needs_free_netdev = true; |
34763 |
+ |
34764 |
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c |
34765 |
+index 07892c4b6d0c6..35382ed686d1d 100644 |
34766 |
+--- a/net/hsr/hsr_forward.c |
34767 |
++++ b/net/hsr/hsr_forward.c |
34768 |
+@@ -458,7 +458,6 @@ static void handle_std_frame(struct sk_buff *skb, |
34769 |
+ { |
34770 |
+ struct hsr_port *port = frame->port_rcv; |
34771 |
+ struct hsr_priv *hsr = port->hsr; |
34772 |
+- unsigned long irqflags; |
34773 |
+ |
34774 |
+ frame->skb_hsr = NULL; |
34775 |
+ frame->skb_prp = NULL; |
34776 |
+@@ -468,10 +467,9 @@ static void handle_std_frame(struct sk_buff *skb, |
34777 |
+ frame->is_from_san = true; |
34778 |
+ } else { |
34779 |
+ /* Sequence nr for the master node */ |
34780 |
+- spin_lock_irqsave(&hsr->seqnr_lock, irqflags); |
34781 |
++ lockdep_assert_held(&hsr->seqnr_lock); |
34782 |
+ frame->sequence_nr = hsr->sequence_nr; |
34783 |
+ hsr->sequence_nr++; |
34784 |
+- spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags); |
34785 |
+ } |
34786 |
+ } |
34787 |
+ |
34788 |
+@@ -572,11 +570,13 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) |
34789 |
+ { |
34790 |
+ struct hsr_frame_info frame; |
34791 |
+ |
34792 |
++ rcu_read_lock(); |
34793 |
+ if (fill_frame_info(&frame, skb, port) < 0) |
34794 |
+ goto out_drop; |
34795 |
+ |
34796 |
+ hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); |
34797 |
+ hsr_forward_do(&frame); |
34798 |
++ rcu_read_unlock(); |
34799 |
+ /* Gets called for ingress frames as well as egress from master port. |
34800 |
+ * So check and increment stats for master port only here. |
34801 |
+ */ |
34802 |
+@@ -591,6 +591,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) |
34803 |
+ return; |
34804 |
+ |
34805 |
+ out_drop: |
34806 |
++ rcu_read_unlock(); |
34807 |
+ port->dev->stats.tx_dropped++; |
34808 |
+ kfree_skb(skb); |
34809 |
+ } |
34810 |
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c |
34811 |
+index e31949479305e..414bf4d3d3c92 100644 |
34812 |
+--- a/net/hsr/hsr_framereg.c |
34813 |
++++ b/net/hsr/hsr_framereg.c |
34814 |
+@@ -159,6 +159,7 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, |
34815 |
+ return NULL; |
34816 |
+ |
34817 |
+ ether_addr_copy(new_node->macaddress_A, addr); |
34818 |
++ spin_lock_init(&new_node->seq_out_lock); |
34819 |
+ |
34820 |
+ /* We are only interested in time diffs here, so use current jiffies |
34821 |
+ * as initialization. (0 could trigger an spurious ring error warning). |
34822 |
+@@ -313,6 +314,7 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) |
34823 |
+ goto done; |
34824 |
+ |
34825 |
+ ether_addr_copy(node_real->macaddress_B, ethhdr->h_source); |
34826 |
++ spin_lock_bh(&node_real->seq_out_lock); |
34827 |
+ for (i = 0; i < HSR_PT_PORTS; i++) { |
34828 |
+ if (!node_curr->time_in_stale[i] && |
34829 |
+ time_after(node_curr->time_in[i], node_real->time_in[i])) { |
34830 |
+@@ -323,12 +325,16 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame) |
34831 |
+ if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i])) |
34832 |
+ node_real->seq_out[i] = node_curr->seq_out[i]; |
34833 |
+ } |
34834 |
++ spin_unlock_bh(&node_real->seq_out_lock); |
34835 |
+ node_real->addr_B_port = port_rcv->type; |
34836 |
+ |
34837 |
+ spin_lock_bh(&hsr->list_lock); |
34838 |
+- list_del_rcu(&node_curr->mac_list); |
34839 |
++ if (!node_curr->removed) { |
34840 |
++ list_del_rcu(&node_curr->mac_list); |
34841 |
++ node_curr->removed = true; |
34842 |
++ kfree_rcu(node_curr, rcu_head); |
34843 |
++ } |
34844 |
+ spin_unlock_bh(&hsr->list_lock); |
34845 |
+- kfree_rcu(node_curr, rcu_head); |
34846 |
+ |
34847 |
+ done: |
34848 |
+ /* PRP uses v0 header */ |
34849 |
+@@ -416,13 +422,17 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, |
34850 |
+ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, |
34851 |
+ u16 sequence_nr) |
34852 |
+ { |
34853 |
++ spin_lock_bh(&node->seq_out_lock); |
34854 |
+ if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && |
34855 |
+ time_is_after_jiffies(node->time_out[port->type] + |
34856 |
+- msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) |
34857 |
++ msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) { |
34858 |
++ spin_unlock_bh(&node->seq_out_lock); |
34859 |
+ return 1; |
34860 |
++ } |
34861 |
+ |
34862 |
+ node->time_out[port->type] = jiffies; |
34863 |
+ node->seq_out[port->type] = sequence_nr; |
34864 |
++ spin_unlock_bh(&node->seq_out_lock); |
34865 |
+ return 0; |
34866 |
+ } |
34867 |
+ |
34868 |
+@@ -502,9 +512,12 @@ void hsr_prune_nodes(struct timer_list *t) |
34869 |
+ if (time_is_before_jiffies(timestamp + |
34870 |
+ msecs_to_jiffies(HSR_NODE_FORGET_TIME))) { |
34871 |
+ hsr_nl_nodedown(hsr, node->macaddress_A); |
34872 |
+- list_del_rcu(&node->mac_list); |
34873 |
+- /* Note that we need to free this entry later: */ |
34874 |
+- kfree_rcu(node, rcu_head); |
34875 |
++ if (!node->removed) { |
34876 |
++ list_del_rcu(&node->mac_list); |
34877 |
++ node->removed = true; |
34878 |
++ /* Note that we need to free this entry later: */ |
34879 |
++ kfree_rcu(node, rcu_head); |
34880 |
++ } |
34881 |
+ } |
34882 |
+ } |
34883 |
+ spin_unlock_bh(&hsr->list_lock); |
34884 |
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h |
34885 |
+index d9628e7a5f051..48990166e4c4e 100644 |
34886 |
+--- a/net/hsr/hsr_framereg.h |
34887 |
++++ b/net/hsr/hsr_framereg.h |
34888 |
+@@ -69,6 +69,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup); |
34889 |
+ |
34890 |
+ struct hsr_node { |
34891 |
+ struct list_head mac_list; |
34892 |
++ /* Protect R/W access to seq_out */ |
34893 |
++ spinlock_t seq_out_lock; |
34894 |
+ unsigned char macaddress_A[ETH_ALEN]; |
34895 |
+ unsigned char macaddress_B[ETH_ALEN]; |
34896 |
+ /* Local slave through which AddrB frames are received from this node */ |
34897 |
+@@ -80,6 +82,7 @@ struct hsr_node { |
34898 |
+ bool san_a; |
34899 |
+ bool san_b; |
34900 |
+ u16 seq_out[HSR_PT_PORTS]; |
34901 |
++ bool removed; |
34902 |
+ struct rcu_head rcu_head; |
34903 |
+ }; |
34904 |
+ |
34905 |
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c |
34906 |
+index a53f9bf7886f0..8039097546dea 100644 |
34907 |
+--- a/net/ipv4/inet_connection_sock.c |
34908 |
++++ b/net/ipv4/inet_connection_sock.c |
34909 |
+@@ -155,10 +155,14 @@ static int inet_csk_bind_conflict(const struct sock *sk, |
34910 |
+ */ |
34911 |
+ |
34912 |
+ sk_for_each_bound(sk2, &tb->owners) { |
34913 |
+- if (sk != sk2 && |
34914 |
+- (!sk->sk_bound_dev_if || |
34915 |
+- !sk2->sk_bound_dev_if || |
34916 |
+- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
34917 |
++ int bound_dev_if2; |
34918 |
++ |
34919 |
++ if (sk == sk2) |
34920 |
++ continue; |
34921 |
++ bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); |
34922 |
++ if ((!sk->sk_bound_dev_if || |
34923 |
++ !bound_dev_if2 || |
34924 |
++ sk->sk_bound_dev_if == bound_dev_if2)) { |
34925 |
+ if (reuse && sk2->sk_reuse && |
34926 |
+ sk2->sk_state != TCP_LISTEN) { |
34927 |
+ if ((!relax || |
34928 |
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c |
34929 |
+index 5194c6870273c..b4b642e3de783 100644 |
34930 |
+--- a/net/ipv4/tcp_bpf.c |
34931 |
++++ b/net/ipv4/tcp_bpf.c |
34932 |
+@@ -45,8 +45,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, |
34933 |
+ tmp->sg.end = i; |
34934 |
+ if (apply) { |
34935 |
+ apply_bytes -= size; |
34936 |
+- if (!apply_bytes) |
34937 |
++ if (!apply_bytes) { |
34938 |
++ if (sge->length) |
34939 |
++ sk_msg_iter_var_prev(i); |
34940 |
+ break; |
34941 |
++ } |
34942 |
+ } |
34943 |
+ } while (i != msg->sg.end); |
34944 |
+ |
34945 |
+@@ -131,10 +134,9 @@ static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg, |
34946 |
+ return ret; |
34947 |
+ } |
34948 |
+ |
34949 |
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, |
34950 |
+- u32 bytes, int flags) |
34951 |
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, |
34952 |
++ struct sk_msg *msg, u32 bytes, int flags) |
34953 |
+ { |
34954 |
+- bool ingress = sk_msg_to_ingress(msg); |
34955 |
+ struct sk_psock *psock = sk_psock_get(sk); |
34956 |
+ int ret; |
34957 |
+ |
34958 |
+@@ -277,10 +279,10 @@ msg_bytes_ready: |
34959 |
+ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, |
34960 |
+ struct sk_msg *msg, int *copied, int flags) |
34961 |
+ { |
34962 |
+- bool cork = false, enospc = sk_msg_full(msg); |
34963 |
++ bool cork = false, enospc = sk_msg_full(msg), redir_ingress; |
34964 |
+ struct sock *sk_redir; |
34965 |
+ u32 tosend, origsize, sent, delta = 0; |
34966 |
+- u32 eval = __SK_NONE; |
34967 |
++ u32 eval; |
34968 |
+ int ret; |
34969 |
+ |
34970 |
+ more_data: |
34971 |
+@@ -311,6 +313,7 @@ more_data: |
34972 |
+ tosend = msg->sg.size; |
34973 |
+ if (psock->apply_bytes && psock->apply_bytes < tosend) |
34974 |
+ tosend = psock->apply_bytes; |
34975 |
++ eval = __SK_NONE; |
34976 |
+ |
34977 |
+ switch (psock->eval) { |
34978 |
+ case __SK_PASS: |
34979 |
+@@ -322,6 +325,7 @@ more_data: |
34980 |
+ sk_msg_apply_bytes(psock, tosend); |
34981 |
+ break; |
34982 |
+ case __SK_REDIRECT: |
34983 |
++ redir_ingress = psock->redir_ingress; |
34984 |
+ sk_redir = psock->sk_redir; |
34985 |
+ sk_msg_apply_bytes(psock, tosend); |
34986 |
+ if (!psock->apply_bytes) { |
34987 |
+@@ -338,7 +342,8 @@ more_data: |
34988 |
+ release_sock(sk); |
34989 |
+ |
34990 |
+ origsize = msg->sg.size; |
34991 |
+- ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); |
34992 |
++ ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, |
34993 |
++ msg, tosend, flags); |
34994 |
+ sent = origsize - msg->sg.size; |
34995 |
+ |
34996 |
+ if (eval == __SK_REDIRECT) |
34997 |
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c |
34998 |
+index 46101fd67a477..1ff5b8e30bb92 100644 |
34999 |
+--- a/net/ipv4/udp_tunnel_core.c |
35000 |
++++ b/net/ipv4/udp_tunnel_core.c |
35001 |
+@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); |
35002 |
+ void udp_tunnel_sock_release(struct socket *sock) |
35003 |
+ { |
35004 |
+ rcu_assign_sk_user_data(sock->sk, NULL); |
35005 |
++ synchronize_rcu(); |
35006 |
+ kernel_sock_shutdown(sock, SHUT_RDWR); |
35007 |
+ sock_release(sock); |
35008 |
+ } |
35009 |
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
35010 |
+index 946871741f129..d4cdc2b1b4689 100644 |
35011 |
+--- a/net/ipv6/sit.c |
35012 |
++++ b/net/ipv6/sit.c |
35013 |
+@@ -696,7 +696,7 @@ static int ipip6_rcv(struct sk_buff *skb) |
35014 |
+ skb->dev = tunnel->dev; |
35015 |
+ |
35016 |
+ if (packet_is_spoofed(skb, iph, tunnel)) { |
35017 |
+- tunnel->dev->stats.rx_errors++; |
35018 |
++ DEV_STATS_INC(tunnel->dev, rx_errors); |
35019 |
+ goto out; |
35020 |
+ } |
35021 |
+ |
35022 |
+@@ -716,8 +716,8 @@ static int ipip6_rcv(struct sk_buff *skb) |
35023 |
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", |
35024 |
+ &iph->saddr, iph->tos); |
35025 |
+ if (err > 1) { |
35026 |
+- ++tunnel->dev->stats.rx_frame_errors; |
35027 |
+- ++tunnel->dev->stats.rx_errors; |
35028 |
++ DEV_STATS_INC(tunnel->dev, rx_frame_errors); |
35029 |
++ DEV_STATS_INC(tunnel->dev, rx_errors); |
35030 |
+ goto out; |
35031 |
+ } |
35032 |
+ } |
35033 |
+@@ -948,7 +948,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
35034 |
+ if (!rt) { |
35035 |
+ rt = ip_route_output_flow(tunnel->net, &fl4, NULL); |
35036 |
+ if (IS_ERR(rt)) { |
35037 |
+- dev->stats.tx_carrier_errors++; |
35038 |
++ DEV_STATS_INC(dev, tx_carrier_errors); |
35039 |
+ goto tx_error_icmp; |
35040 |
+ } |
35041 |
+ dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr); |
35042 |
+@@ -956,14 +956,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
35043 |
+ |
35044 |
+ if (rt->rt_type != RTN_UNICAST) { |
35045 |
+ ip_rt_put(rt); |
35046 |
+- dev->stats.tx_carrier_errors++; |
35047 |
++ DEV_STATS_INC(dev, tx_carrier_errors); |
35048 |
+ goto tx_error_icmp; |
35049 |
+ } |
35050 |
+ tdev = rt->dst.dev; |
35051 |
+ |
35052 |
+ if (tdev == dev) { |
35053 |
+ ip_rt_put(rt); |
35054 |
+- dev->stats.collisions++; |
35055 |
++ DEV_STATS_INC(dev, collisions); |
35056 |
+ goto tx_error; |
35057 |
+ } |
35058 |
+ |
35059 |
+@@ -976,7 +976,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
35060 |
+ mtu = dst_mtu(&rt->dst) - t_hlen; |
35061 |
+ |
35062 |
+ if (mtu < IPV4_MIN_MTU) { |
35063 |
+- dev->stats.collisions++; |
35064 |
++ DEV_STATS_INC(dev, collisions); |
35065 |
+ ip_rt_put(rt); |
35066 |
+ goto tx_error; |
35067 |
+ } |
35068 |
+@@ -1015,7 +1015,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
35069 |
+ struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
35070 |
+ if (!new_skb) { |
35071 |
+ ip_rt_put(rt); |
35072 |
+- dev->stats.tx_dropped++; |
35073 |
++ DEV_STATS_INC(dev, tx_dropped); |
35074 |
+ kfree_skb(skb); |
35075 |
+ return NETDEV_TX_OK; |
35076 |
+ } |
35077 |
+@@ -1045,7 +1045,7 @@ tx_error_icmp: |
35078 |
+ dst_link_failure(skb); |
35079 |
+ tx_error: |
35080 |
+ kfree_skb(skb); |
35081 |
+- dev->stats.tx_errors++; |
35082 |
++ DEV_STATS_INC(dev, tx_errors); |
35083 |
+ return NETDEV_TX_OK; |
35084 |
+ } |
35085 |
+ |
35086 |
+@@ -1064,7 +1064,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb, |
35087 |
+ return NETDEV_TX_OK; |
35088 |
+ tx_error: |
35089 |
+ kfree_skb(skb); |
35090 |
+- dev->stats.tx_errors++; |
35091 |
++ DEV_STATS_INC(dev, tx_errors); |
35092 |
+ return NETDEV_TX_OK; |
35093 |
+ } |
35094 |
+ |
35095 |
+@@ -1093,7 +1093,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb, |
35096 |
+ return NETDEV_TX_OK; |
35097 |
+ |
35098 |
+ tx_err: |
35099 |
+- dev->stats.tx_errors++; |
35100 |
++ DEV_STATS_INC(dev, tx_errors); |
35101 |
+ kfree_skb(skb); |
35102 |
+ return NETDEV_TX_OK; |
35103 |
+ |
35104 |
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c |
35105 |
+index 041859b5b71d0..a3347f2457824 100644 |
35106 |
+--- a/net/mac80211/iface.c |
35107 |
++++ b/net/mac80211/iface.c |
35108 |
+@@ -2059,6 +2059,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
35109 |
+ |
35110 |
+ ret = cfg80211_register_netdevice(ndev); |
35111 |
+ if (ret) { |
35112 |
++ ieee80211_if_free(ndev); |
35113 |
+ free_netdev(ndev); |
35114 |
+ return ret; |
35115 |
+ } |
35116 |
+diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c |
35117 |
+index 61e3b05cf02c3..1020d67600a95 100644 |
35118 |
+--- a/net/netfilter/nf_conntrack_proto_icmpv6.c |
35119 |
++++ b/net/netfilter/nf_conntrack_proto_icmpv6.c |
35120 |
+@@ -129,6 +129,56 @@ static void icmpv6_error_log(const struct sk_buff *skb, |
35121 |
+ nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg); |
35122 |
+ } |
35123 |
+ |
35124 |
++static noinline_for_stack int |
35125 |
++nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb, |
35126 |
++ unsigned int dataoff, |
35127 |
++ const struct nf_hook_state *state) |
35128 |
++{ |
35129 |
++ u8 hl = ipv6_hdr(skb)->hop_limit; |
35130 |
++ union nf_inet_addr outer_daddr; |
35131 |
++ union { |
35132 |
++ struct nd_opt_hdr nd_opt; |
35133 |
++ struct rd_msg rd_msg; |
35134 |
++ } tmp; |
35135 |
++ const struct nd_opt_hdr *nd_opt; |
35136 |
++ const struct rd_msg *rd_msg; |
35137 |
++ |
35138 |
++ rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg); |
35139 |
++ if (!rd_msg) { |
35140 |
++ icmpv6_error_log(skb, state, "short redirect"); |
35141 |
++ return -NF_ACCEPT; |
35142 |
++ } |
35143 |
++ |
35144 |
++ if (rd_msg->icmph.icmp6_code != 0) |
35145 |
++ return NF_ACCEPT; |
35146 |
++ |
35147 |
++ if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { |
35148 |
++ icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect"); |
35149 |
++ return -NF_ACCEPT; |
35150 |
++ } |
35151 |
++ |
35152 |
++ dataoff += sizeof(*rd_msg); |
35153 |
++ |
35154 |
++ /* warning: rd_msg no longer usable after this call */ |
35155 |
++ nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt); |
35156 |
++ if (!nd_opt || nd_opt->nd_opt_len == 0) { |
35157 |
++ icmpv6_error_log(skb, state, "redirect without options"); |
35158 |
++ return -NF_ACCEPT; |
35159 |
++ } |
35160 |
++ |
35161 |
++ /* We could call ndisc_parse_options(), but it would need |
35162 |
++ * skb_linearize() and a bit more work. |
35163 |
++ */ |
35164 |
++ if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR) |
35165 |
++ return NF_ACCEPT; |
35166 |
++ |
35167 |
++ memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr, |
35168 |
++ sizeof(outer_daddr.ip6)); |
35169 |
++ dataoff += 8; |
35170 |
++ return nf_conntrack_inet_error(tmpl, skb, dataoff, state, |
35171 |
++ IPPROTO_ICMPV6, &outer_daddr); |
35172 |
++} |
35173 |
++ |
35174 |
+ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, |
35175 |
+ struct sk_buff *skb, |
35176 |
+ unsigned int dataoff, |
35177 |
+@@ -159,6 +209,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, |
35178 |
+ return NF_ACCEPT; |
35179 |
+ } |
35180 |
+ |
35181 |
++ if (icmp6h->icmp6_type == NDISC_REDIRECT) |
35182 |
++ return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state); |
35183 |
++ |
35184 |
+ /* is not error message ? */ |
35185 |
+ if (icmp6h->icmp6_type >= 128) |
35186 |
+ return NF_ACCEPT; |
35187 |
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c |
35188 |
+index 66c9a6c2b9cf9..336f282a221fd 100644 |
35189 |
+--- a/net/netfilter/nf_flow_table_offload.c |
35190 |
++++ b/net/netfilter/nf_flow_table_offload.c |
35191 |
+@@ -372,12 +372,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, |
35192 |
+ const __be32 *addr, const __be32 *mask) |
35193 |
+ { |
35194 |
+ struct flow_action_entry *entry; |
35195 |
+- int i, j; |
35196 |
++ int i; |
35197 |
+ |
35198 |
+- for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) { |
35199 |
++ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) { |
35200 |
+ entry = flow_action_entry_next(flow_rule); |
35201 |
+ flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, |
35202 |
+- offset + i, &addr[j], mask); |
35203 |
++ offset + i * sizeof(u32), &addr[i], mask); |
35204 |
+ } |
35205 |
+ } |
35206 |
+ |
35207 |
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c |
35208 |
+index 94c48122fdc3a..795a25ecb8939 100644 |
35209 |
+--- a/net/openvswitch/datapath.c |
35210 |
++++ b/net/openvswitch/datapath.c |
35211 |
+@@ -946,6 +946,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) |
35212 |
+ struct sw_flow_mask mask; |
35213 |
+ struct sk_buff *reply; |
35214 |
+ struct datapath *dp; |
35215 |
++ struct sw_flow_key *key; |
35216 |
+ struct sw_flow_actions *acts; |
35217 |
+ struct sw_flow_match match; |
35218 |
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); |
35219 |
+@@ -973,24 +974,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) |
35220 |
+ } |
35221 |
+ |
35222 |
+ /* Extract key. */ |
35223 |
+- ovs_match_init(&match, &new_flow->key, false, &mask); |
35224 |
++ key = kzalloc(sizeof(*key), GFP_KERNEL); |
35225 |
++ if (!key) { |
35226 |
++ error = -ENOMEM; |
35227 |
++ goto err_kfree_key; |
35228 |
++ } |
35229 |
++ |
35230 |
++ ovs_match_init(&match, key, false, &mask); |
35231 |
+ error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], |
35232 |
+ a[OVS_FLOW_ATTR_MASK], log); |
35233 |
+ if (error) |
35234 |
+ goto err_kfree_flow; |
35235 |
+ |
35236 |
++ ovs_flow_mask_key(&new_flow->key, key, true, &mask); |
35237 |
++ |
35238 |
+ /* Extract flow identifier. */ |
35239 |
+ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], |
35240 |
+- &new_flow->key, log); |
35241 |
++ key, log); |
35242 |
+ if (error) |
35243 |
+ goto err_kfree_flow; |
35244 |
+ |
35245 |
+- /* unmasked key is needed to match when ufid is not used. */ |
35246 |
+- if (ovs_identifier_is_key(&new_flow->id)) |
35247 |
+- match.key = new_flow->id.unmasked_key; |
35248 |
+- |
35249 |
+- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask); |
35250 |
+- |
35251 |
+ /* Validate actions. */ |
35252 |
+ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS], |
35253 |
+ &new_flow->key, &acts, log); |
35254 |
+@@ -1017,7 +1020,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) |
35255 |
+ if (ovs_identifier_is_ufid(&new_flow->id)) |
35256 |
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); |
35257 |
+ if (!flow) |
35258 |
+- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key); |
35259 |
++ flow = ovs_flow_tbl_lookup(&dp->table, key); |
35260 |
+ if (likely(!flow)) { |
35261 |
+ rcu_assign_pointer(new_flow->sf_acts, acts); |
35262 |
+ |
35263 |
+@@ -1087,6 +1090,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) |
35264 |
+ |
35265 |
+ if (reply) |
35266 |
+ ovs_notify(&dp_flow_genl_family, reply, info); |
35267 |
++ |
35268 |
++ kfree(key); |
35269 |
+ return 0; |
35270 |
+ |
35271 |
+ err_unlock_ovs: |
35272 |
+@@ -1096,6 +1101,8 @@ err_kfree_acts: |
35273 |
+ ovs_nla_free_flow_actions(acts); |
35274 |
+ err_kfree_flow: |
35275 |
+ ovs_flow_free(new_flow, false); |
35276 |
++err_kfree_key: |
35277 |
++ kfree(key); |
35278 |
+ error: |
35279 |
+ return error; |
35280 |
+ } |
35281 |
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c |
35282 |
+index 9683617db7049..08c117bc083ec 100644 |
35283 |
+--- a/net/rxrpc/output.c |
35284 |
++++ b/net/rxrpc/output.c |
35285 |
+@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, |
35286 |
+ *_hard_ack = hard_ack; |
35287 |
+ *_top = top; |
35288 |
+ |
35289 |
+- pkt->ack.bufferSpace = htons(8); |
35290 |
++ pkt->ack.bufferSpace = htons(0); |
35291 |
+ pkt->ack.maxSkew = htons(0); |
35292 |
+ pkt->ack.firstPacket = htonl(hard_ack + 1); |
35293 |
+ pkt->ack.previousPacket = htonl(call->ackr_highest_seq); |
35294 |
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c |
35295 |
+index 3c3a626459deb..d4e4e94f4f987 100644 |
35296 |
+--- a/net/rxrpc/sendmsg.c |
35297 |
++++ b/net/rxrpc/sendmsg.c |
35298 |
+@@ -716,7 +716,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) |
35299 |
+ if (call->tx_total_len != -1 || |
35300 |
+ call->tx_pending || |
35301 |
+ call->tx_top != 0) |
35302 |
+- goto error_put; |
35303 |
++ goto out_put_unlock; |
35304 |
+ call->tx_total_len = p.call.tx_total_len; |
35305 |
+ } |
35306 |
+ } |
35307 |
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c |
35308 |
+index 4ce6813618515..5c1235e6076ae 100644 |
35309 |
+--- a/net/sched/ematch.c |
35310 |
++++ b/net/sched/ematch.c |
35311 |
+@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp, |
35312 |
+ * the value carried. |
35313 |
+ */ |
35314 |
+ if (em_hdr->flags & TCF_EM_SIMPLE) { |
35315 |
++ if (em->ops->datalen > 0) |
35316 |
++ goto errout; |
35317 |
+ if (data_len < sizeof(u32)) |
35318 |
+ goto errout; |
35319 |
+ em->data = *(u32 *) data; |
35320 |
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c |
35321 |
+index b46a416787ec3..43ebf090029d7 100644 |
35322 |
+--- a/net/sctp/sysctl.c |
35323 |
++++ b/net/sctp/sysctl.c |
35324 |
+@@ -84,17 +84,18 @@ static struct ctl_table sctp_table[] = { |
35325 |
+ { /* sentinel */ } |
35326 |
+ }; |
35327 |
+ |
35328 |
++/* The following index defines are used in sctp_sysctl_net_register(). |
35329 |
++ * If you add new items to the sctp_net_table, please ensure that |
35330 |
++ * the index values of these defines hold the same meaning indicated by |
35331 |
++ * their macro names when they appear in sctp_net_table. |
35332 |
++ */ |
35333 |
++#define SCTP_RTO_MIN_IDX 0 |
35334 |
++#define SCTP_RTO_MAX_IDX 1 |
35335 |
++#define SCTP_PF_RETRANS_IDX 2 |
35336 |
++#define SCTP_PS_RETRANS_IDX 3 |
35337 |
++ |
35338 |
+ static struct ctl_table sctp_net_table[] = { |
35339 |
+- { |
35340 |
+- .procname = "rto_initial", |
35341 |
+- .data = &init_net.sctp.rto_initial, |
35342 |
+- .maxlen = sizeof(unsigned int), |
35343 |
+- .mode = 0644, |
35344 |
+- .proc_handler = proc_dointvec_minmax, |
35345 |
+- .extra1 = SYSCTL_ONE, |
35346 |
+- .extra2 = &timer_max |
35347 |
+- }, |
35348 |
+- { |
35349 |
++ [SCTP_RTO_MIN_IDX] = { |
35350 |
+ .procname = "rto_min", |
35351 |
+ .data = &init_net.sctp.rto_min, |
35352 |
+ .maxlen = sizeof(unsigned int), |
35353 |
+@@ -103,7 +104,7 @@ static struct ctl_table sctp_net_table[] = { |
35354 |
+ .extra1 = SYSCTL_ONE, |
35355 |
+ .extra2 = &init_net.sctp.rto_max |
35356 |
+ }, |
35357 |
+- { |
35358 |
++ [SCTP_RTO_MAX_IDX] = { |
35359 |
+ .procname = "rto_max", |
35360 |
+ .data = &init_net.sctp.rto_max, |
35361 |
+ .maxlen = sizeof(unsigned int), |
35362 |
+@@ -112,6 +113,33 @@ static struct ctl_table sctp_net_table[] = { |
35363 |
+ .extra1 = &init_net.sctp.rto_min, |
35364 |
+ .extra2 = &timer_max |
35365 |
+ }, |
35366 |
++ [SCTP_PF_RETRANS_IDX] = { |
35367 |
++ .procname = "pf_retrans", |
35368 |
++ .data = &init_net.sctp.pf_retrans, |
35369 |
++ .maxlen = sizeof(int), |
35370 |
++ .mode = 0644, |
35371 |
++ .proc_handler = proc_dointvec_minmax, |
35372 |
++ .extra1 = SYSCTL_ZERO, |
35373 |
++ .extra2 = &init_net.sctp.ps_retrans, |
35374 |
++ }, |
35375 |
++ [SCTP_PS_RETRANS_IDX] = { |
35376 |
++ .procname = "ps_retrans", |
35377 |
++ .data = &init_net.sctp.ps_retrans, |
35378 |
++ .maxlen = sizeof(int), |
35379 |
++ .mode = 0644, |
35380 |
++ .proc_handler = proc_dointvec_minmax, |
35381 |
++ .extra1 = &init_net.sctp.pf_retrans, |
35382 |
++ .extra2 = &ps_retrans_max, |
35383 |
++ }, |
35384 |
++ { |
35385 |
++ .procname = "rto_initial", |
35386 |
++ .data = &init_net.sctp.rto_initial, |
35387 |
++ .maxlen = sizeof(unsigned int), |
35388 |
++ .mode = 0644, |
35389 |
++ .proc_handler = proc_dointvec_minmax, |
35390 |
++ .extra1 = SYSCTL_ONE, |
35391 |
++ .extra2 = &timer_max |
35392 |
++ }, |
35393 |
+ { |
35394 |
+ .procname = "rto_alpha_exp_divisor", |
35395 |
+ .data = &init_net.sctp.rto_alpha, |
35396 |
+@@ -207,24 +235,6 @@ static struct ctl_table sctp_net_table[] = { |
35397 |
+ .extra1 = SYSCTL_ONE, |
35398 |
+ .extra2 = SYSCTL_INT_MAX, |
35399 |
+ }, |
35400 |
+- { |
35401 |
+- .procname = "pf_retrans", |
35402 |
+- .data = &init_net.sctp.pf_retrans, |
35403 |
+- .maxlen = sizeof(int), |
35404 |
+- .mode = 0644, |
35405 |
+- .proc_handler = proc_dointvec_minmax, |
35406 |
+- .extra1 = SYSCTL_ZERO, |
35407 |
+- .extra2 = &init_net.sctp.ps_retrans, |
35408 |
+- }, |
35409 |
+- { |
35410 |
+- .procname = "ps_retrans", |
35411 |
+- .data = &init_net.sctp.ps_retrans, |
35412 |
+- .maxlen = sizeof(int), |
35413 |
+- .mode = 0644, |
35414 |
+- .proc_handler = proc_dointvec_minmax, |
35415 |
+- .extra1 = &init_net.sctp.pf_retrans, |
35416 |
+- .extra2 = &ps_retrans_max, |
35417 |
+- }, |
35418 |
+ { |
35419 |
+ .procname = "sndbuf_policy", |
35420 |
+ .data = &init_net.sctp.sndbuf_policy, |
35421 |
+@@ -586,6 +596,11 @@ int sctp_sysctl_net_register(struct net *net) |
35422 |
+ for (i = 0; table[i].data; i++) |
35423 |
+ table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; |
35424 |
+ |
35425 |
++ table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max; |
35426 |
++ table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min; |
35427 |
++ table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans; |
35428 |
++ table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans; |
35429 |
++ |
35430 |
+ net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); |
35431 |
+ if (net->sctp.sysctl_header == NULL) { |
35432 |
+ kfree(table); |
35433 |
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c |
35434 |
+index ca2a494d727b2..bbeb80e1133df 100644 |
35435 |
+--- a/net/sunrpc/clnt.c |
35436 |
++++ b/net/sunrpc/clnt.c |
35437 |
+@@ -1375,7 +1375,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, |
35438 |
+ break; |
35439 |
+ default: |
35440 |
+ err = -EAFNOSUPPORT; |
35441 |
+- goto out; |
35442 |
++ goto out_release; |
35443 |
+ } |
35444 |
+ if (err < 0) { |
35445 |
+ dprintk("RPC: can't bind UDP socket (%d)\n", err); |
35446 |
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
35447 |
+index 1295f9ab839fd..507ba8b799920 100644 |
35448 |
+--- a/net/sunrpc/xprtrdma/verbs.c |
35449 |
++++ b/net/sunrpc/xprtrdma/verbs.c |
35450 |
+@@ -858,7 +858,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, |
35451 |
+ return req; |
35452 |
+ |
35453 |
+ out3: |
35454 |
+- kfree(req->rl_sendbuf); |
35455 |
++ rpcrdma_regbuf_free(req->rl_sendbuf); |
35456 |
+ out2: |
35457 |
+ kfree(req); |
35458 |
+ out1: |
35459 |
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c |
35460 |
+index 794ef3b3d7d4b..c0fea678abb1c 100644 |
35461 |
+--- a/net/tls/tls_sw.c |
35462 |
++++ b/net/tls/tls_sw.c |
35463 |
+@@ -801,7 +801,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, |
35464 |
+ struct sk_psock *psock; |
35465 |
+ struct sock *sk_redir; |
35466 |
+ struct tls_rec *rec; |
35467 |
+- bool enospc, policy; |
35468 |
++ bool enospc, policy, redir_ingress; |
35469 |
+ int err = 0, send; |
35470 |
+ u32 delta = 0; |
35471 |
+ |
35472 |
+@@ -846,6 +846,7 @@ more_data: |
35473 |
+ } |
35474 |
+ break; |
35475 |
+ case __SK_REDIRECT: |
35476 |
++ redir_ingress = psock->redir_ingress; |
35477 |
+ sk_redir = psock->sk_redir; |
35478 |
+ memcpy(&msg_redir, msg, sizeof(*msg)); |
35479 |
+ if (msg->apply_bytes < send) |
35480 |
+@@ -855,7 +856,8 @@ more_data: |
35481 |
+ sk_msg_return_zero(sk, msg, send); |
35482 |
+ msg->sg.size -= send; |
35483 |
+ release_sock(sk); |
35484 |
+- err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags); |
35485 |
++ err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, |
35486 |
++ &msg_redir, send, flags); |
35487 |
+ lock_sock(sk); |
35488 |
+ if (err < 0) { |
35489 |
+ *copied -= sk_msg_free_nocharge(sk, &msg_redir); |
35490 |
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
35491 |
+index a579e28bd213f..0a59a00cb5815 100644 |
35492 |
+--- a/net/unix/af_unix.c |
35493 |
++++ b/net/unix/af_unix.c |
35494 |
+@@ -1865,13 +1865,20 @@ restart_locked: |
35495 |
+ unix_state_lock(sk); |
35496 |
+ |
35497 |
+ err = 0; |
35498 |
+- if (unix_peer(sk) == other) { |
35499 |
++ if (sk->sk_type == SOCK_SEQPACKET) { |
35500 |
++ /* We are here only when racing with unix_release_sock() |
35501 |
++ * is clearing @other. Never change state to TCP_CLOSE |
35502 |
++ * unlike SOCK_DGRAM wants. |
35503 |
++ */ |
35504 |
++ unix_state_unlock(sk); |
35505 |
++ err = -EPIPE; |
35506 |
++ } else if (unix_peer(sk) == other) { |
35507 |
+ unix_peer(sk) = NULL; |
35508 |
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other); |
35509 |
+ |
35510 |
++ sk->sk_state = TCP_CLOSE; |
35511 |
+ unix_state_unlock(sk); |
35512 |
+ |
35513 |
+- sk->sk_state = TCP_CLOSE; |
35514 |
+ unix_dgram_disconnected(sk, other); |
35515 |
+ sock_put(other); |
35516 |
+ err = -ECONNREFUSED; |
35517 |
+@@ -3401,6 +3408,7 @@ static int __init af_unix_init(void) |
35518 |
+ rc = proto_register(&unix_stream_proto, 1); |
35519 |
+ if (rc != 0) { |
35520 |
+ pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); |
35521 |
++ proto_unregister(&unix_dgram_proto); |
35522 |
+ goto out; |
35523 |
+ } |
35524 |
+ |
35525 |
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c |
35526 |
+index b17dc9745188e..94c1112f1c8c3 100644 |
35527 |
+--- a/net/vmw_vsock/vmci_transport.c |
35528 |
++++ b/net/vmw_vsock/vmci_transport.c |
35529 |
+@@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue( |
35530 |
+ if (!dg) |
35531 |
+ return -ENOMEM; |
35532 |
+ |
35533 |
+- memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); |
35534 |
++ err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); |
35535 |
++ if (err) { |
35536 |
++ kfree(dg); |
35537 |
++ return err; |
35538 |
++ } |
35539 |
+ |
35540 |
+ dg->dst = vmci_make_handle(remote_addr->svm_cid, |
35541 |
+ remote_addr->svm_port); |
35542 |
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c |
35543 |
+index 7b19a2087db99..d0fbe822e7934 100644 |
35544 |
+--- a/net/wireless/reg.c |
35545 |
++++ b/net/wireless/reg.c |
35546 |
+@@ -4247,8 +4247,10 @@ static int __init regulatory_init_db(void) |
35547 |
+ return -EINVAL; |
35548 |
+ |
35549 |
+ err = load_builtin_regdb_keys(); |
35550 |
+- if (err) |
35551 |
++ if (err) { |
35552 |
++ platform_device_unregister(reg_pdev); |
35553 |
+ return err; |
35554 |
++ } |
35555 |
+ |
35556 |
+ /* We always try to get an update for the static regdomain */ |
35557 |
+ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); |
35558 |
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c |
35559 |
+index 9ec93d90e8a5a..4eb7aa11cfbb2 100644 |
35560 |
+--- a/samples/vfio-mdev/mdpy-fb.c |
35561 |
++++ b/samples/vfio-mdev/mdpy-fb.c |
35562 |
+@@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev, |
35563 |
+ |
35564 |
+ ret = pci_request_regions(pdev, "mdpy-fb"); |
35565 |
+ if (ret < 0) |
35566 |
+- return ret; |
35567 |
++ goto err_disable_dev; |
35568 |
+ |
35569 |
+ pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format); |
35570 |
+ pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width); |
35571 |
+@@ -191,6 +191,9 @@ err_release_fb: |
35572 |
+ err_release_regions: |
35573 |
+ pci_release_regions(pdev); |
35574 |
+ |
35575 |
++err_disable_dev: |
35576 |
++ pci_disable_device(pdev); |
35577 |
++ |
35578 |
+ return ret; |
35579 |
+ } |
35580 |
+ |
35581 |
+@@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev) |
35582 |
+ struct fb_info *info = pci_get_drvdata(pdev); |
35583 |
+ |
35584 |
+ unregister_framebuffer(info); |
35585 |
++ iounmap(info->screen_base); |
35586 |
+ framebuffer_release(info); |
35587 |
++ pci_release_regions(pdev); |
35588 |
++ pci_disable_device(pdev); |
35589 |
+ } |
35590 |
+ |
35591 |
+ static struct pci_device_id mdpy_fb_pci_table[] = { |
35592 |
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening |
35593 |
+index 942ed8de36d35..2e509e32cf75a 100644 |
35594 |
+--- a/security/Kconfig.hardening |
35595 |
++++ b/security/Kconfig.hardening |
35596 |
+@@ -240,6 +240,9 @@ config INIT_ON_FREE_DEFAULT_ON |
35597 |
+ |
35598 |
+ config CC_HAS_ZERO_CALL_USED_REGS |
35599 |
+ def_bool $(cc-option,-fzero-call-used-regs=used-gpr) |
35600 |
++ # https://github.com/ClangBuiltLinux/linux/issues/1766 |
35601 |
++ # https://github.com/llvm/llvm-project/issues/59242 |
35602 |
++ depends on !CC_IS_CLANG || CLANG_VERSION > 150006 |
35603 |
+ |
35604 |
+ config ZERO_CALL_USED_REGS |
35605 |
+ bool "Enable register zeroing on function exit" |
35606 |
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c |
35607 |
+index a891705b1d577..8c7719108d7f7 100644 |
35608 |
+--- a/security/apparmor/apparmorfs.c |
35609 |
++++ b/security/apparmor/apparmorfs.c |
35610 |
+@@ -867,8 +867,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file, |
35611 |
+ if (!t) |
35612 |
+ return ERR_PTR(-ENOMEM); |
35613 |
+ kref_init(&t->count); |
35614 |
+- if (copy_from_user(t->data, buf, size)) |
35615 |
++ if (copy_from_user(t->data, buf, size)) { |
35616 |
++ put_multi_transaction(t); |
35617 |
+ return ERR_PTR(-EFAULT); |
35618 |
++ } |
35619 |
+ |
35620 |
+ return t; |
35621 |
+ } |
35622 |
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c |
35623 |
+index f72406fe1bf27..10274eb90fa37 100644 |
35624 |
+--- a/security/apparmor/lsm.c |
35625 |
++++ b/security/apparmor/lsm.c |
35626 |
+@@ -1170,10 +1170,10 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb |
35627 |
+ #endif |
35628 |
+ |
35629 |
+ /* |
35630 |
+- * The cred blob is a pointer to, not an instance of, an aa_task_ctx. |
35631 |
++ * The cred blob is a pointer to, not an instance of, an aa_label. |
35632 |
+ */ |
35633 |
+ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = { |
35634 |
+- .lbs_cred = sizeof(struct aa_task_ctx *), |
35635 |
++ .lbs_cred = sizeof(struct aa_label *), |
35636 |
+ .lbs_file = sizeof(struct aa_file_ctx), |
35637 |
+ .lbs_task = sizeof(struct aa_task_ctx), |
35638 |
+ }; |
35639 |
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c |
35640 |
+index 4c010c9a6af1d..fcf22577f606c 100644 |
35641 |
+--- a/security/apparmor/policy.c |
35642 |
++++ b/security/apparmor/policy.c |
35643 |
+@@ -1125,7 +1125,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, |
35644 |
+ |
35645 |
+ if (!name) { |
35646 |
+ /* remove namespace - can only happen if fqname[0] == ':' */ |
35647 |
+- mutex_lock_nested(&ns->parent->lock, ns->level); |
35648 |
++ mutex_lock_nested(&ns->parent->lock, ns->parent->level); |
35649 |
+ __aa_bump_ns_revision(ns); |
35650 |
+ __aa_remove_ns(ns); |
35651 |
+ mutex_unlock(&ns->parent->lock); |
35652 |
+diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c |
35653 |
+index 70921d95fb406..53d24cf638936 100644 |
35654 |
+--- a/security/apparmor/policy_ns.c |
35655 |
++++ b/security/apparmor/policy_ns.c |
35656 |
+@@ -121,7 +121,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) |
35657 |
+ return ns; |
35658 |
+ |
35659 |
+ fail_unconfined: |
35660 |
+- kfree_sensitive(ns->base.hname); |
35661 |
++ aa_policy_destroy(&ns->base); |
35662 |
+ fail_ns: |
35663 |
+ kfree_sensitive(ns); |
35664 |
+ return NULL; |
35665 |
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c |
35666 |
+index 03c9609ca407b..d5b3a062d1d18 100644 |
35667 |
+--- a/security/apparmor/policy_unpack.c |
35668 |
++++ b/security/apparmor/policy_unpack.c |
35669 |
+@@ -964,7 +964,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns) |
35670 |
+ * if not specified use previous version |
35671 |
+ * Mask off everything that is not kernel abi version |
35672 |
+ */ |
35673 |
+- if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) { |
35674 |
++ if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) { |
35675 |
+ audit_iface(NULL, NULL, NULL, "unsupported interface version", |
35676 |
+ e, error); |
35677 |
+ return error; |
35678 |
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c |
35679 |
+index 3b06a01bd0fdd..aa93b750a9f32 100644 |
35680 |
+--- a/security/integrity/digsig.c |
35681 |
++++ b/security/integrity/digsig.c |
35682 |
+@@ -122,6 +122,7 @@ int __init integrity_init_keyring(const unsigned int id) |
35683 |
+ { |
35684 |
+ struct key_restriction *restriction; |
35685 |
+ key_perm_t perm; |
35686 |
++ int ret; |
35687 |
+ |
35688 |
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW |
35689 |
+ | KEY_USR_READ | KEY_USR_SEARCH; |
35690 |
+@@ -142,7 +143,10 @@ int __init integrity_init_keyring(const unsigned int id) |
35691 |
+ perm |= KEY_USR_WRITE; |
35692 |
+ |
35693 |
+ out: |
35694 |
+- return __integrity_init_keyring(id, perm, restriction); |
35695 |
++ ret = __integrity_init_keyring(id, perm, restriction); |
35696 |
++ if (ret) |
35697 |
++ kfree(restriction); |
35698 |
++ return ret; |
35699 |
+ } |
35700 |
+ |
35701 |
+ static int __init integrity_add_key(const unsigned int id, const void *data, |
35702 |
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c |
35703 |
+index 748b97a2582a4..ed43d30682ff8 100644 |
35704 |
+--- a/security/integrity/ima/ima_policy.c |
35705 |
++++ b/security/integrity/ima/ima_policy.c |
35706 |
+@@ -391,12 +391,6 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) |
35707 |
+ |
35708 |
+ nentry->lsm[i].type = entry->lsm[i].type; |
35709 |
+ nentry->lsm[i].args_p = entry->lsm[i].args_p; |
35710 |
+- /* |
35711 |
+- * Remove the reference from entry so that the associated |
35712 |
+- * memory will not be freed during a later call to |
35713 |
+- * ima_lsm_free_rule(entry). |
35714 |
+- */ |
35715 |
+- entry->lsm[i].args_p = NULL; |
35716 |
+ |
35717 |
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal, |
35718 |
+ nentry->lsm[i].args_p, |
35719 |
+@@ -410,6 +404,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry) |
35720 |
+ |
35721 |
+ static int ima_lsm_update_rule(struct ima_rule_entry *entry) |
35722 |
+ { |
35723 |
++ int i; |
35724 |
+ struct ima_rule_entry *nentry; |
35725 |
+ |
35726 |
+ nentry = ima_lsm_copy_rule(entry); |
35727 |
+@@ -424,7 +419,8 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry) |
35728 |
+ * references and the entry itself. All other memory refrences will now |
35729 |
+ * be owned by nentry. |
35730 |
+ */ |
35731 |
+- ima_lsm_free_rule(entry); |
35732 |
++ for (i = 0; i < MAX_LSM_RULES; i++) |
35733 |
++ ima_filter_rule_free(entry->lsm[i].rule); |
35734 |
+ kfree(entry); |
35735 |
+ |
35736 |
+ return 0; |
35737 |
+@@ -542,6 +538,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, |
35738 |
+ const char *func_data) |
35739 |
+ { |
35740 |
+ int i; |
35741 |
++ bool result = false; |
35742 |
++ struct ima_rule_entry *lsm_rule = rule; |
35743 |
++ bool rule_reinitialized = false; |
35744 |
+ |
35745 |
+ if ((rule->flags & IMA_FUNC) && |
35746 |
+ (rule->func != func && func != POST_SETATTR)) |
35747 |
+@@ -590,35 +589,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule, |
35748 |
+ int rc = 0; |
35749 |
+ u32 osid; |
35750 |
+ |
35751 |
+- if (!rule->lsm[i].rule) { |
35752 |
+- if (!rule->lsm[i].args_p) |
35753 |
++ if (!lsm_rule->lsm[i].rule) { |
35754 |
++ if (!lsm_rule->lsm[i].args_p) |
35755 |
+ continue; |
35756 |
+ else |
35757 |
+ return false; |
35758 |
+ } |
35759 |
++ |
35760 |
++retry: |
35761 |
+ switch (i) { |
35762 |
+ case LSM_OBJ_USER: |
35763 |
+ case LSM_OBJ_ROLE: |
35764 |
+ case LSM_OBJ_TYPE: |
35765 |
+ security_inode_getsecid(inode, &osid); |
35766 |
+- rc = ima_filter_rule_match(osid, rule->lsm[i].type, |
35767 |
++ rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type, |
35768 |
+ Audit_equal, |
35769 |
+- rule->lsm[i].rule); |
35770 |
++ lsm_rule->lsm[i].rule); |
35771 |
+ break; |
35772 |
+ case LSM_SUBJ_USER: |
35773 |
+ case LSM_SUBJ_ROLE: |
35774 |
+ case LSM_SUBJ_TYPE: |
35775 |
+- rc = ima_filter_rule_match(secid, rule->lsm[i].type, |
35776 |
++ rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type, |
35777 |
+ Audit_equal, |
35778 |
+- rule->lsm[i].rule); |
35779 |
++ lsm_rule->lsm[i].rule); |
35780 |
+ break; |
35781 |
+ default: |
35782 |
+ break; |
35783 |
+ } |
35784 |
+- if (!rc) |
35785 |
+- return false; |
35786 |
++ |
35787 |
++ if (rc == -ESTALE && !rule_reinitialized) { |
35788 |
++ lsm_rule = ima_lsm_copy_rule(rule); |
35789 |
++ if (lsm_rule) { |
35790 |
++ rule_reinitialized = true; |
35791 |
++ goto retry; |
35792 |
++ } |
35793 |
++ } |
35794 |
++ if (!rc) { |
35795 |
++ result = false; |
35796 |
++ goto out; |
35797 |
++ } |
35798 |
+ } |
35799 |
+- return true; |
35800 |
++ result = true; |
35801 |
++ |
35802 |
++out: |
35803 |
++ if (rule_reinitialized) { |
35804 |
++ for (i = 0; i < MAX_LSM_RULES; i++) |
35805 |
++ ima_filter_rule_free(lsm_rule->lsm[i].rule); |
35806 |
++ kfree(lsm_rule); |
35807 |
++ } |
35808 |
++ return result; |
35809 |
+ } |
35810 |
+ |
35811 |
+ /* |
35812 |
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c |
35813 |
+index db1ad6d7a57fb..f84a0598e4f6a 100644 |
35814 |
+--- a/security/integrity/ima/ima_template.c |
35815 |
++++ b/security/integrity/ima/ima_template.c |
35816 |
+@@ -241,11 +241,11 @@ int template_desc_init_fields(const char *template_fmt, |
35817 |
+ } |
35818 |
+ |
35819 |
+ if (fields && num_fields) { |
35820 |
+- *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL); |
35821 |
++ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL); |
35822 |
+ if (*fields == NULL) |
35823 |
+ return -ENOMEM; |
35824 |
+ |
35825 |
+- memcpy(*fields, found_fields, i * sizeof(*fields)); |
35826 |
++ memcpy(*fields, found_fields, i * sizeof(**fields)); |
35827 |
+ *num_fields = i; |
35828 |
+ } |
35829 |
+ |
35830 |
+diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c |
35831 |
+index b12f7d986b1e3..5fce105a372d3 100644 |
35832 |
+--- a/security/loadpin/loadpin.c |
35833 |
++++ b/security/loadpin/loadpin.c |
35834 |
+@@ -118,21 +118,11 @@ static void loadpin_sb_free_security(struct super_block *mnt_sb) |
35835 |
+ } |
35836 |
+ } |
35837 |
+ |
35838 |
+-static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, |
35839 |
+- bool contents) |
35840 |
++static int loadpin_check(struct file *file, enum kernel_read_file_id id) |
35841 |
+ { |
35842 |
+ struct super_block *load_root; |
35843 |
+ const char *origin = kernel_read_file_id_str(id); |
35844 |
+ |
35845 |
+- /* |
35846 |
+- * If we will not know that we'll be seeing the full contents |
35847 |
+- * then we cannot trust a load will be complete and unchanged |
35848 |
+- * off disk. Treat all contents=false hooks as if there were |
35849 |
+- * no associated file struct. |
35850 |
+- */ |
35851 |
+- if (!contents) |
35852 |
+- file = NULL; |
35853 |
+- |
35854 |
+ /* If the file id is excluded, ignore the pinning. */ |
35855 |
+ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) && |
35856 |
+ ignore_read_file_id[id]) { |
35857 |
+@@ -187,9 +177,25 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, |
35858 |
+ return 0; |
35859 |
+ } |
35860 |
+ |
35861 |
++static int loadpin_read_file(struct file *file, enum kernel_read_file_id id, |
35862 |
++ bool contents) |
35863 |
++{ |
35864 |
++ /* |
35865 |
++ * LoadPin only cares about the _origin_ of a file, not its |
35866 |
++ * contents, so we can ignore the "are full contents available" |
35867 |
++ * argument here. |
35868 |
++ */ |
35869 |
++ return loadpin_check(file, id); |
35870 |
++} |
35871 |
++ |
35872 |
+ static int loadpin_load_data(enum kernel_load_data_id id, bool contents) |
35873 |
+ { |
35874 |
+- return loadpin_read_file(NULL, (enum kernel_read_file_id) id, contents); |
35875 |
++ /* |
35876 |
++ * LoadPin only cares about the _origin_ of a file, not its |
35877 |
++ * contents, so a NULL file is passed, and we can ignore the |
35878 |
++ * state of "contents". |
35879 |
++ */ |
35880 |
++ return loadpin_check(NULL, (enum kernel_read_file_id) id); |
35881 |
+ } |
35882 |
+ |
35883 |
+ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = { |
35884 |
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c |
35885 |
+index f38c2e5e9a297..44e06ef4ff0b4 100644 |
35886 |
+--- a/sound/core/pcm_native.c |
35887 |
++++ b/sound/core/pcm_native.c |
35888 |
+@@ -1419,8 +1419,10 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream, |
35889 |
+ static void snd_pcm_undo_start(struct snd_pcm_substream *substream, |
35890 |
+ snd_pcm_state_t state) |
35891 |
+ { |
35892 |
+- if (substream->runtime->trigger_master == substream) |
35893 |
++ if (substream->runtime->trigger_master == substream) { |
35894 |
+ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); |
35895 |
++ substream->runtime->stop_operating = true; |
35896 |
++ } |
35897 |
+ } |
35898 |
+ |
35899 |
+ static void snd_pcm_post_start(struct snd_pcm_substream *substream, |
35900 |
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c |
35901 |
+index d3bc9e8c407dc..f0d34cf70c3e0 100644 |
35902 |
+--- a/sound/drivers/mts64.c |
35903 |
++++ b/sound/drivers/mts64.c |
35904 |
+@@ -815,6 +815,9 @@ static void snd_mts64_interrupt(void *private) |
35905 |
+ u8 status, data; |
35906 |
+ struct snd_rawmidi_substream *substream; |
35907 |
+ |
35908 |
++ if (!mts) |
35909 |
++ return; |
35910 |
++ |
35911 |
+ spin_lock(&mts->lock); |
35912 |
+ ret = mts64_read(mts->pardev->port); |
35913 |
+ data = ret & 0x00ff; |
35914 |
+diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c |
35915 |
+index 37154ed43bd53..c09652da43ffd 100644 |
35916 |
+--- a/sound/hda/ext/hdac_ext_stream.c |
35917 |
++++ b/sound/hda/ext/hdac_ext_stream.c |
35918 |
+@@ -475,23 +475,6 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus, |
35919 |
+ } |
35920 |
+ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo); |
35921 |
+ |
35922 |
+- |
35923 |
+-/** |
35924 |
+- * snd_hdac_ext_stop_streams - stop all stream if running |
35925 |
+- * @bus: HD-audio core bus |
35926 |
+- */ |
35927 |
+-void snd_hdac_ext_stop_streams(struct hdac_bus *bus) |
35928 |
+-{ |
35929 |
+- struct hdac_stream *stream; |
35930 |
+- |
35931 |
+- if (bus->chip_init) { |
35932 |
+- list_for_each_entry(stream, &bus->stream_list, list) |
35933 |
+- snd_hdac_stream_stop(stream); |
35934 |
+- snd_hdac_bus_stop_chip(bus); |
35935 |
+- } |
35936 |
+-} |
35937 |
+-EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams); |
35938 |
+- |
35939 |
+ /** |
35940 |
+ * snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream |
35941 |
+ * @bus: HD-audio core bus |
35942 |
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c |
35943 |
+index aa7955fdf68a0..eea22cf72aefd 100644 |
35944 |
+--- a/sound/hda/hdac_stream.c |
35945 |
++++ b/sound/hda/hdac_stream.c |
35946 |
+@@ -142,6 +142,33 @@ void snd_hdac_stream_stop(struct hdac_stream *azx_dev) |
35947 |
+ } |
35948 |
+ EXPORT_SYMBOL_GPL(snd_hdac_stream_stop); |
35949 |
+ |
35950 |
++/** |
35951 |
++ * snd_hdac_stop_streams - stop all streams |
35952 |
++ * @bus: HD-audio core bus |
35953 |
++ */ |
35954 |
++void snd_hdac_stop_streams(struct hdac_bus *bus) |
35955 |
++{ |
35956 |
++ struct hdac_stream *stream; |
35957 |
++ |
35958 |
++ list_for_each_entry(stream, &bus->stream_list, list) |
35959 |
++ snd_hdac_stream_stop(stream); |
35960 |
++} |
35961 |
++EXPORT_SYMBOL_GPL(snd_hdac_stop_streams); |
35962 |
++ |
35963 |
++/** |
35964 |
++ * snd_hdac_stop_streams_and_chip - stop all streams and chip if running |
35965 |
++ * @bus: HD-audio core bus |
35966 |
++ */ |
35967 |
++void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus) |
35968 |
++{ |
35969 |
++ |
35970 |
++ if (bus->chip_init) { |
35971 |
++ snd_hdac_stop_streams(bus); |
35972 |
++ snd_hdac_bus_stop_chip(bus); |
35973 |
++ } |
35974 |
++} |
35975 |
++EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip); |
35976 |
++ |
35977 |
+ /** |
35978 |
+ * snd_hdac_stream_reset - reset a stream |
35979 |
+ * @azx_dev: HD-audio core stream to reset |
35980 |
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c |
35981 |
+index bb31b7fe867d6..477a5b4b50bcb 100644 |
35982 |
+--- a/sound/pci/asihpi/hpioctl.c |
35983 |
++++ b/sound/pci/asihpi/hpioctl.c |
35984 |
+@@ -361,7 +361,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, |
35985 |
+ pci_dev->device, pci_dev->subsystem_vendor, |
35986 |
+ pci_dev->subsystem_device, pci_dev->devfn); |
35987 |
+ |
35988 |
+- if (pci_enable_device(pci_dev) < 0) { |
35989 |
++ if (pcim_enable_device(pci_dev) < 0) { |
35990 |
+ dev_err(&pci_dev->dev, |
35991 |
+ "pci_enable_device failed, disabling device\n"); |
35992 |
+ return -EIO; |
35993 |
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c |
35994 |
+index 75dcb14ff20ad..0ff286b7b66be 100644 |
35995 |
+--- a/sound/pci/hda/hda_controller.c |
35996 |
++++ b/sound/pci/hda/hda_controller.c |
35997 |
+@@ -1033,10 +1033,8 @@ EXPORT_SYMBOL_GPL(azx_init_chip); |
35998 |
+ void azx_stop_all_streams(struct azx *chip) |
35999 |
+ { |
36000 |
+ struct hdac_bus *bus = azx_bus(chip); |
36001 |
+- struct hdac_stream *s; |
36002 |
+ |
36003 |
+- list_for_each_entry(s, &bus->stream_list, list) |
36004 |
+- snd_hdac_stream_stop(s); |
36005 |
++ snd_hdac_stop_streams(bus); |
36006 |
+ } |
36007 |
+ EXPORT_SYMBOL_GPL(azx_stop_all_streams); |
36008 |
+ |
36009 |
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c |
36010 |
+index ba1289abd45f8..5bf0597ce38b8 100644 |
36011 |
+--- a/sound/pci/hda/patch_hdmi.c |
36012 |
++++ b/sound/pci/hda/patch_hdmi.c |
36013 |
+@@ -1962,6 +1962,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) |
36014 |
+ static const struct snd_pci_quirk force_connect_list[] = { |
36015 |
+ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), |
36016 |
+ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), |
36017 |
++ SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1), |
36018 |
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), |
36019 |
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1), |
36020 |
+ {} |
36021 |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
36022 |
+index 28f8d98d39f15..79c65da1b4ee9 100644 |
36023 |
+--- a/sound/pci/hda/patch_realtek.c |
36024 |
++++ b/sound/pci/hda/patch_realtek.c |
36025 |
+@@ -10593,6 +10593,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec, |
36026 |
+ } |
36027 |
+ } |
36028 |
+ |
36029 |
++static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec, |
36030 |
++ const struct hda_fixup *fix, int action) |
36031 |
++{ |
36032 |
++ struct alc_spec *spec = codec->spec; |
36033 |
++ |
36034 |
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
36035 |
++ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; |
36036 |
++ spec->gen.hp_automute_hook = alc897_hp_automute_hook; |
36037 |
++ } |
36038 |
++} |
36039 |
++ |
36040 |
+ static const struct coef_fw alc668_coefs[] = { |
36041 |
+ WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), |
36042 |
+ WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), |
36043 |
+@@ -10676,6 +10687,8 @@ enum { |
36044 |
+ ALC897_FIXUP_LENOVO_HEADSET_MIC, |
36045 |
+ ALC897_FIXUP_HEADSET_MIC_PIN, |
36046 |
+ ALC897_FIXUP_HP_HSMIC_VERB, |
36047 |
++ ALC897_FIXUP_LENOVO_HEADSET_MODE, |
36048 |
++ ALC897_FIXUP_HEADSET_MIC_PIN2, |
36049 |
+ }; |
36050 |
+ |
36051 |
+ static const struct hda_fixup alc662_fixups[] = { |
36052 |
+@@ -11102,6 +11115,19 @@ static const struct hda_fixup alc662_fixups[] = { |
36053 |
+ { } |
36054 |
+ }, |
36055 |
+ }, |
36056 |
++ [ALC897_FIXUP_LENOVO_HEADSET_MODE] = { |
36057 |
++ .type = HDA_FIXUP_FUNC, |
36058 |
++ .v.func = alc897_fixup_lenovo_headset_mode, |
36059 |
++ }, |
36060 |
++ [ALC897_FIXUP_HEADSET_MIC_PIN2] = { |
36061 |
++ .type = HDA_FIXUP_PINS, |
36062 |
++ .v.pins = (const struct hda_pintbl[]) { |
36063 |
++ { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ |
36064 |
++ { } |
36065 |
++ }, |
36066 |
++ .chained = true, |
36067 |
++ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE |
36068 |
++ }, |
36069 |
+ }; |
36070 |
+ |
36071 |
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
36072 |
+@@ -11154,6 +11180,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
36073 |
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), |
36074 |
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), |
36075 |
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN), |
36076 |
++ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2), |
36077 |
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
36078 |
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
36079 |
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), |
36080 |
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c |
36081 |
+index 60dee41816dc2..1c26577f08ee0 100644 |
36082 |
+--- a/sound/soc/codecs/pcm512x.c |
36083 |
++++ b/sound/soc/codecs/pcm512x.c |
36084 |
+@@ -1635,7 +1635,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) |
36085 |
+ if (val > 6) { |
36086 |
+ dev_err(dev, "Invalid pll-in\n"); |
36087 |
+ ret = -EINVAL; |
36088 |
+- goto err_clk; |
36089 |
++ goto err_pm; |
36090 |
+ } |
36091 |
+ pcm512x->pll_in = val; |
36092 |
+ } |
36093 |
+@@ -1644,7 +1644,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) |
36094 |
+ if (val > 6) { |
36095 |
+ dev_err(dev, "Invalid pll-out\n"); |
36096 |
+ ret = -EINVAL; |
36097 |
+- goto err_clk; |
36098 |
++ goto err_pm; |
36099 |
+ } |
36100 |
+ pcm512x->pll_out = val; |
36101 |
+ } |
36102 |
+@@ -1653,12 +1653,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap) |
36103 |
+ dev_err(dev, |
36104 |
+ "Error: both pll-in and pll-out, or none\n"); |
36105 |
+ ret = -EINVAL; |
36106 |
+- goto err_clk; |
36107 |
++ goto err_pm; |
36108 |
+ } |
36109 |
+ if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) { |
36110 |
+ dev_err(dev, "Error: pll-in == pll-out\n"); |
36111 |
+ ret = -EINVAL; |
36112 |
+- goto err_clk; |
36113 |
++ goto err_pm; |
36114 |
+ } |
36115 |
+ } |
36116 |
+ #endif |
36117 |
+diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c |
36118 |
+index c592c40a7ab35..604754e4b29ff 100644 |
36119 |
+--- a/sound/soc/codecs/rt298.c |
36120 |
++++ b/sound/soc/codecs/rt298.c |
36121 |
+@@ -1173,6 +1173,13 @@ static const struct dmi_system_id force_combo_jack_table[] = { |
36122 |
+ DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake") |
36123 |
+ } |
36124 |
+ }, |
36125 |
++ { |
36126 |
++ .ident = "Intel Kabylake R RVP", |
36127 |
++ .matches = { |
36128 |
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), |
36129 |
++ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform") |
36130 |
++ } |
36131 |
++ }, |
36132 |
+ { } |
36133 |
+ }; |
36134 |
+ |
36135 |
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c |
36136 |
+index ecbaf129a6e3e..51b385575a5cc 100644 |
36137 |
+--- a/sound/soc/codecs/rt5670.c |
36138 |
++++ b/sound/soc/codecs/rt5670.c |
36139 |
+@@ -3313,8 +3313,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, |
36140 |
+ if (ret < 0) |
36141 |
+ goto err; |
36142 |
+ |
36143 |
+- pm_runtime_put(&i2c->dev); |
36144 |
+- |
36145 |
+ return 0; |
36146 |
+ err: |
36147 |
+ pm_runtime_disable(&i2c->dev); |
36148 |
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c |
36149 |
+index f117ec0c489f0..6759db92f6c46 100644 |
36150 |
+--- a/sound/soc/codecs/wm8994.c |
36151 |
++++ b/sound/soc/codecs/wm8994.c |
36152 |
+@@ -3853,7 +3853,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) |
36153 |
+ } else { |
36154 |
+ dev_dbg(component->dev, "Jack not detected\n"); |
36155 |
+ |
36156 |
++ /* Release wm8994->accdet_lock to avoid deadlock: |
36157 |
++ * cancel_delayed_work_sync() takes wm8994->mic_work internal |
36158 |
++ * lock and wm1811_mic_work takes wm8994->accdet_lock */ |
36159 |
++ mutex_unlock(&wm8994->accdet_lock); |
36160 |
+ cancel_delayed_work_sync(&wm8994->mic_work); |
36161 |
++ mutex_lock(&wm8994->accdet_lock); |
36162 |
+ |
36163 |
+ snd_soc_component_update_bits(component, WM8958_MICBIAS2, |
36164 |
+ WM8958_MICB2_DISCH, WM8958_MICB2_DISCH); |
36165 |
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c |
36166 |
+index 28cbcdb56857f..89814f68ff563 100644 |
36167 |
+--- a/sound/soc/generic/audio-graph-card.c |
36168 |
++++ b/sound/soc/generic/audio-graph-card.c |
36169 |
+@@ -483,8 +483,10 @@ static int __graph_for_each_link(struct asoc_simple_priv *priv, |
36170 |
+ of_node_put(codec_ep); |
36171 |
+ of_node_put(codec_port); |
36172 |
+ |
36173 |
+- if (ret < 0) |
36174 |
++ if (ret < 0) { |
36175 |
++ of_node_put(cpu_ep); |
36176 |
+ return ret; |
36177 |
++ } |
36178 |
+ |
36179 |
+ codec_port_old = codec_port; |
36180 |
+ } |
36181 |
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c |
36182 |
+index 5b1a15e399123..46bb3b8bd5afe 100644 |
36183 |
+--- a/sound/soc/intel/skylake/skl.c |
36184 |
++++ b/sound/soc/intel/skylake/skl.c |
36185 |
+@@ -439,7 +439,7 @@ static int skl_free(struct hdac_bus *bus) |
36186 |
+ |
36187 |
+ skl->init_done = 0; /* to be sure */ |
36188 |
+ |
36189 |
+- snd_hdac_ext_stop_streams(bus); |
36190 |
++ snd_hdac_stop_streams_and_chip(bus); |
36191 |
+ |
36192 |
+ if (bus->irq >= 0) |
36193 |
+ free_irq(bus->irq, (void *)bus); |
36194 |
+@@ -1096,7 +1096,10 @@ static void skl_shutdown(struct pci_dev *pci) |
36195 |
+ if (!skl->init_done) |
36196 |
+ return; |
36197 |
+ |
36198 |
+- snd_hdac_ext_stop_streams(bus); |
36199 |
++ snd_hdac_stop_streams(bus); |
36200 |
++ snd_hdac_ext_bus_link_power_down_all(bus); |
36201 |
++ skl_dsp_sleep(skl->dsp); |
36202 |
++ |
36203 |
+ list_for_each_entry(s, &bus->stream_list, list) { |
36204 |
+ stream = stream_to_hdac_ext_stream(s); |
36205 |
+ snd_hdac_ext_stream_decouple(bus, stream, false); |
36206 |
+diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c |
36207 |
+index d884bb7c0fc74..1c28b41e43112 100644 |
36208 |
+--- a/sound/soc/mediatek/common/mtk-btcvsd.c |
36209 |
++++ b/sound/soc/mediatek/common/mtk-btcvsd.c |
36210 |
+@@ -1038,11 +1038,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component, |
36211 |
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component); |
36212 |
+ |
36213 |
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
36214 |
+- mtk_btcvsd_snd_write(bt, buf, count); |
36215 |
++ return mtk_btcvsd_snd_write(bt, buf, count); |
36216 |
+ else |
36217 |
+- mtk_btcvsd_snd_read(bt, buf, count); |
36218 |
+- |
36219 |
+- return 0; |
36220 |
++ return mtk_btcvsd_snd_read(bt, buf, count); |
36221 |
+ } |
36222 |
+ |
36223 |
+ /* kcontrol */ |
36224 |
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c |
36225 |
+index 6350390414d4a..8092506facbd9 100644 |
36226 |
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c |
36227 |
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c |
36228 |
+@@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) |
36229 |
+ int irq_id; |
36230 |
+ struct mtk_base_afe *afe; |
36231 |
+ struct mt8173_afe_private *afe_priv; |
36232 |
++ struct snd_soc_component *comp_pcm, *comp_hdmi; |
36233 |
+ |
36234 |
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33)); |
36235 |
+ if (ret) |
36236 |
+@@ -1071,16 +1072,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) |
36237 |
+ |
36238 |
+ afe->dev = &pdev->dev; |
36239 |
+ |
36240 |
+- irq_id = platform_get_irq(pdev, 0); |
36241 |
+- if (irq_id <= 0) |
36242 |
+- return irq_id < 0 ? irq_id : -ENXIO; |
36243 |
+- ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler, |
36244 |
+- 0, "Afe_ISR_Handle", (void *)afe); |
36245 |
+- if (ret) { |
36246 |
+- dev_err(afe->dev, "could not request_irq\n"); |
36247 |
+- return ret; |
36248 |
+- } |
36249 |
+- |
36250 |
+ afe->base_addr = devm_platform_ioremap_resource(pdev, 0); |
36251 |
+ if (IS_ERR(afe->base_addr)) |
36252 |
+ return PTR_ERR(afe->base_addr); |
36253 |
+@@ -1142,23 +1133,65 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) |
36254 |
+ if (ret) |
36255 |
+ goto err_pm_disable; |
36256 |
+ |
36257 |
+- ret = devm_snd_soc_register_component(&pdev->dev, |
36258 |
+- &mt8173_afe_pcm_dai_component, |
36259 |
+- mt8173_afe_pcm_dais, |
36260 |
+- ARRAY_SIZE(mt8173_afe_pcm_dais)); |
36261 |
++ comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL); |
36262 |
++ if (!comp_pcm) { |
36263 |
++ ret = -ENOMEM; |
36264 |
++ goto err_pm_disable; |
36265 |
++ } |
36266 |
++ |
36267 |
++ ret = snd_soc_component_initialize(comp_pcm, |
36268 |
++ &mt8173_afe_pcm_dai_component, |
36269 |
++ &pdev->dev); |
36270 |
+ if (ret) |
36271 |
+ goto err_pm_disable; |
36272 |
+ |
36273 |
+- ret = devm_snd_soc_register_component(&pdev->dev, |
36274 |
+- &mt8173_afe_hdmi_dai_component, |
36275 |
+- mt8173_afe_hdmi_dais, |
36276 |
+- ARRAY_SIZE(mt8173_afe_hdmi_dais)); |
36277 |
++#ifdef CONFIG_DEBUG_FS |
36278 |
++ comp_pcm->debugfs_prefix = "pcm"; |
36279 |
++#endif |
36280 |
++ |
36281 |
++ ret = snd_soc_add_component(comp_pcm, |
36282 |
++ mt8173_afe_pcm_dais, |
36283 |
++ ARRAY_SIZE(mt8173_afe_pcm_dais)); |
36284 |
++ if (ret) |
36285 |
++ goto err_pm_disable; |
36286 |
++ |
36287 |
++ comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL); |
36288 |
++ if (!comp_hdmi) { |
36289 |
++ ret = -ENOMEM; |
36290 |
++ goto err_pm_disable; |
36291 |
++ } |
36292 |
++ |
36293 |
++ ret = snd_soc_component_initialize(comp_hdmi, |
36294 |
++ &mt8173_afe_hdmi_dai_component, |
36295 |
++ &pdev->dev); |
36296 |
+ if (ret) |
36297 |
+ goto err_pm_disable; |
36298 |
+ |
36299 |
++#ifdef CONFIG_DEBUG_FS |
36300 |
++ comp_hdmi->debugfs_prefix = "hdmi"; |
36301 |
++#endif |
36302 |
++ |
36303 |
++ ret = snd_soc_add_component(comp_hdmi, |
36304 |
++ mt8173_afe_hdmi_dais, |
36305 |
++ ARRAY_SIZE(mt8173_afe_hdmi_dais)); |
36306 |
++ if (ret) |
36307 |
++ goto err_cleanup_components; |
36308 |
++ |
36309 |
++ irq_id = platform_get_irq(pdev, 0); |
36310 |
++ if (irq_id <= 0) |
36311 |
++ return irq_id < 0 ? irq_id : -ENXIO; |
36312 |
++ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler, |
36313 |
++ 0, "Afe_ISR_Handle", (void *)afe); |
36314 |
++ if (ret) { |
36315 |
++ dev_err(afe->dev, "could not request_irq\n"); |
36316 |
++ goto err_pm_disable; |
36317 |
++ } |
36318 |
++ |
36319 |
+ dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n"); |
36320 |
+ return 0; |
36321 |
+ |
36322 |
++err_cleanup_components: |
36323 |
++ snd_soc_unregister_component(&pdev->dev); |
36324 |
+ err_pm_disable: |
36325 |
+ pm_runtime_disable(&pdev->dev); |
36326 |
+ return ret; |
36327 |
+@@ -1166,6 +1199,8 @@ err_pm_disable: |
36328 |
+ |
36329 |
+ static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev) |
36330 |
+ { |
36331 |
++ snd_soc_unregister_component(&pdev->dev); |
36332 |
++ |
36333 |
+ pm_runtime_disable(&pdev->dev); |
36334 |
+ if (!pm_runtime_status_suspended(&pdev->dev)) |
36335 |
+ mt8173_afe_runtime_suspend(&pdev->dev); |
36336 |
+diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c |
36337 |
+index 390da5bf727eb..9421b919d4627 100644 |
36338 |
+--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c |
36339 |
++++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c |
36340 |
+@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) |
36341 |
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) { |
36342 |
+ dev_err(&pdev->dev, |
36343 |
+ "Property 'audio-codec' missing or invalid\n"); |
36344 |
+- return -EINVAL; |
36345 |
++ ret = -EINVAL; |
36346 |
++ goto out; |
36347 |
+ } |
36348 |
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node = |
36349 |
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1); |
36350 |
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) { |
36351 |
+ dev_err(&pdev->dev, |
36352 |
+ "Property 'audio-codec' missing or invalid\n"); |
36353 |
+- return -EINVAL; |
36354 |
++ ret = -EINVAL; |
36355 |
++ goto out; |
36356 |
+ } |
36357 |
+ mt8173_rt5650_rt5514_codec_conf[0].dlc.of_node = |
36358 |
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node; |
36359 |
+@@ -219,6 +221,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) |
36360 |
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", |
36361 |
+ __func__, ret); |
36362 |
+ |
36363 |
++out: |
36364 |
+ of_node_put(platform_node); |
36365 |
+ return ret; |
36366 |
+ } |
36367 |
+diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c |
36368 |
+index a56c1e87d5642..4dab1ee69ec07 100644 |
36369 |
+--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c |
36370 |
++++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c |
36371 |
+@@ -647,8 +647,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) |
36372 |
+ } |
36373 |
+ |
36374 |
+ card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev); |
36375 |
+- if (!card) |
36376 |
++ if (!card) { |
36377 |
++ of_node_put(platform_node); |
36378 |
+ return -EINVAL; |
36379 |
++ } |
36380 |
+ card->dev = &pdev->dev; |
36381 |
+ |
36382 |
+ ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0); |
36383 |
+@@ -737,8 +739,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) |
36384 |
+ } |
36385 |
+ |
36386 |
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
36387 |
+- if (!priv) |
36388 |
+- return -ENOMEM; |
36389 |
++ if (!priv) { |
36390 |
++ ret = -ENOMEM; |
36391 |
++ goto out; |
36392 |
++ } |
36393 |
+ |
36394 |
+ snd_soc_card_set_drvdata(card, priv); |
36395 |
+ |
36396 |
+@@ -746,7 +750,8 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) |
36397 |
+ if (IS_ERR(priv->pinctrl)) { |
36398 |
+ dev_err(&pdev->dev, "%s devm_pinctrl_get failed\n", |
36399 |
+ __func__); |
36400 |
+- return PTR_ERR(priv->pinctrl); |
36401 |
++ ret = PTR_ERR(priv->pinctrl); |
36402 |
++ goto out; |
36403 |
+ } |
36404 |
+ |
36405 |
+ for (i = 0; i < PIN_STATE_MAX; i++) { |
36406 |
+@@ -779,6 +784,7 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) |
36407 |
+ |
36408 |
+ ret = devm_snd_soc_register_card(&pdev->dev, card); |
36409 |
+ |
36410 |
++out: |
36411 |
+ of_node_put(platform_node); |
36412 |
+ of_node_put(ec_codec); |
36413 |
+ of_node_put(hdmi_codec); |
36414 |
+diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c |
36415 |
+index 5d520e18e512f..99b245e3079a2 100644 |
36416 |
+--- a/sound/soc/pxa/mmp-pcm.c |
36417 |
++++ b/sound/soc/pxa/mmp-pcm.c |
36418 |
+@@ -98,7 +98,7 @@ static bool filter(struct dma_chan *chan, void *param) |
36419 |
+ |
36420 |
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name, |
36421 |
+ dma_data->ssp_id); |
36422 |
+- if ((strcmp(dev_name(chan->device->dev), devname) == 0) && |
36423 |
++ if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) && |
36424 |
+ (chan->chan_id == dma_data->dma_res->start)) { |
36425 |
+ found = true; |
36426 |
+ } |
36427 |
+diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c |
36428 |
+index 77a556b27cf09..24a1c121cb2e9 100644 |
36429 |
+--- a/sound/soc/qcom/lpass-sc7180.c |
36430 |
++++ b/sound/soc/qcom/lpass-sc7180.c |
36431 |
+@@ -131,6 +131,9 @@ static int sc7180_lpass_init(struct platform_device *pdev) |
36432 |
+ |
36433 |
+ drvdata->clks = devm_kcalloc(dev, variant->num_clks, |
36434 |
+ sizeof(*drvdata->clks), GFP_KERNEL); |
36435 |
++ if (!drvdata->clks) |
36436 |
++ return -ENOMEM; |
36437 |
++ |
36438 |
+ drvdata->num_clks = variant->num_clks; |
36439 |
+ |
36440 |
+ for (i = 0; i < drvdata->num_clks; i++) |
36441 |
+diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c |
36442 |
+index 38bd603eeb454..7c0b0fe326c22 100644 |
36443 |
+--- a/sound/soc/rockchip/rockchip_pdm.c |
36444 |
++++ b/sound/soc/rockchip/rockchip_pdm.c |
36445 |
+@@ -368,6 +368,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev) |
36446 |
+ |
36447 |
+ ret = clk_prepare_enable(pdm->hclk); |
36448 |
+ if (ret) { |
36449 |
++ clk_disable_unprepare(pdm->clk); |
36450 |
+ dev_err(pdm->dev, "hclock enable failed %d\n", ret); |
36451 |
+ return ret; |
36452 |
+ } |
36453 |
+diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c |
36454 |
+index d027ca4b17964..09a25d84fee6f 100644 |
36455 |
+--- a/sound/soc/rockchip/rockchip_spdif.c |
36456 |
++++ b/sound/soc/rockchip/rockchip_spdif.c |
36457 |
+@@ -88,6 +88,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev) |
36458 |
+ |
36459 |
+ ret = clk_prepare_enable(spdif->hclk); |
36460 |
+ if (ret) { |
36461 |
++ clk_disable_unprepare(spdif->mclk); |
36462 |
+ dev_err(spdif->dev, "hclk clock enable failed %d\n", ret); |
36463 |
+ return ret; |
36464 |
+ } |
36465 |
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
36466 |
+index 95358f04341b0..b2f896e863532 100644 |
36467 |
+--- a/sound/usb/quirks-table.h |
36468 |
++++ b/sound/usb/quirks-table.h |
36469 |
+@@ -76,6 +76,8 @@ |
36470 |
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f0a) }, |
36471 |
+ /* E-Mu 0204 USB */ |
36472 |
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) }, |
36473 |
++/* Ktmicro Usb_audio device */ |
36474 |
++{ USB_DEVICE_VENDOR_SPEC(0x31b2, 0x0011) }, |
36475 |
+ |
36476 |
+ /* |
36477 |
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770] |
36478 |
+diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h |
36479 |
+index a7e54a08fb54c..c2e109860fbc1 100644 |
36480 |
+--- a/tools/include/linux/kernel.h |
36481 |
++++ b/tools/include/linux/kernel.h |
36482 |
+@@ -14,6 +14,8 @@ |
36483 |
+ #define UINT_MAX (~0U) |
36484 |
+ #endif |
36485 |
+ |
36486 |
++#define _RET_IP_ ((unsigned long)__builtin_return_address(0)) |
36487 |
++ |
36488 |
+ #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
36489 |
+ |
36490 |
+ #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) |
36491 |
+@@ -52,6 +54,10 @@ |
36492 |
+ _min1 < _min2 ? _min1 : _min2; }) |
36493 |
+ #endif |
36494 |
+ |
36495 |
++#define max_t(type, x, y) max((type)x, (type)y) |
36496 |
++#define min_t(type, x, y) min((type)x, (type)y) |
36497 |
++#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
36498 |
++ |
36499 |
+ #ifndef roundup |
36500 |
+ #define roundup(x, y) ( \ |
36501 |
+ { \ |
36502 |
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h |
36503 |
+index 6fffb3cdf39b9..49bd43b998c8a 100644 |
36504 |
+--- a/tools/lib/bpf/bpf.h |
36505 |
++++ b/tools/lib/bpf/bpf.h |
36506 |
+@@ -249,8 +249,15 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, |
36507 |
+ __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, |
36508 |
+ __u64 *probe_offset, __u64 *probe_addr); |
36509 |
+ |
36510 |
++#ifdef __cplusplus |
36511 |
++/* forward-declaring enums in C++ isn't compatible with pure C enums, so |
36512 |
++ * instead define bpf_enable_stats() as accepting int as an input |
36513 |
++ */ |
36514 |
++LIBBPF_API int bpf_enable_stats(int type); |
36515 |
++#else |
36516 |
+ enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ |
36517 |
+ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); |
36518 |
++#endif |
36519 |
+ |
36520 |
+ struct bpf_prog_bind_opts { |
36521 |
+ size_t sz; /* size of this struct for forward/backward compatibility */ |
36522 |
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c |
36523 |
+index 5f3d20ae66d56..3ed759f53e7c2 100644 |
36524 |
+--- a/tools/lib/bpf/btf.c |
36525 |
++++ b/tools/lib/bpf/btf.c |
36526 |
+@@ -3718,14 +3718,14 @@ static inline __u16 btf_fwd_kind(struct btf_type *t) |
36527 |
+ } |
36528 |
+ |
36529 |
+ /* Check if given two types are identical ARRAY definitions */ |
36530 |
+-static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) |
36531 |
++static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) |
36532 |
+ { |
36533 |
+ struct btf_type *t1, *t2; |
36534 |
+ |
36535 |
+ t1 = btf_type_by_id(d->btf, id1); |
36536 |
+ t2 = btf_type_by_id(d->btf, id2); |
36537 |
+ if (!btf_is_array(t1) || !btf_is_array(t2)) |
36538 |
+- return 0; |
36539 |
++ return false; |
36540 |
+ |
36541 |
+ return btf_equal_array(t1, t2); |
36542 |
+ } |
36543 |
+@@ -3749,7 +3749,9 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id |
36544 |
+ m1 = btf_members(t1); |
36545 |
+ m2 = btf_members(t2); |
36546 |
+ for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { |
36547 |
+- if (m1->type != m2->type) |
36548 |
++ if (m1->type != m2->type && |
36549 |
++ !btf_dedup_identical_arrays(d, m1->type, m2->type) && |
36550 |
++ !btf_dedup_identical_structs(d, m1->type, m2->type)) |
36551 |
+ return false; |
36552 |
+ } |
36553 |
+ return true; |
36554 |
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c |
36555 |
+index 841cc68e3f427..f620911ad3bb5 100644 |
36556 |
+--- a/tools/lib/bpf/btf_dump.c |
36557 |
++++ b/tools/lib/bpf/btf_dump.c |
36558 |
+@@ -215,6 +215,17 @@ static int btf_dump_resize(struct btf_dump *d) |
36559 |
+ return 0; |
36560 |
+ } |
36561 |
+ |
36562 |
++static void btf_dump_free_names(struct hashmap *map) |
36563 |
++{ |
36564 |
++ size_t bkt; |
36565 |
++ struct hashmap_entry *cur; |
36566 |
++ |
36567 |
++ hashmap__for_each_entry(map, cur, bkt) |
36568 |
++ free((void *)cur->key); |
36569 |
++ |
36570 |
++ hashmap__free(map); |
36571 |
++} |
36572 |
++ |
36573 |
+ void btf_dump__free(struct btf_dump *d) |
36574 |
+ { |
36575 |
+ int i; |
36576 |
+@@ -233,8 +244,8 @@ void btf_dump__free(struct btf_dump *d) |
36577 |
+ free(d->cached_names); |
36578 |
+ free(d->emit_queue); |
36579 |
+ free(d->decl_stack); |
36580 |
+- hashmap__free(d->type_names); |
36581 |
+- hashmap__free(d->ident_names); |
36582 |
++ btf_dump_free_names(d->type_names); |
36583 |
++ btf_dump_free_names(d->ident_names); |
36584 |
+ |
36585 |
+ free(d); |
36586 |
+ } |
36587 |
+@@ -1457,11 +1468,23 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id, |
36588 |
+ static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, |
36589 |
+ const char *orig_name) |
36590 |
+ { |
36591 |
++ char *old_name, *new_name; |
36592 |
+ size_t dup_cnt = 0; |
36593 |
++ int err; |
36594 |
++ |
36595 |
++ new_name = strdup(orig_name); |
36596 |
++ if (!new_name) |
36597 |
++ return 1; |
36598 |
+ |
36599 |
+ hashmap__find(name_map, orig_name, (void **)&dup_cnt); |
36600 |
+ dup_cnt++; |
36601 |
+- hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); |
36602 |
++ |
36603 |
++ err = hashmap__set(name_map, new_name, (void *)dup_cnt, |
36604 |
++ (const void **)&old_name, NULL); |
36605 |
++ if (err) |
36606 |
++ free(new_name); |
36607 |
++ |
36608 |
++ free(old_name); |
36609 |
+ |
36610 |
+ return dup_cnt; |
36611 |
+ } |
36612 |
+@@ -1892,7 +1915,7 @@ static int btf_dump_struct_data(struct btf_dump *d, |
36613 |
+ { |
36614 |
+ const struct btf_member *m = btf_members(t); |
36615 |
+ __u16 n = btf_vlen(t); |
36616 |
+- int i, err; |
36617 |
++ int i, err = 0; |
36618 |
+ |
36619 |
+ /* note that we increment depth before calling btf_dump_print() below; |
36620 |
+ * this is intentional. btf_dump_data_newline() will not print a |
36621 |
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c |
36622 |
+index 050622649797c..f87a15bbf53b3 100644 |
36623 |
+--- a/tools/lib/bpf/libbpf.c |
36624 |
++++ b/tools/lib/bpf/libbpf.c |
36625 |
+@@ -3763,6 +3763,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, |
36626 |
+ int l = 0, r = obj->nr_programs - 1, m; |
36627 |
+ struct bpf_program *prog; |
36628 |
+ |
36629 |
++ if (!obj->nr_programs) |
36630 |
++ return NULL; |
36631 |
++ |
36632 |
+ while (l < r) { |
36633 |
+ m = l + (r - l + 1) / 2; |
36634 |
+ prog = &obj->programs[m]; |
36635 |
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c |
36636 |
+index 72e5d23f1ad88..edac5aaa28024 100644 |
36637 |
+--- a/tools/objtool/check.c |
36638 |
++++ b/tools/objtool/check.c |
36639 |
+@@ -846,6 +846,16 @@ static const char *uaccess_safe_builtin[] = { |
36640 |
+ "__tsan_read_write4", |
36641 |
+ "__tsan_read_write8", |
36642 |
+ "__tsan_read_write16", |
36643 |
++ "__tsan_volatile_read1", |
36644 |
++ "__tsan_volatile_read2", |
36645 |
++ "__tsan_volatile_read4", |
36646 |
++ "__tsan_volatile_read8", |
36647 |
++ "__tsan_volatile_read16", |
36648 |
++ "__tsan_volatile_write1", |
36649 |
++ "__tsan_volatile_write2", |
36650 |
++ "__tsan_volatile_write4", |
36651 |
++ "__tsan_volatile_write8", |
36652 |
++ "__tsan_volatile_write16", |
36653 |
+ "__tsan_atomic8_load", |
36654 |
+ "__tsan_atomic16_load", |
36655 |
+ "__tsan_atomic32_load", |
36656 |
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c |
36657 |
+index abf88a1ad455c..aad65c95c3711 100644 |
36658 |
+--- a/tools/perf/builtin-stat.c |
36659 |
++++ b/tools/perf/builtin-stat.c |
36660 |
+@@ -558,26 +558,14 @@ static int enable_counters(void) |
36661 |
+ return err; |
36662 |
+ } |
36663 |
+ |
36664 |
+- if (stat_config.initial_delay < 0) { |
36665 |
+- pr_info(EVLIST_DISABLED_MSG); |
36666 |
+- return 0; |
36667 |
+- } |
36668 |
+- |
36669 |
+- if (stat_config.initial_delay > 0) { |
36670 |
+- pr_info(EVLIST_DISABLED_MSG); |
36671 |
+- usleep(stat_config.initial_delay * USEC_PER_MSEC); |
36672 |
+- } |
36673 |
+- |
36674 |
+ /* |
36675 |
+ * We need to enable counters only if: |
36676 |
+ * - we don't have tracee (attaching to task or cpu) |
36677 |
+ * - we have initial delay configured |
36678 |
+ */ |
36679 |
+- if (!target__none(&target) || stat_config.initial_delay) { |
36680 |
++ if (!target__none(&target)) { |
36681 |
+ if (!all_counters_use_bpf) |
36682 |
+ evlist__enable(evsel_list); |
36683 |
+- if (stat_config.initial_delay > 0) |
36684 |
+- pr_info(EVLIST_ENABLED_MSG); |
36685 |
+ } |
36686 |
+ return 0; |
36687 |
+ } |
36688 |
+@@ -953,18 +941,31 @@ try_again_reset: |
36689 |
+ return err; |
36690 |
+ } |
36691 |
+ |
36692 |
+- /* |
36693 |
+- * Enable counters and exec the command: |
36694 |
+- */ |
36695 |
+- if (forks) { |
36696 |
++ if (stat_config.initial_delay) { |
36697 |
++ pr_info(EVLIST_DISABLED_MSG); |
36698 |
++ } else { |
36699 |
+ err = enable_counters(); |
36700 |
+ if (err) |
36701 |
+ return -1; |
36702 |
++ } |
36703 |
++ |
36704 |
++ /* Exec the command, if any */ |
36705 |
++ if (forks) |
36706 |
+ evlist__start_workload(evsel_list); |
36707 |
+ |
36708 |
+- t0 = rdclock(); |
36709 |
+- clock_gettime(CLOCK_MONOTONIC, &ref_time); |
36710 |
++ if (stat_config.initial_delay > 0) { |
36711 |
++ usleep(stat_config.initial_delay * USEC_PER_MSEC); |
36712 |
++ err = enable_counters(); |
36713 |
++ if (err) |
36714 |
++ return -1; |
36715 |
++ |
36716 |
++ pr_info(EVLIST_ENABLED_MSG); |
36717 |
++ } |
36718 |
+ |
36719 |
++ t0 = rdclock(); |
36720 |
++ clock_gettime(CLOCK_MONOTONIC, &ref_time); |
36721 |
++ |
36722 |
++ if (forks) { |
36723 |
+ if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) |
36724 |
+ status = dispatch_events(forks, timeout, interval, &times); |
36725 |
+ if (child_pid != -1) { |
36726 |
+@@ -982,13 +983,6 @@ try_again_reset: |
36727 |
+ if (WIFSIGNALED(status)) |
36728 |
+ psignal(WTERMSIG(status), argv[0]); |
36729 |
+ } else { |
36730 |
+- err = enable_counters(); |
36731 |
+- if (err) |
36732 |
+- return -1; |
36733 |
+- |
36734 |
+- t0 = rdclock(); |
36735 |
+- clock_gettime(CLOCK_MONOTONIC, &ref_time); |
36736 |
+- |
36737 |
+ status = dispatch_events(forks, timeout, interval, &times); |
36738 |
+ } |
36739 |
+ |
36740 |
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c |
36741 |
+index 2bf21194c7b39..2fea9952818f5 100644 |
36742 |
+--- a/tools/perf/builtin-trace.c |
36743 |
++++ b/tools/perf/builtin-trace.c |
36744 |
+@@ -87,6 +87,8 @@ |
36745 |
+ # define F_LINUX_SPECIFIC_BASE 1024 |
36746 |
+ #endif |
36747 |
+ |
36748 |
++#define RAW_SYSCALL_ARGS_NUM 6 |
36749 |
++ |
36750 |
+ /* |
36751 |
+ * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100 |
36752 |
+ */ |
36753 |
+@@ -107,7 +109,7 @@ struct syscall_fmt { |
36754 |
+ const char *sys_enter, |
36755 |
+ *sys_exit; |
36756 |
+ } bpf_prog_name; |
36757 |
+- struct syscall_arg_fmt arg[6]; |
36758 |
++ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM]; |
36759 |
+ u8 nr_args; |
36760 |
+ bool errpid; |
36761 |
+ bool timeout; |
36762 |
+@@ -1224,7 +1226,7 @@ struct syscall { |
36763 |
+ */ |
36764 |
+ struct bpf_map_syscall_entry { |
36765 |
+ bool enabled; |
36766 |
+- u16 string_args_len[6]; |
36767 |
++ u16 string_args_len[RAW_SYSCALL_ARGS_NUM]; |
36768 |
+ }; |
36769 |
+ |
36770 |
+ /* |
36771 |
+@@ -1649,7 +1651,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) |
36772 |
+ { |
36773 |
+ int idx; |
36774 |
+ |
36775 |
+- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0) |
36776 |
++ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) |
36777 |
+ nr_args = sc->fmt->nr_args; |
36778 |
+ |
36779 |
+ sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); |
36780 |
+@@ -1782,11 +1784,11 @@ static int trace__read_syscall_info(struct trace *trace, int id) |
36781 |
+ #endif |
36782 |
+ sc = trace->syscalls.table + id; |
36783 |
+ if (sc->nonexistent) |
36784 |
+- return 0; |
36785 |
++ return -EEXIST; |
36786 |
+ |
36787 |
+ if (name == NULL) { |
36788 |
+ sc->nonexistent = true; |
36789 |
+- return 0; |
36790 |
++ return -EEXIST; |
36791 |
+ } |
36792 |
+ |
36793 |
+ sc->name = name; |
36794 |
+@@ -1800,11 +1802,18 @@ static int trace__read_syscall_info(struct trace *trace, int id) |
36795 |
+ sc->tp_format = trace_event__tp_format("syscalls", tp_name); |
36796 |
+ } |
36797 |
+ |
36798 |
+- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields)) |
36799 |
+- return -ENOMEM; |
36800 |
+- |
36801 |
+- if (IS_ERR(sc->tp_format)) |
36802 |
++ /* |
36803 |
++ * Fails to read trace point format via sysfs node, so the trace point |
36804 |
++ * doesn't exist. Set the 'nonexistent' flag as true. |
36805 |
++ */ |
36806 |
++ if (IS_ERR(sc->tp_format)) { |
36807 |
++ sc->nonexistent = true; |
36808 |
+ return PTR_ERR(sc->tp_format); |
36809 |
++ } |
36810 |
++ |
36811 |
++ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? |
36812 |
++ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields)) |
36813 |
++ return -ENOMEM; |
36814 |
+ |
36815 |
+ sc->args = sc->tp_format->format.fields; |
36816 |
+ /* |
36817 |
+@@ -2122,11 +2131,8 @@ static struct syscall *trace__syscall_info(struct trace *trace, |
36818 |
+ (err = trace__read_syscall_info(trace, id)) != 0) |
36819 |
+ goto out_cant_read; |
36820 |
+ |
36821 |
+- if (trace->syscalls.table[id].name == NULL) { |
36822 |
+- if (trace->syscalls.table[id].nonexistent) |
36823 |
+- return NULL; |
36824 |
++ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) |
36825 |
+ goto out_cant_read; |
36826 |
+- } |
36827 |
+ |
36828 |
+ return &trace->syscalls.table[id]; |
36829 |
+ |
36830 |
+diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c |
36831 |
+index 65e6c22f38e4f..190e818a07176 100644 |
36832 |
+--- a/tools/perf/util/debug.c |
36833 |
++++ b/tools/perf/util/debug.c |
36834 |
+@@ -241,6 +241,10 @@ int perf_quiet_option(void) |
36835 |
+ opt++; |
36836 |
+ } |
36837 |
+ |
36838 |
++ /* For debug variables that are used as bool types, set to 0. */ |
36839 |
++ redirect_to_stderr = 0; |
36840 |
++ debug_peo_args = 0; |
36841 |
++ |
36842 |
+ return 0; |
36843 |
+ } |
36844 |
+ |
36845 |
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c |
36846 |
+index 6c183df191aaa..fd42f768e5848 100644 |
36847 |
+--- a/tools/perf/util/symbol-elf.c |
36848 |
++++ b/tools/perf/util/symbol-elf.c |
36849 |
+@@ -1292,7 +1292,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, |
36850 |
+ (!used_opd && syms_ss->adjust_symbols)) { |
36851 |
+ GElf_Phdr phdr; |
36852 |
+ |
36853 |
+- if (elf_read_program_header(syms_ss->elf, |
36854 |
++ if (elf_read_program_header(runtime_ss->elf, |
36855 |
+ (u64)sym.st_value, &phdr)) { |
36856 |
+ pr_debug4("%s: failed to find program header for " |
36857 |
+ "symbol: %s st_value: %#" PRIx64 "\n", |
36858 |
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config |
36859 |
+index 5192305159ec1..4a2a47fcd6efd 100644 |
36860 |
+--- a/tools/testing/selftests/bpf/config |
36861 |
++++ b/tools/testing/selftests/bpf/config |
36862 |
+@@ -46,3 +46,7 @@ CONFIG_IMA_READ_POLICY=y |
36863 |
+ CONFIG_BLK_DEV_LOOP=y |
36864 |
+ CONFIG_FUNCTION_TRACER=y |
36865 |
+ CONFIG_DYNAMIC_FTRACE=y |
36866 |
++CONFIG_NETFILTER=y |
36867 |
++CONFIG_NF_DEFRAG_IPV4=y |
36868 |
++CONFIG_NF_DEFRAG_IPV6=y |
36869 |
++CONFIG_NF_CONNTRACK=y |
36870 |
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c |
36871 |
+new file mode 100644 |
36872 |
+index 0000000000000..e3166a81e9892 |
36873 |
+--- /dev/null |
36874 |
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c |
36875 |
+@@ -0,0 +1,48 @@ |
36876 |
++// SPDX-License-Identifier: GPL-2.0 |
36877 |
++#include <test_progs.h> |
36878 |
++#include <network_helpers.h> |
36879 |
++#include "test_bpf_nf.skel.h" |
36880 |
++ |
36881 |
++enum { |
36882 |
++ TEST_XDP, |
36883 |
++ TEST_TC_BPF, |
36884 |
++}; |
36885 |
++ |
36886 |
++void test_bpf_nf_ct(int mode) |
36887 |
++{ |
36888 |
++ struct test_bpf_nf *skel; |
36889 |
++ int prog_fd, err, retval; |
36890 |
++ |
36891 |
++ skel = test_bpf_nf__open_and_load(); |
36892 |
++ if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) |
36893 |
++ return; |
36894 |
++ |
36895 |
++ if (mode == TEST_XDP) |
36896 |
++ prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test); |
36897 |
++ else |
36898 |
++ prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test); |
36899 |
++ |
36900 |
++ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, |
36901 |
++ (__u32 *)&retval, NULL); |
36902 |
++ if (!ASSERT_OK(err, "bpf_prog_test_run")) |
36903 |
++ goto end; |
36904 |
++ |
36905 |
++ ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); |
36906 |
++ ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); |
36907 |
++ ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); |
36908 |
++ ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); |
36909 |
++ ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); |
36910 |
++ ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id"); |
36911 |
++ ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup"); |
36912 |
++ ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple"); |
36913 |
++end: |
36914 |
++ test_bpf_nf__destroy(skel); |
36915 |
++} |
36916 |
++ |
36917 |
++void test_bpf_nf(void) |
36918 |
++{ |
36919 |
++ if (test__start_subtest("xdp-ct")) |
36920 |
++ test_bpf_nf_ct(TEST_XDP); |
36921 |
++ if (test__start_subtest("tc-bpf-ct")) |
36922 |
++ test_bpf_nf_ct(TEST_TC_BPF); |
36923 |
++} |
36924 |
+diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+new file mode 100644
+index 0000000000000..6f131c993c0b9
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+@@ -0,0 +1,109 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <vmlinux.h>
++#include <bpf/bpf_helpers.h>
++
++#define EAFNOSUPPORT 97
++#define EPROTO 71
++#define ENONET 64
++#define EINVAL 22
++#define ENOENT 2
++
++int test_einval_bpf_tuple = 0;
++int test_einval_reserved = 0;
++int test_einval_netns_id = 0;
++int test_einval_len_opts = 0;
++int test_eproto_l4proto = 0;
++int test_enonet_netns_id = 0;
++int test_enoent_lookup = 0;
++int test_eafnosupport = 0;
++
++struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
++ struct bpf_ct_opts *, u32) __ksym;
++struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
++ struct bpf_ct_opts *, u32) __ksym;
++void bpf_ct_release(struct nf_conn *) __ksym;
++
++static __always_inline void
++nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
++ struct bpf_ct_opts *, u32),
++ void *ctx)
++{
++ struct bpf_ct_opts opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
++ struct bpf_sock_tuple bpf_tuple;
++ struct nf_conn *ct;
++
++ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
++
++ ct = func(ctx, NULL, 0, &opts_def, sizeof(opts_def));
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_einval_bpf_tuple = opts_def.error;
++
++ opts_def.reserved[0] = 1;
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
++ opts_def.reserved[0] = 0;
++ opts_def.l4proto = IPPROTO_TCP;
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_einval_reserved = opts_def.error;
++
++ opts_def.netns_id = -2;
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
++ opts_def.netns_id = -1;
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_einval_netns_id = opts_def.error;
++
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def) - 1);
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_einval_len_opts = opts_def.error;
++
++ opts_def.l4proto = IPPROTO_ICMP;
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
++ opts_def.l4proto = IPPROTO_TCP;
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_eproto_l4proto = opts_def.error;
++
++ opts_def.netns_id = 0xf00f;
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
++ opts_def.netns_id = -1;
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_enonet_netns_id = opts_def.error;
++
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_enoent_lookup = opts_def.error;
++
++ ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def, sizeof(opts_def));
++ if (ct)
++ bpf_ct_release(ct);
++ else
++ test_eafnosupport = opts_def.error;
++}
++
++SEC("xdp")
++int nf_xdp_ct_test(struct xdp_md *ctx)
++{
++ nf_ct_test((void *)bpf_xdp_ct_lookup, ctx);
++ return 0;
++}
++
++SEC("tc")
++int nf_skb_ct_test(struct __sk_buff *ctx)
++{
++ nf_ct_test((void *)bpf_skb_ct_lookup, ctx);
++ return 0;
++}
++
++char _license[] SEC("license") = "GPL";
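Each probe in the program above deliberately violates one argument check performed by the bpf_xdp_ct_lookup()/bpf_skb_ct_lookup() kfuncs and stores the error reported back through the opts structure. As a minimal sketch of the layout those checks refer to (the authoritative definition lives in the kernel's nf_conntrack_bpf.c; the exact fields and NF_BPF_CT_OPTS_SZ value below are recalled from the upstream series, not taken from this patch):

	struct bpf_ct_opts {
		__s32 netns_id;    /* -1 = caller's netns, >= 0 = explicit netns; below -1 -> -EINVAL */
		__s32 error;       /* filled in by the kfunc on failure (-EINVAL, -EPROTO, -ENONET, -ENOENT, ...) */
		__u8  l4proto;     /* only IPPROTO_TCP and IPPROTO_UDP are accepted, otherwise -EPROTO */
		__u8  reserved[3]; /* must be zero, otherwise -EINVAL */
	};

	/* len__opts must equal NF_BPF_CT_OPTS_SZ (12 with the layout above), which is why
	 * the sizeof(opts_def) - 1 probe expects -EINVAL; a tuple length that matches
	 * neither bpf_tuple.ipv4 nor bpf_tuple.ipv6 yields -EAFNOSUPPORT, and a lookup
	 * that passes all checks but finds no entry returns NULL with -ENOENT. */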
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+index 9de1d123f4f5d..a08c02abde121 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+@@ -496,8 +496,8 @@ dummy_reporter_test()
+
+ check_reporter_info dummy healthy 3 3 10 true
+
+- echo 8192> $DEBUGFS_DIR/health/binary_len
+- check_fail $? "Failed set dummy reporter binary len to 8192"
++ echo 8192 > $DEBUGFS_DIR/health/binary_len
++ check_err $? "Failed set dummy reporter binary len to 8192"
+
+ local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ check_err $? "Failed show dump of dummy reporter"
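Two things change in this devlink.sh hunk. Without the space, the shell parses 8192> as a redirection of file descriptor 8192 rather than as an argument, so the value never reached binary_len; with the space the value is actually written. Because the write is now expected to succeed, the assertion flips from check_fail to check_err.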
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index a90f394f9aa90..d374878cc0ba9 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -87,6 +87,11 @@ test_create_read()
+ {
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ ./create-read $file
++ if [ $? -ne 0 ]; then
++ echo "create and read $file failed"
++ file_cleanup $file
++ exit 1
++ fi
+ file_cleanup $file
+ }
+
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index 3145b0f1835c3..27a68bbe778be 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -38,11 +38,18 @@ cnt_trace() {
+
+ test_event_enabled() {
+ val=$1
++ check_times=10 # wait for 10 * SLEEP_TIME at most
+
+- e=`cat $EVENT_ENABLE`
+- if [ "$e" != $val ]; then
+- fail "Expected $val but found $e"
+- fi
++ while [ $check_times -ne 0 ]; do
++ e=`cat $EVENT_ENABLE`
++ if [ "$e" == $val ]; then
++ return 0
++ fi
++ sleep $SLEEP_TIME
++ check_times=$((check_times - 1))
++ done
++
++ fail "Expected $val but found $e"
+ }
+
+ run_enable_disable() {
+diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+index 1d806b8ffee2d..766c1790df664 100644
+--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
++++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+@@ -72,7 +72,7 @@ struct memslot_antagonist_args {
+ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
+ uint64_t nr_modifications)
+ {
+- uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
++ uint64_t pages = max_t(int, vm_get_page_size(vm), getpagesize()) / vm_get_page_size(vm);
+ uint64_t gpa;
+ int i;
+
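The memslot change stops reaching directly into struct kvm_vm for the page size and goes through the vm_get_page_size() accessor instead, which keeps the test building once the kvm_util library hides the struct layout. As an assumption about its shape (the real helper lives in the selftests' kvm_util library and is not shown in this patch), the accessor is essentially:

	/* Hypothetical sketch: hide the struct layout behind an accessor. */
	uint64_t vm_get_page_size(struct kvm_vm *vm)
	{
		return vm->page_size;
	}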
+diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+index b48e1833bc896..76645aaf2b58f 100755
+--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
++++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+@@ -35,6 +35,8 @@ cleanup() {
+ for i in 1 2;do ip netns del nsrouter$i;done
+ }
+
++trap cleanup EXIT
++
+ ipv4() {
+ echo -n 192.168.$1.2
+ }
+@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - <<EOF
+ table inet filter {
+ counter unknown { }
+ counter related { }
++ counter redir4 { }
++ counter redir6 { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+- meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
++ icmp type "redirect" ct state "related" counter name "redir4" accept
++ icmpv6 type "nd-redirect" ct state "related" counter name "redir6" accept
++
++ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
++
+ counter name "unknown" drop
+ }
+ }
+@@ -279,5 +287,29 @@ else
+ echo "ERROR: icmp error RELATED state test has failed"
+ fi
+
+-cleanup
++# add 'bad' route, expect icmp REDIRECT to be generated
++ip netns exec nsclient1 ip route add 192.168.1.42 via 192.168.1.1
++ip netns exec nsclient1 ip route add dead:1::42 via dead:1::1
++
++ip netns exec "nsclient1" ping -q -c 2 192.168.1.42 > /dev/null
++
++expect="packets 1 bytes 112"
++check_counter nsclient1 "redir4" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
++expect="packets 1 bytes 192"
++check_counter nsclient1 "redir6" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++if [ $ret -eq 0 ];then
++ echo "PASS: icmp redirects had RELATED state"
++else
++ echo "ERROR: icmp redirect RELATED state test has failed"
++fi
++
+ exit $ret
+diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+index fbbdffdb2e5d2..f20d1c166d1e4 100644
+--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
++++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc == -1) {
+ perror("read() failed");
++ close(fd);
+ return 1;
+ }
+ close(fd);
+@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
+ if (access(file, F_OK))
+ continue;
+
+- if (check_cpu_dscr_default(file, val))
++ if (check_cpu_dscr_default(file, val)) {
++ closedir(sysfs);
+ return 1;
++ }
+ }
+ closedir(sysfs);
+ return 0;
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
+index e7ceabed7f51f..7d0aa22bdc12b 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -17,6 +17,7 @@
+ // while shifting across CPUs.
+ #undef NDEBUG
+ #include <assert.h>
++#include <errno.h>
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <stdlib.h>
+@@ -54,7 +55,7 @@ int main(void)
+ len += sizeof(unsigned long);
+ free(m);
+ m = malloc(len);
+- } while (sys_sched_getaffinity(0, len, m) == -EINVAL);
++ } while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
+
+ fd = open("/proc/uptime", O_RDONLY);
+ assert(fd >= 0);
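The proc-uptime-002 fix matters because sys_sched_getaffinity() in this test wraps the libc syscall() function, which signals failure as -1 with the error code in errno rather than as a raw negative return; comparing the result against -EINVAL therefore never matched and the grow-the-buffer loop stopped after one pass. A standalone sketch of the convention (not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = sizeof(unsigned long);
		void *mask = malloc(len);
		long ret;

		/* While the buffer is smaller than the kernel's cpumask, syscall()
		 * returns -1 and sets errno to EINVAL; on success it returns the
		 * number of bytes the kernel copied out. */
		while ((ret = syscall(SYS_sched_getaffinity, 0, len, mask)) == -1 &&
		       errno == EINVAL) {
			len += sizeof(unsigned long);
			free(mask);
			mask = malloc(len);
		}

		printf("kernel cpumask size: %ld bytes\n", ret);
		free(mask);
		return 0;
	}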