Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Fri, 08 Apr 2022 12:53:43
Message-Id: 1649422399.9c9897d6615fc0de7efc1b691d9c9343f79e5f3f.mpagano@gentoo
1 commit: 9c9897d6615fc0de7efc1b691d9c9343f79e5f3f
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Apr 8 12:53:19 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Apr 8 12:53:19 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9c9897d6
7
8 Linux patch 5.17.2
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1001_linux-5.17.2.patch | 57034 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 57038 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 19269be2..07650c38 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -47,6 +47,10 @@ Patch: 1000_linux-5.17.1.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.17.1
23
24 +Patch: 1001_linux-5.17.2.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.17.2
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1001_linux-5.17.2.patch b/1001_linux-5.17.2.patch
33 new file mode 100644
34 index 00000000..01314802
35 --- /dev/null
36 +++ b/1001_linux-5.17.2.patch
37 @@ -0,0 +1,57034 @@
38 +diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
39 +index 2416b03ff2837..137f16feee084 100644
40 +--- a/Documentation/ABI/testing/sysfs-fs-f2fs
41 ++++ b/Documentation/ABI/testing/sysfs-fs-f2fs
42 +@@ -430,6 +430,7 @@ Description: Show status of f2fs superblock in real time.
43 + 0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP
44 + 0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted
45 + 0x2000 SBI_IS_RESIZEFS resizefs is in process
46 ++ 0x4000 SBI_IS_FREEZING freefs is in process
47 + ====== ===================== =================================
48 +
49 + What: /sys/fs/f2fs/<disk>/ckpt_thread_ioprio
50 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
51 +index 7123524a86b8b..59f881f367793 100644
52 +--- a/Documentation/admin-guide/kernel-parameters.txt
53 ++++ b/Documentation/admin-guide/kernel-parameters.txt
54 +@@ -3485,8 +3485,7 @@
55 + difficult since unequal pointers can no longer be
56 + compared. However, if this command-line option is
57 + specified, then all normal pointers will have their true
58 +- value printed. Pointers printed via %pK may still be
59 +- hashed. This option should only be specified when
60 ++ value printed. This option should only be specified when
61 + debugging the kernel. Please do not use on production
62 + kernels.
63 +
64 +diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
65 +index d359bcfadd39a..0f86e9f931293 100644
66 +--- a/Documentation/admin-guide/sysctl/kernel.rst
67 ++++ b/Documentation/admin-guide/sysctl/kernel.rst
68 +@@ -795,6 +795,7 @@ bit 1 print system memory info
69 + bit 2 print timer info
70 + bit 3 print locks info if ``CONFIG_LOCKDEP`` is on
71 + bit 4 print ftrace buffer
72 ++bit 5 print all printk messages in buffer
73 + ===== ============================================
74 +
75 + So for example to print tasks and memory info on panic, user can::
76 +diff --git a/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
77 +index 87992db389b28..3698b4b0900f5 100644
78 +--- a/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
79 ++++ b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
80 +@@ -92,6 +92,10 @@ properties:
81 + description: AMS Controller register space
82 + maxItems: 1
83 +
84 ++ clocks:
85 ++ items:
86 ++ - description: AMS reference clock
87 ++
88 + ranges:
89 + description:
90 + Maps the child address space for PS and/or PL.
91 +@@ -181,12 +185,15 @@ properties:
92 + required:
93 + - compatible
94 + - reg
95 ++ - clocks
96 + - ranges
97 +
98 + additionalProperties: false
99 +
100 + examples:
101 + - |
102 ++ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
103 ++
104 + bus {
105 + #address-cells = <2>;
106 + #size-cells = <2>;
107 +@@ -196,6 +203,7 @@ examples:
108 + interrupt-parent = <&gic>;
109 + interrupts = <0 56 4>;
110 + reg = <0x0 0xffa50000 0x0 0x800>;
111 ++ clocks = <&zynqmp_clk AMS_REF>;
112 + #address-cells = <1>;
113 + #size-cells = <1>;
114 + #io-channel-cells = <1>;
115 +diff --git a/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml b/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
116 +index 85a8877c2f387..1e2df8cf2937b 100644
117 +--- a/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
118 ++++ b/Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
119 +@@ -49,7 +49,8 @@ properties:
120 + description: Definition of the regulator used for the VDDD power supply.
121 +
122 + port:
123 +- $ref: /schemas/graph.yaml#/properties/port
124 ++ $ref: /schemas/graph.yaml#/$defs/port-base
125 ++ unevaluatedProperties: false
126 +
127 + properties:
128 + endpoint:
129 +@@ -68,8 +69,11 @@ properties:
130 + - const: 1
131 + - const: 2
132 +
133 ++ link-frequencies: true
134 ++
135 + required:
136 + - data-lanes
137 ++ - link-frequencies
138 +
139 + required:
140 + - compatible
141 +diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
142 +index 3a82b0b27fa0a..4fca71f343109 100644
143 +--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
144 ++++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml
145 +@@ -88,10 +88,9 @@ allOf:
146 + - mediatek,mt2701-smi-common
147 + then:
148 + properties:
149 +- clock:
150 +- items:
151 +- minItems: 3
152 +- maxItems: 3
153 ++ clocks:
154 ++ minItems: 3
155 ++ maxItems: 3
156 + clock-names:
157 + items:
158 + - const: apb
159 +@@ -108,10 +107,9 @@ allOf:
160 + required:
161 + - mediatek,smi
162 + properties:
163 +- clock:
164 +- items:
165 +- minItems: 3
166 +- maxItems: 3
167 ++ clocks:
168 ++ minItems: 3
169 ++ maxItems: 3
170 + clock-names:
171 + items:
172 + - const: apb
173 +@@ -133,10 +131,9 @@ allOf:
174 +
175 + then:
176 + properties:
177 +- clock:
178 +- items:
179 +- minItems: 4
180 +- maxItems: 4
181 ++ clocks:
182 ++ minItems: 4
183 ++ maxItems: 4
184 + clock-names:
185 + items:
186 + - const: apb
187 +@@ -146,10 +143,9 @@ allOf:
188 +
189 + else: # for gen2 HW that don't have gals
190 + properties:
191 +- clock:
192 +- items:
193 +- minItems: 2
194 +- maxItems: 2
195 ++ clocks:
196 ++ minItems: 2
197 ++ maxItems: 2
198 + clock-names:
199 + items:
200 + - const: apb
201 +diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
202 +index eaeff1ada7f89..c5c32c9100457 100644
203 +--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
204 ++++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
205 +@@ -79,11 +79,11 @@ allOf:
206 +
207 + then:
208 + properties:
209 +- clock:
210 +- items:
211 +- minItems: 3
212 +- maxItems: 3
213 ++ clocks:
214 ++ minItems: 2
215 ++ maxItems: 3
216 + clock-names:
217 ++ minItems: 2
218 + items:
219 + - const: apb
220 + - const: smi
221 +@@ -91,10 +91,9 @@ allOf:
222 +
223 + else:
224 + properties:
225 +- clock:
226 +- items:
227 +- minItems: 2
228 +- maxItems: 2
229 ++ clocks:
230 ++ minItems: 2
231 ++ maxItems: 2
232 + clock-names:
233 + items:
234 + - const: apb
235 +@@ -108,7 +107,6 @@ allOf:
236 + - mediatek,mt2701-smi-larb
237 + - mediatek,mt2712-smi-larb
238 + - mediatek,mt6779-smi-larb
239 +- - mediatek,mt8167-smi-larb
240 + - mediatek,mt8192-smi-larb
241 + - mediatek,mt8195-smi-larb
242 +
243 +diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
244 +index bd217e6f5018a..5cd144a9ec992 100644
245 +--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
246 ++++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
247 +@@ -55,7 +55,7 @@ patternProperties:
248 + properties:
249 + reg:
250 + description:
251 +- Contains the native Ready/Busy IDs.
252 ++ Contains the chip-select IDs.
253 +
254 + nand-ecc-engine:
255 + allOf:
256 +@@ -184,7 +184,7 @@ examples:
257 + nand-use-soft-ecc-engine;
258 + nand-ecc-algo = "bch";
259 +
260 +- /* controller specific properties */
261 ++ /* NAND chip specific properties */
262 + };
263 +
264 + nand@1 {
265 +diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
266 +index cb554084bdf11..0df4e114fdd69 100644
267 +--- a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
268 ++++ b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
269 +@@ -145,7 +145,7 @@ examples:
270 + clocks = <&sys_clk>;
271 + pinctrl-0 = <&sgpio2_pins>;
272 + pinctrl-names = "default";
273 +- reg = <0x1101059c 0x100>;
274 ++ reg = <0x1101059c 0x118>;
275 + microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
276 + bus-frequency = <25000000>;
277 + sgpio_in2: gpio@0 {
278 +diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
279 +index 328ea59c5466f..8299662c2c096 100644
280 +--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
281 ++++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
282 +@@ -99,6 +99,14 @@ patternProperties:
283 + enum: [2, 4, 6, 8, 10, 12, 14, 16]
284 +
285 + bias-pull-down:
286 ++ oneOf:
287 ++ - type: boolean
288 ++ - enum: [100, 101, 102, 103]
289 ++ description: mt8195 pull down PUPD/R0/R1 type define value.
290 ++ - enum: [200, 201, 202, 203, 204, 205, 206, 207]
291 ++ description: mt8195 pull down RSEL type define value.
292 ++ - enum: [75000, 5000]
293 ++ description: mt8195 pull down RSEL type si unit value(ohm).
294 + description: |
295 + For pull down type is normal, it don't need add RSEL & R1R0 define
296 + and resistance value.
297 +@@ -115,13 +123,6 @@ patternProperties:
298 + & "MTK_PULL_SET_RSEL_110" & "MTK_PULL_SET_RSEL_111"
299 + define in mt8195. It can also support resistance value(ohm)
300 + "75000" & "5000" in mt8195.
301 +- oneOf:
302 +- - enum: [100, 101, 102, 103]
303 +- - description: mt8195 pull down PUPD/R0/R1 type define value.
304 +- - enum: [200, 201, 202, 203, 204, 205, 206, 207]
305 +- - description: mt8195 pull down RSEL type define value.
306 +- - enum: [75000, 5000]
307 +- - description: mt8195 pull down RSEL type si unit value(ohm).
308 +
309 + An example of using RSEL define:
310 + pincontroller {
311 +@@ -146,6 +147,14 @@ patternProperties:
312 + };
313 +
314 + bias-pull-up:
315 ++ oneOf:
316 ++ - type: boolean
317 ++ - enum: [100, 101, 102, 103]
318 ++ description: mt8195 pull up PUPD/R0/R1 type define value.
319 ++ - enum: [200, 201, 202, 203, 204, 205, 206, 207]
320 ++ description: mt8195 pull up RSEL type define value.
321 ++ - enum: [1000, 1500, 2000, 3000, 4000, 5000, 10000, 75000]
322 ++ description: mt8195 pull up RSEL type si unit value(ohm).
323 + description: |
324 + For pull up type is normal, it don't need add RSEL & R1R0 define
325 + and resistance value.
326 +@@ -163,13 +172,6 @@ patternProperties:
327 + define in mt8195. It can also support resistance value(ohm)
328 + "1000" & "1500" & "2000" & "3000" & "4000" & "5000" & "10000" &
329 + "75000" in mt8195.
330 +- oneOf:
331 +- - enum: [100, 101, 102, 103]
332 +- - description: mt8195 pull up PUPD/R0/R1 type define value.
333 +- - enum: [200, 201, 202, 203, 204, 205, 206, 207]
334 +- - description: mt8195 pull up RSEL type define value.
335 +- - enum: [1000, 1500, 2000, 3000, 4000, 5000, 10000, 75000]
336 +- - description: mt8195 pull up RSEL type si unit value(ohm).
337 + An example of using RSEL define:
338 + pincontroller {
339 + i2c0-pins {
340 +diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
341 +index 35a8045b2c70d..53627c6e2ae32 100644
342 +--- a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
343 ++++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
344 +@@ -106,7 +106,7 @@ examples:
345 + dma-names = "rx", "tx";
346 +
347 + flash@0 {
348 +- compatible = "spi-nor";
349 ++ compatible = "jedec,spi-nor";
350 + reg = <0>;
351 + spi-max-frequency = <104000000>;
352 + spi-tx-bus-width = <2>;
353 +diff --git a/Documentation/devicetree/bindings/spi/spi-mxic.txt b/Documentation/devicetree/bindings/spi/spi-mxic.txt
354 +index 529f2dab2648a..7bcbb229b78bb 100644
355 +--- a/Documentation/devicetree/bindings/spi/spi-mxic.txt
356 ++++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt
357 +@@ -8,11 +8,13 @@ Required properties:
358 + - reg: should contain 2 entries, one for the registers and one for the direct
359 + mapping area
360 + - reg-names: should contain "regs" and "dirmap"
361 +-- interrupts: interrupt line connected to the SPI controller
362 + - clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk"
363 + - clocks: should contain 3 entries for the "ps_clk", "send_clk" and
364 + "send_dly_clk" clocks
365 +
366 ++Optional properties:
367 ++- interrupts: interrupt line connected to the SPI controller
368 ++
369 + Example:
370 +
371 + spi@43c30000 {
372 +diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.yaml b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
373 +index 56853c17af667..1dc3d5d7b44fe 100644
374 +--- a/Documentation/devicetree/bindings/usb/usb-hcd.yaml
375 ++++ b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
376 +@@ -33,7 +33,7 @@ patternProperties:
377 + "^.*@[0-9a-f]{1,2}$":
378 + description: The hard wired USB devices
379 + type: object
380 +- $ref: /usb/usb-device.yaml
381 ++ $ref: /schemas/usb/usb-device.yaml
382 +
383 + additionalProperties: true
384 +
385 +diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
386 +index 3b8f41395f6b5..c8f7a16cd0e3c 100644
387 +--- a/Documentation/driver-api/cxl/memory-devices.rst
388 ++++ b/Documentation/driver-api/cxl/memory-devices.rst
389 +@@ -36,10 +36,10 @@ CXL Core
390 + .. kernel-doc:: drivers/cxl/cxl.h
391 + :internal:
392 +
393 +-.. kernel-doc:: drivers/cxl/core/bus.c
394 ++.. kernel-doc:: drivers/cxl/core/port.c
395 + :doc: cxl core
396 +
397 +-.. kernel-doc:: drivers/cxl/core/bus.c
398 ++.. kernel-doc:: drivers/cxl/core/port.c
399 + :identifiers:
400 +
401 + .. kernel-doc:: drivers/cxl/core/pmem.c
402 +diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
403 +index 003c865e9c212..fbcb48bc2a903 100644
404 +--- a/Documentation/process/stable-kernel-rules.rst
405 ++++ b/Documentation/process/stable-kernel-rules.rst
406 +@@ -168,7 +168,16 @@ Trees
407 + - The finalized and tagged releases of all stable kernels can be found
408 + in separate branches per version at:
409 +
410 +- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
411 ++ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
412 ++
413 ++ - The release candidate of all stable kernel versions can be found at:
414 ++
415 ++ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/
416 ++
417 ++ .. warning::
418 ++ The -stable-rc tree is a snapshot in time of the stable-queue tree and
419 ++ will change frequently, hence will be rebased often. It should only be
420 ++ used for testing purposes (e.g. to be consumed by CI systems).
421 +
422 +
423 + Review committee
424 +diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst
425 +index d5fd6ccc3dcbd..b73eb764a0017 100644
426 +--- a/Documentation/security/SCTP.rst
427 ++++ b/Documentation/security/SCTP.rst
428 +@@ -15,10 +15,7 @@ For security module support, three SCTP specific hooks have been implemented::
429 + security_sctp_assoc_request()
430 + security_sctp_bind_connect()
431 + security_sctp_sk_clone()
432 +-
433 +-Also the following security hook has been utilised::
434 +-
435 +- security_inet_conn_established()
436 ++ security_sctp_assoc_established()
437 +
438 + The usage of these hooks are described below with the SELinux implementation
439 + described in the `SCTP SELinux Support`_ chapter.
440 +@@ -122,11 +119,12 @@ calls **sctp_peeloff**\(3).
441 + @newsk - pointer to new sock structure.
442 +
443 +
444 +-security_inet_conn_established()
445 +-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
446 +-Called when a COOKIE ACK is received::
447 ++security_sctp_assoc_established()
448 ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
449 ++Called when a COOKIE ACK is received, and the peer secid will be
450 ++saved into ``@asoc->peer_secid`` for client::
451 +
452 +- @sk - pointer to sock structure.
453 ++ @asoc - pointer to sctp association structure.
454 + @skb - pointer to skbuff of the COOKIE ACK packet.
455 +
456 +
457 +@@ -134,7 +132,7 @@ Security Hooks used for Association Establishment
458 + -------------------------------------------------
459 +
460 + The following diagram shows the use of ``security_sctp_bind_connect()``,
461 +-``security_sctp_assoc_request()``, ``security_inet_conn_established()`` when
462 ++``security_sctp_assoc_request()``, ``security_sctp_assoc_established()`` when
463 + establishing an association.
464 + ::
465 +
466 +@@ -172,7 +170,7 @@ establishing an association.
467 + <------------------------------------------- COOKIE ACK
468 + | |
469 + sctp_sf_do_5_1E_ca |
470 +- Call security_inet_conn_established() |
471 ++ Call security_sctp_assoc_established() |
472 + to set the peer label. |
473 + | |
474 + | If SCTP_SOCKET_TCP or peeled off
475 +@@ -198,7 +196,7 @@ hooks with the SELinux specifics expanded below::
476 + security_sctp_assoc_request()
477 + security_sctp_bind_connect()
478 + security_sctp_sk_clone()
479 +- security_inet_conn_established()
480 ++ security_sctp_assoc_established()
481 +
482 +
483 + security_sctp_assoc_request()
484 +@@ -271,12 +269,12 @@ sockets sid and peer sid to that contained in the ``@asoc sid`` and
485 + @newsk - pointer to new sock structure.
486 +
487 +
488 +-security_inet_conn_established()
489 +-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
490 ++security_sctp_assoc_established()
491 ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
492 + Called when a COOKIE ACK is received where it sets the connection's peer sid
493 + to that in ``@skb``::
494 +
495 +- @sk - pointer to sock structure.
496 ++ @asoc - pointer to sctp association structure.
497 + @skb - pointer to skbuff of the COOKIE ACK packet.
498 +
499 +
500 +diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
501 +index d25335993e553..9b52f50a68542 100644
502 +--- a/Documentation/sound/hd-audio/models.rst
503 ++++ b/Documentation/sound/hd-audio/models.rst
504 +@@ -261,6 +261,10 @@ alc-sense-combo
505 + huawei-mbx-stereo
506 + Enable initialization verbs for Huawei MBX stereo speakers;
507 + might be risky, try this at your own risk
508 ++alc298-samsung-headphone
509 ++ Samsung laptops with ALC298
510 ++alc256-samsung-headphone
511 ++ Samsung laptops with ALC256
512 +
513 + ALC66x/67x/892
514 + ==============
515 +diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
516 +index 9a35f50798a65..2c573541ab712 100644
517 +--- a/Documentation/sphinx/requirements.txt
518 ++++ b/Documentation/sphinx/requirements.txt
519 +@@ -1,2 +1,4 @@
520 ++# jinja2>=3.1 is not compatible with Sphinx<4.0
521 ++jinja2<3.1
522 + sphinx_rtd_theme
523 + Sphinx==2.4.4
524 +diff --git a/MAINTAINERS b/MAINTAINERS
525 +index cd0f68d4a34a6..d9b2f1731ee06 100644
526 +--- a/MAINTAINERS
527 ++++ b/MAINTAINERS
528 +@@ -16373,8 +16373,7 @@ M: Linus Walleij <linus.walleij@××××××.org>
529 + M: Alvin Šipraga <alsi@××××××××××××.dk>
530 + S: Maintained
531 + F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
532 +-F: drivers/net/dsa/realtek-smi*
533 +-F: drivers/net/dsa/rtl83*
534 ++F: drivers/net/dsa/realtek/*
535 +
536 + REALTEK WIRELESS DRIVER (rtlwifi family)
537 + M: Ping-Ke Shih <pkshih@×××××××.com>
538 +diff --git a/Makefile b/Makefile
539 +index 34f9f5a9457af..06d852cad74ff 100644
540 +--- a/Makefile
541 ++++ b/Makefile
542 +@@ -1,7 +1,7 @@
543 + # SPDX-License-Identifier: GPL-2.0
544 + VERSION = 5
545 + PATCHLEVEL = 17
546 +-SUBLEVEL = 1
547 ++SUBLEVEL = 2
548 + EXTRAVERSION =
549 + NAME = Superb Owl
550 +
551 +diff --git a/arch/Kconfig b/arch/Kconfig
552 +index 678a80713b213..5e88237f84d2d 100644
553 +--- a/arch/Kconfig
554 ++++ b/arch/Kconfig
555 +@@ -1162,6 +1162,7 @@ config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
556 + config RANDOMIZE_KSTACK_OFFSET_DEFAULT
557 + bool "Randomize kernel stack offset on syscall entry"
558 + depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
559 ++ depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
560 + help
561 + The kernel stack offset can be randomized (after pt_regs) by
562 + roughly 5 bits of entropy, frustrating memory corruption
563 +diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
564 +index 8e90052f6f056..5f7f5aab361f1 100644
565 +--- a/arch/arc/kernel/process.c
566 ++++ b/arch/arc/kernel/process.c
567 +@@ -43,7 +43,7 @@ SYSCALL_DEFINE0(arc_gettls)
568 + return task_thread_info(current)->thr_ptr;
569 + }
570 +
571 +-SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
572 ++SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
573 + {
574 + struct pt_regs *regs = current_pt_regs();
575 + u32 uval;
576 +diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
577 +index 21294f775a20f..89af57482bc8f 100644
578 +--- a/arch/arm/boot/dts/bcm2711.dtsi
579 ++++ b/arch/arm/boot/dts/bcm2711.dtsi
580 +@@ -459,12 +459,26 @@
581 + #size-cells = <0>;
582 + enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit
583 +
584 ++ /* Source for d/i-cache-line-size and d/i-cache-sets
585 ++ * https://developer.arm.com/documentation/100095/0003
586 ++ * /Level-1-Memory-System/About-the-L1-memory-system?lang=en
587 ++ * Source for d/i-cache-size
588 ++ * https://www.raspberrypi.com/documentation/computers
589 ++ * /processors.html#bcm2711
590 ++ */
591 + cpu0: cpu@0 {
592 + device_type = "cpu";
593 + compatible = "arm,cortex-a72";
594 + reg = <0>;
595 + enable-method = "spin-table";
596 + cpu-release-addr = <0x0 0x000000d8>;
597 ++ d-cache-size = <0x8000>;
598 ++ d-cache-line-size = <64>;
599 ++ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
600 ++ i-cache-size = <0xc000>;
601 ++ i-cache-line-size = <64>;
602 ++ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set
603 ++ next-level-cache = <&l2>;
604 + };
605 +
606 + cpu1: cpu@1 {
607 +@@ -473,6 +487,13 @@
608 + reg = <1>;
609 + enable-method = "spin-table";
610 + cpu-release-addr = <0x0 0x000000e0>;
611 ++ d-cache-size = <0x8000>;
612 ++ d-cache-line-size = <64>;
613 ++ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
614 ++ i-cache-size = <0xc000>;
615 ++ i-cache-line-size = <64>;
616 ++ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set
617 ++ next-level-cache = <&l2>;
618 + };
619 +
620 + cpu2: cpu@2 {
621 +@@ -481,6 +502,13 @@
622 + reg = <2>;
623 + enable-method = "spin-table";
624 + cpu-release-addr = <0x0 0x000000e8>;
625 ++ d-cache-size = <0x8000>;
626 ++ d-cache-line-size = <64>;
627 ++ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
628 ++ i-cache-size = <0xc000>;
629 ++ i-cache-line-size = <64>;
630 ++ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set
631 ++ next-level-cache = <&l2>;
632 + };
633 +
634 + cpu3: cpu@3 {
635 +@@ -489,6 +517,28 @@
636 + reg = <3>;
637 + enable-method = "spin-table";
638 + cpu-release-addr = <0x0 0x000000f0>;
639 ++ d-cache-size = <0x8000>;
640 ++ d-cache-line-size = <64>;
641 ++ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
642 ++ i-cache-size = <0xc000>;
643 ++ i-cache-line-size = <64>;
644 ++ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set
645 ++ next-level-cache = <&l2>;
646 ++ };
647 ++
648 ++ /* Source for d/i-cache-line-size and d/i-cache-sets
649 ++ * https://developer.arm.com/documentation/100095/0003
650 ++ * /Level-2-Memory-System/About-the-L2-memory-system?lang=en
651 ++ * Source for d/i-cache-size
652 ++ * https://www.raspberrypi.com/documentation/computers
653 ++ * /processors.html#bcm2711
654 ++ */
655 ++ l2: l2-cache0 {
656 ++ compatible = "cache";
657 ++ cache-size = <0x100000>;
658 ++ cache-line-size = <64>;
659 ++ cache-sets = <1024>; // 1MiB(size)/64(line-size)=16384ways/16-way set
660 ++ cache-level = <2>;
661 + };
662 + };
663 +
664 +diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
665 +index 0199ec98cd616..5dbdebc462594 100644
666 +--- a/arch/arm/boot/dts/bcm2837.dtsi
667 ++++ b/arch/arm/boot/dts/bcm2837.dtsi
668 +@@ -40,12 +40,26 @@
669 + #size-cells = <0>;
670 + enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit
671 +
672 ++ /* Source for d/i-cache-line-size and d/i-cache-sets
673 ++ * https://developer.arm.com/documentation/ddi0500/e/level-1-memory-system
674 ++ * /about-the-l1-memory-system?lang=en
675 ++ *
676 ++ * Source for d/i-cache-size
677 ++ * https://magpi.raspberrypi.com/articles/raspberry-pi-3-specs-benchmarks
678 ++ */
679 + cpu0: cpu@0 {
680 + device_type = "cpu";
681 + compatible = "arm,cortex-a53";
682 + reg = <0>;
683 + enable-method = "spin-table";
684 + cpu-release-addr = <0x0 0x000000d8>;
685 ++ d-cache-size = <0x8000>;
686 ++ d-cache-line-size = <64>;
687 ++ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
688 ++ i-cache-size = <0x8000>;
689 ++ i-cache-line-size = <64>;
690 ++ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
691 ++ next-level-cache = <&l2>;
692 + };
693 +
694 + cpu1: cpu@1 {
695 +@@ -54,6 +68,13 @@
696 + reg = <1>;
697 + enable-method = "spin-table";
698 + cpu-release-addr = <0x0 0x000000e0>;
699 ++ d-cache-size = <0x8000>;
700 ++ d-cache-line-size = <64>;
701 ++ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
702 ++ i-cache-size = <0x8000>;
703 ++ i-cache-line-size = <64>;
704 ++ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
705 ++ next-level-cache = <&l2>;
706 + };
707 +
708 + cpu2: cpu@2 {
709 +@@ -62,6 +83,13 @@
710 + reg = <2>;
711 + enable-method = "spin-table";
712 + cpu-release-addr = <0x0 0x000000e8>;
713 ++ d-cache-size = <0x8000>;
714 ++ d-cache-line-size = <64>;
715 ++ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
716 ++ i-cache-size = <0x8000>;
717 ++ i-cache-line-size = <64>;
718 ++ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
719 ++ next-level-cache = <&l2>;
720 + };
721 +
722 + cpu3: cpu@3 {
723 +@@ -70,6 +98,27 @@
724 + reg = <3>;
725 + enable-method = "spin-table";
726 + cpu-release-addr = <0x0 0x000000f0>;
727 ++ d-cache-size = <0x8000>;
728 ++ d-cache-line-size = <64>;
729 ++ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
730 ++ i-cache-size = <0x8000>;
731 ++ i-cache-line-size = <64>;
732 ++ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
733 ++ next-level-cache = <&l2>;
734 ++ };
735 ++
736 ++ /* Source for cache-line-size + cache-sets
737 ++ * https://developer.arm.com/documentation/ddi0500
738 ++ * /e/level-2-memory-system/about-the-l2-memory-system?lang=en
739 ++ * Source for cache-size
740 ++ * https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf
741 ++ */
742 ++ l2: l2-cache0 {
743 ++ compatible = "cache";
744 ++ cache-size = <0x80000>;
745 ++ cache-line-size = <64>;
746 ++ cache-sets = <512>; // 512KiB(size)/64(line-size)=8192ways/16-way set
747 ++ cache-level = <2>;
748 + };
749 + };
750 + };
751 +diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
752 +index 956a26d52a4c3..0a11bacffc1f1 100644
753 +--- a/arch/arm/boot/dts/dra7-l4.dtsi
754 ++++ b/arch/arm/boot/dts/dra7-l4.dtsi
755 +@@ -3482,8 +3482,7 @@
756 + ti,timer-pwm;
757 + };
758 + };
759 +-
760 +- target-module@2c000 { /* 0x4882c000, ap 17 02.0 */
761 ++ timer15_target: target-module@2c000 { /* 0x4882c000, ap 17 02.0 */
762 + compatible = "ti,sysc-omap4-timer", "ti,sysc";
763 + reg = <0x2c000 0x4>,
764 + <0x2c010 0x4>;
765 +@@ -3511,7 +3510,7 @@
766 + };
767 + };
768 +
769 +- target-module@2e000 { /* 0x4882e000, ap 19 14.0 */
770 ++ timer16_target: target-module@2e000 { /* 0x4882e000, ap 19 14.0 */
771 + compatible = "ti,sysc-omap4-timer", "ti,sysc";
772 + reg = <0x2e000 0x4>,
773 + <0x2e010 0x4>;
774 +diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
775 +index 42bff117656cf..97ce0c4f1df7e 100644
776 +--- a/arch/arm/boot/dts/dra7.dtsi
777 ++++ b/arch/arm/boot/dts/dra7.dtsi
778 +@@ -1339,20 +1339,20 @@
779 + };
780 +
781 + /* Local timers, see ARM architected timer wrap erratum i940 */
782 +-&timer3_target {
783 ++&timer15_target {
784 + ti,no-reset-on-init;
785 + ti,no-idle;
786 + timer@0 {
787 +- assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
788 ++ assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
789 + assigned-clock-parents = <&timer_sys_clk_div>;
790 + };
791 + };
792 +
793 +-&timer4_target {
794 ++&timer16_target {
795 + ti,no-reset-on-init;
796 + ti,no-idle;
797 + timer@0 {
798 +- assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
799 ++ assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
800 + assigned-clock-parents = <&timer_sys_clk_div>;
801 + };
802 + };
803 +diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
804 +index d31a68672bfac..d7d756614edd1 100644
805 +--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
806 ++++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
807 +@@ -260,7 +260,7 @@
808 + };
809 +
810 + uart3_data: uart3-data {
811 +- samsung,pins = "gpa1-4", "gpa1-4";
812 ++ samsung,pins = "gpa1-4", "gpa1-5";
813 + samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
814 + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
815 + samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
816 +diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
817 +index 39bbe18145cf2..f042954bdfa5d 100644
818 +--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
819 ++++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
820 +@@ -118,6 +118,9 @@
821 + status = "okay";
822 + ddc = <&i2c_2>;
823 + hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
824 ++ vdd-supply = <&ldo8_reg>;
825 ++ vdd_osc-supply = <&ldo10_reg>;
826 ++ vdd_pll-supply = <&ldo8_reg>;
827 + };
828 +
829 + &i2c_0 {
830 +diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
831 +index a4f0e3ffedbd3..07f65213aae65 100644
832 +--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
833 ++++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
834 +@@ -124,6 +124,9 @@
835 + hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>;
836 + pinctrl-names = "default";
837 + pinctrl-0 = <&hdmi_hpd_irq>;
838 ++ vdd-supply = <&ldo6_reg>;
839 ++ vdd_osc-supply = <&ldo7_reg>;
840 ++ vdd_pll-supply = <&ldo6_reg>;
841 + };
842 +
843 + &hsi2c_4 {
844 +diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
845 +index 4f88e96d81ddb..d5c68d1ea707c 100644
846 +--- a/arch/arm/boot/dts/imx53-m53menlo.dts
847 ++++ b/arch/arm/boot/dts/imx53-m53menlo.dts
848 +@@ -53,6 +53,31 @@
849 + };
850 + };
851 +
852 ++ lvds-decoder {
853 ++ compatible = "ti,ds90cf364a", "lvds-decoder";
854 ++
855 ++ ports {
856 ++ #address-cells = <1>;
857 ++ #size-cells = <0>;
858 ++
859 ++ port@0 {
860 ++ reg = <0>;
861 ++
862 ++ lvds_decoder_in: endpoint {
863 ++ remote-endpoint = <&lvds0_out>;
864 ++ };
865 ++ };
866 ++
867 ++ port@1 {
868 ++ reg = <1>;
869 ++
870 ++ lvds_decoder_out: endpoint {
871 ++ remote-endpoint = <&panel_in>;
872 ++ };
873 ++ };
874 ++ };
875 ++ };
876 ++
877 + panel {
878 + compatible = "edt,etm0700g0dh6";
879 + pinctrl-0 = <&pinctrl_display_gpio>;
880 +@@ -61,7 +86,7 @@
881 +
882 + port {
883 + panel_in: endpoint {
884 +- remote-endpoint = <&lvds0_out>;
885 ++ remote-endpoint = <&lvds_decoder_out>;
886 + };
887 + };
888 + };
889 +@@ -450,7 +475,7 @@
890 + reg = <2>;
891 +
892 + lvds0_out: endpoint {
893 +- remote-endpoint = <&panel_in>;
894 ++ remote-endpoint = <&lvds_decoder_in>;
895 + };
896 + };
897 + };
898 +diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
899 +index 62b771c1d5a9a..f1c60b0cb143e 100644
900 +--- a/arch/arm/boot/dts/imx7-colibri.dtsi
901 ++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
902 +@@ -40,7 +40,7 @@
903 +
904 + dailink_master: simple-audio-card,codec {
905 + sound-dai = <&codec>;
906 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
907 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
908 + };
909 + };
910 + };
911 +@@ -293,7 +293,7 @@
912 + compatible = "fsl,sgtl5000";
913 + #sound-dai-cells = <0>;
914 + reg = <0x0a>;
915 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
916 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
917 + pinctrl-names = "default";
918 + pinctrl-0 = <&pinctrl_sai1_mclk>;
919 + VDDA-supply = <&reg_module_3v3_avdd>;
920 +diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi
921 +index 49086c6b6a0a2..3df6dff7734ae 100644
922 +--- a/arch/arm/boot/dts/imx7-mba7.dtsi
923 ++++ b/arch/arm/boot/dts/imx7-mba7.dtsi
924 +@@ -302,7 +302,7 @@
925 + tlv320aic32x4: audio-codec@18 {
926 + compatible = "ti,tlv320aic32x4";
927 + reg = <0x18>;
928 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
929 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
930 + clock-names = "mclk";
931 + ldoin-supply = <&reg_audio_3v3>;
932 + iov-supply = <&reg_audio_3v3>;
933 +diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
934 +index e0751e6ba3c0f..a31de900139d6 100644
935 +--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
936 ++++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
937 +@@ -288,7 +288,7 @@
938 + codec: wm8960@1a {
939 + compatible = "wlf,wm8960";
940 + reg = <0x1a>;
941 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
942 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
943 + clock-names = "mclk";
944 + wlf,shared-lrclk;
945 + };
946 +diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
947 +index 7b2198a9372c6..d917dc4f2f227 100644
948 +--- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts
949 ++++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
950 +@@ -31,7 +31,7 @@
951 +
952 + dailink_master: simple-audio-card,codec {
953 + sound-dai = <&sgtl5000>;
954 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
955 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
956 + };
957 + };
958 + };
959 +@@ -41,7 +41,7 @@
960 + #sound-dai-cells = <0>;
961 + reg = <0x0a>;
962 + compatible = "fsl,sgtl5000";
963 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
964 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
965 + VDDA-supply = <&reg_2p5v>;
966 + VDDIO-supply = <&reg_vref_1v8>;
967 + };
968 +diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts
969 +index 70bea95c06d83..f263e391e24cb 100644
970 +--- a/arch/arm/boot/dts/imx7d-pico-pi.dts
971 ++++ b/arch/arm/boot/dts/imx7d-pico-pi.dts
972 +@@ -31,7 +31,7 @@
973 +
974 + dailink_master: simple-audio-card,codec {
975 + sound-dai = <&sgtl5000>;
976 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
977 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
978 + };
979 + };
980 + };
981 +@@ -41,7 +41,7 @@
982 + #sound-dai-cells = <0>;
983 + reg = <0x0a>;
984 + compatible = "fsl,sgtl5000";
985 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
986 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
987 + VDDA-supply = <&reg_2p5v>;
988 + VDDIO-supply = <&reg_vref_1v8>;
989 + };
990 +diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
991 +index 7813ef960f6ee..f053f51227417 100644
992 +--- a/arch/arm/boot/dts/imx7d-sdb.dts
993 ++++ b/arch/arm/boot/dts/imx7d-sdb.dts
994 +@@ -385,14 +385,14 @@
995 + codec: wm8960@1a {
996 + compatible = "wlf,wm8960";
997 + reg = <0x1a>;
998 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
999 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
1000 + clock-names = "mclk";
1001 + wlf,shared-lrclk;
1002 + wlf,hp-cfg = <2 2 3>;
1003 + wlf,gpio-cfg = <1 3>;
1004 + assigned-clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_SRC>,
1005 + <&clks IMX7D_PLL_AUDIO_POST_DIV>,
1006 +- <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
1007 ++ <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
1008 + assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
1009 + assigned-clock-rates = <0>, <884736000>, <12288000>;
1010 + };
1011 +diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts
1012 +index 4f1edef06c922..e8734d218b9de 100644
1013 +--- a/arch/arm/boot/dts/imx7s-warp.dts
1014 ++++ b/arch/arm/boot/dts/imx7s-warp.dts
1015 +@@ -75,7 +75,7 @@
1016 +
1017 + dailink_master: simple-audio-card,codec {
1018 + sound-dai = <&codec>;
1019 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
1020 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
1021 + };
1022 + };
1023 + };
1024 +@@ -232,7 +232,7 @@
1025 + #sound-dai-cells = <0>;
1026 + reg = <0x0a>;
1027 + compatible = "fsl,sgtl5000";
1028 +- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
1029 ++ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
1030 + pinctrl-names = "default";
1031 + pinctrl-0 = <&pinctrl_sai1_mclk>;
1032 + VDDA-supply = <&vgen4_reg>;
1033 +diff --git a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
1034 +index 31f59de5190b8..7af41361c4800 100644
1035 +--- a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
1036 ++++ b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi
1037 +@@ -28,7 +28,7 @@ partitions {
1038 + label = "rofs";
1039 + };
1040 +
1041 +- rwfs@6000000 {
1042 ++ rwfs@2a00000 {
1043 + reg = <0x2a00000 0x1600000>; // 22MB
1044 + label = "rwfs";
1045 + };
1046 +diff --git a/arch/arm/boot/dts/openbmc-flash-layout.dtsi b/arch/arm/boot/dts/openbmc-flash-layout.dtsi
1047 +index 6c26524e93e11..b47e14063c380 100644
1048 +--- a/arch/arm/boot/dts/openbmc-flash-layout.dtsi
1049 ++++ b/arch/arm/boot/dts/openbmc-flash-layout.dtsi
1050 +@@ -20,7 +20,7 @@ partitions {
1051 + label = "kernel";
1052 + };
1053 +
1054 +- rofs@c0000 {
1055 ++ rofs@4c0000 {
1056 + reg = <0x4c0000 0x1740000>;
1057 + label = "rofs";
1058 + };
1059 +diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
1060 +index 7dec0553636e5..51c365fdf3bfd 100644
1061 +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
1062 ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
1063 +@@ -142,7 +142,8 @@
1064 + clocks {
1065 + sleep_clk: sleep_clk {
1066 + compatible = "fixed-clock";
1067 +- clock-frequency = <32768>;
1068 ++ clock-frequency = <32000>;
1069 ++ clock-output-names = "gcc_sleep_clk_src";
1070 + #clock-cells = <0>;
1071 + };
1072 +
1073 +diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
1074 +index 2a0ec97a264f2..a0f9ab7f08f34 100644
1075 +--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
1076 ++++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
1077 +@@ -146,7 +146,9 @@
1078 + reg = <0x108000 0x1000>;
1079 + qcom,ipc = <&l2cc 0x8 2>;
1080 +
1081 +- interrupts = <0 19 0>, <0 21 0>, <0 22 0>;
1082 ++ interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
1083 ++ <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
1084 ++ <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
1085 + interrupt-names = "ack", "err", "wakeup";
1086 +
1087 + regulators {
1088 +@@ -192,7 +194,7 @@
1089 + compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
1090 + reg = <0x16440000 0x1000>,
1091 + <0x16400000 0x1000>;
1092 +- interrupts = <0 154 0x0>;
1093 ++ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
1094 + clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>;
1095 + clock-names = "core", "iface";
1096 + status = "disabled";
1097 +@@ -318,7 +320,7 @@
1098 + #address-cells = <1>;
1099 + #size-cells = <0>;
1100 + reg = <0x16080000 0x1000>;
1101 +- interrupts = <0 147 0>;
1102 ++ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
1103 + spi-max-frequency = <24000000>;
1104 + cs-gpios = <&msmgpio 8 0>;
1105 +
1106 +diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
1107 +index 8ac0492c76595..40f11159f061e 100644
1108 +--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
1109 ++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
1110 +@@ -413,7 +413,7 @@
1111 + <0x40000000 0xf1d>,
1112 + <0x40000f20 0xc8>,
1113 + <0x40001000 0x1000>,
1114 +- <0x40002000 0x10000>,
1115 ++ <0x40200000 0x100000>,
1116 + <0x01c03000 0x3000>;
1117 + reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
1118 + "mmio";
1119 +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
1120 +index 09c741e8ecb87..c700c3b19e4c4 100644
1121 +--- a/arch/arm/boot/dts/sama5d2.dtsi
1122 ++++ b/arch/arm/boot/dts/sama5d2.dtsi
1123 +@@ -415,7 +415,7 @@
1124 + pmecc: ecc-engine@f8014070 {
1125 + compatible = "atmel,sama5d2-pmecc";
1126 + reg = <0xf8014070 0x490>,
1127 +- <0xf8014500 0x100>;
1128 ++ <0xf8014500 0x200>;
1129 + };
1130 + };
1131 +
1132 +diff --git a/arch/arm/boot/dts/sama7g5.dtsi b/arch/arm/boot/dts/sama7g5.dtsi
1133 +index eddcfbf4d2233..22520cdd37fc5 100644
1134 +--- a/arch/arm/boot/dts/sama7g5.dtsi
1135 ++++ b/arch/arm/boot/dts/sama7g5.dtsi
1136 +@@ -382,8 +382,6 @@
1137 + dmas = <&dma0 AT91_XDMAC_DT_PERID(7)>,
1138 + <&dma0 AT91_XDMAC_DT_PERID(8)>;
1139 + dma-names = "rx", "tx";
1140 +- atmel,use-dma-rx;
1141 +- atmel,use-dma-tx;
1142 + status = "disabled";
1143 + };
1144 + };
1145 +@@ -558,8 +556,6 @@
1146 + dmas = <&dma0 AT91_XDMAC_DT_PERID(21)>,
1147 + <&dma0 AT91_XDMAC_DT_PERID(22)>;
1148 + dma-names = "rx", "tx";
1149 +- atmel,use-dma-rx;
1150 +- atmel,use-dma-tx;
1151 + status = "disabled";
1152 + };
1153 + };
1154 +@@ -584,8 +580,6 @@
1155 + dmas = <&dma0 AT91_XDMAC_DT_PERID(23)>,
1156 + <&dma0 AT91_XDMAC_DT_PERID(24)>;
1157 + dma-names = "rx", "tx";
1158 +- atmel,use-dma-rx;
1159 +- atmel,use-dma-tx;
1160 + status = "disabled";
1161 + };
1162 + };
1163 +diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
1164 +index 827e887afbda4..13e1bdb3ddbf1 100644
1165 +--- a/arch/arm/boot/dts/spear1340.dtsi
1166 ++++ b/arch/arm/boot/dts/spear1340.dtsi
1167 +@@ -134,9 +134,9 @@
1168 + reg = <0xb4100000 0x1000>;
1169 + interrupts = <0 105 0x4>;
1170 + status = "disabled";
1171 +- dmas = <&dwdma0 12 0 1>,
1172 +- <&dwdma0 13 1 0>;
1173 +- dma-names = "tx", "rx";
1174 ++ dmas = <&dwdma0 13 0 1>,
1175 ++ <&dwdma0 12 1 0>;
1176 ++ dma-names = "rx", "tx";
1177 + };
1178 +
1179 + thermal@e07008c4 {
1180 +diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
1181 +index c87b881b2c8bb..9135533676879 100644
1182 +--- a/arch/arm/boot/dts/spear13xx.dtsi
1183 ++++ b/arch/arm/boot/dts/spear13xx.dtsi
1184 +@@ -284,9 +284,9 @@
1185 + #size-cells = <0>;
1186 + interrupts = <0 31 0x4>;
1187 + status = "disabled";
1188 +- dmas = <&dwdma0 4 0 0>,
1189 +- <&dwdma0 5 0 0>;
1190 +- dma-names = "tx", "rx";
1191 ++ dmas = <&dwdma0 5 0 0>,
1192 ++ <&dwdma0 4 0 0>;
1193 ++ dma-names = "rx", "tx";
1194 + };
1195 +
1196 + rtc@e0580000 {
1197 +diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
1198 +index 3b65130affec8..6161f5906ec11 100644
1199 +--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
1200 ++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
1201 +@@ -1190,7 +1190,7 @@
1202 + };
1203 + };
1204 +
1205 +- sai2a_sleep_pins_c: sai2a-2 {
1206 ++ sai2a_sleep_pins_c: sai2a-sleep-2 {
1207 + pins {
1208 + pinmux = <STM32_PINMUX('D', 13, ANALOG)>, /* SAI2_SCK_A */
1209 + <STM32_PINMUX('D', 11, ANALOG)>, /* SAI2_SD_A */
1210 +diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
1211 +index b30bc1a25ebb9..084323d5c61cb 100644
1212 +--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
1213 ++++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
1214 +@@ -593,6 +593,17 @@
1215 + #size-cells = <0>;
1216 + };
1217 +
1218 ++ gic: interrupt-controller@1c81000 {
1219 ++ compatible = "arm,gic-400";
1220 ++ reg = <0x01c81000 0x1000>,
1221 ++ <0x01c82000 0x2000>,
1222 ++ <0x01c84000 0x2000>,
1223 ++ <0x01c86000 0x2000>;
1224 ++ interrupt-controller;
1225 ++ #interrupt-cells = <3>;
1226 ++ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
1227 ++ };
1228 ++
1229 + csi1: camera@1cb4000 {
1230 + compatible = "allwinner,sun8i-v3s-csi";
1231 + reg = <0x01cb4000 0x3000>;
1232 +@@ -604,16 +615,5 @@
1233 + resets = <&ccu RST_BUS_CSI>;
1234 + status = "disabled";
1235 + };
1236 +-
1237 +- gic: interrupt-controller@1c81000 {
1238 +- compatible = "arm,gic-400";
1239 +- reg = <0x01c81000 0x1000>,
1240 +- <0x01c82000 0x2000>,
1241 +- <0x01c84000 0x2000>,
1242 +- <0x01c86000 0x2000>;
1243 +- interrupt-controller;
1244 +- #interrupt-cells = <3>;
1245 +- interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
1246 +- };
1247 + };
1248 + };
1249 +diff --git a/arch/arm/boot/dts/tegra20-asus-tf101.dts b/arch/arm/boot/dts/tegra20-asus-tf101.dts
1250 +index 020172ee7340e..e3267cda15cc9 100644
1251 +--- a/arch/arm/boot/dts/tegra20-asus-tf101.dts
1252 ++++ b/arch/arm/boot/dts/tegra20-asus-tf101.dts
1253 +@@ -442,11 +442,13 @@
1254 +
1255 + serial@70006040 {
1256 + compatible = "nvidia,tegra20-hsuart";
1257 ++ /delete-property/ reg-shift;
1258 + /* GPS BCM4751 */
1259 + };
1260 +
1261 + serial@70006200 {
1262 + compatible = "nvidia,tegra20-hsuart";
1263 ++ /delete-property/ reg-shift;
1264 + status = "okay";
1265 +
1266 + /* Azurewave AW-NH615 BCM4329B1 */
1267 +diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
1268 +index de39c5465c0a9..0e19bd0a847c8 100644
1269 +--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
1270 ++++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
1271 +@@ -183,8 +183,8 @@
1272 + };
1273 + conf_ata {
1274 + nvidia,pins = "ata", "atb", "atc", "atd", "ate",
1275 +- "cdev1", "cdev2", "dap1", "dtb", "gma",
1276 +- "gmb", "gmc", "gmd", "gme", "gpu7",
1277 ++ "cdev1", "cdev2", "dap1", "dtb", "dtf",
1278 ++ "gma", "gmb", "gmc", "gmd", "gme", "gpu7",
1279 + "gpv", "i2cp", "irrx", "irtx", "pta",
1280 + "rm", "slxa", "slxk", "spia", "spib",
1281 + "uac";
1282 +@@ -203,7 +203,7 @@
1283 + };
1284 + conf_crtp {
1285 + nvidia,pins = "crtp", "dap2", "dap3", "dap4",
1286 +- "dtc", "dte", "dtf", "gpu", "sdio1",
1287 ++ "dtc", "dte", "gpu", "sdio1",
1288 + "slxc", "slxd", "spdi", "spdo", "spig",
1289 + "uda";
1290 + nvidia,pull = <TEGRA_PIN_PULL_NONE>;
1291 +diff --git a/arch/arm/boot/dts/tegra30-asus-transformer-common.dtsi b/arch/arm/boot/dts/tegra30-asus-transformer-common.dtsi
1292 +index 85b43a86a26d9..c662ab261ed5f 100644
1293 +--- a/arch/arm/boot/dts/tegra30-asus-transformer-common.dtsi
1294 ++++ b/arch/arm/boot/dts/tegra30-asus-transformer-common.dtsi
1295 +@@ -1080,6 +1080,7 @@
1296 +
1297 + serial@70006040 {
1298 + compatible = "nvidia,tegra30-hsuart";
1299 ++ /delete-property/ reg-shift;
1300 + status = "okay";
1301 +
1302 + /* Broadcom GPS BCM47511 */
1303 +@@ -1087,6 +1088,7 @@
1304 +
1305 + serial@70006200 {
1306 + compatible = "nvidia,tegra30-hsuart";
1307 ++ /delete-property/ reg-shift;
1308 + status = "okay";
1309 +
1310 + nvidia,adjust-baud-rates = <0 9600 100>,
1311 +diff --git a/arch/arm/boot/dts/tegra30-pegatron-chagall.dts b/arch/arm/boot/dts/tegra30-pegatron-chagall.dts
1312 +index f4b2d4218849c..8ce61035290b5 100644
1313 +--- a/arch/arm/boot/dts/tegra30-pegatron-chagall.dts
1314 ++++ b/arch/arm/boot/dts/tegra30-pegatron-chagall.dts
1315 +@@ -1103,6 +1103,7 @@
1316 +
1317 + uartb: serial@70006040 {
1318 + compatible = "nvidia,tegra30-hsuart";
1319 ++ /delete-property/ reg-shift;
1320 + status = "okay";
1321 +
1322 + /* Broadcom GPS BCM47511 */
1323 +@@ -1110,6 +1111,7 @@
1324 +
1325 + uartc: serial@70006200 {
1326 + compatible = "nvidia,tegra30-hsuart";
1327 ++ /delete-property/ reg-shift;
1328 + status = "okay";
1329 +
1330 + nvidia,adjust-baud-rates = <0 9600 100>,
1331 +diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
1332 +index fe8d760256a4c..3e3beb0cc33de 100644
1333 +--- a/arch/arm/configs/multi_v5_defconfig
1334 ++++ b/arch/arm/configs/multi_v5_defconfig
1335 +@@ -188,6 +188,7 @@ CONFIG_REGULATOR=y
1336 + CONFIG_REGULATOR_FIXED_VOLTAGE=y
1337 + CONFIG_MEDIA_SUPPORT=y
1338 + CONFIG_MEDIA_CAMERA_SUPPORT=y
1339 ++CONFIG_MEDIA_PLATFORM_SUPPORT=y
1340 + CONFIG_V4L_PLATFORM_DRIVERS=y
1341 + CONFIG_VIDEO_ASPEED=m
1342 + CONFIG_VIDEO_ATMEL_ISI=m
1343 +@@ -196,6 +197,7 @@ CONFIG_DRM_ATMEL_HLCDC=m
1344 + CONFIG_DRM_PANEL_SIMPLE=y
1345 + CONFIG_DRM_PANEL_EDP=y
1346 + CONFIG_DRM_ASPEED_GFX=m
1347 ++CONFIG_FB=y
1348 + CONFIG_FB_IMX=y
1349 + CONFIG_FB_ATMEL=y
1350 + CONFIG_BACKLIGHT_ATMEL_LCDC=y
1351 +diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
1352 +index 2b575792363e5..e4dba5461cb3e 100644
1353 +--- a/arch/arm/crypto/Kconfig
1354 ++++ b/arch/arm/crypto/Kconfig
1355 +@@ -102,6 +102,8 @@ config CRYPTO_AES_ARM_BS
1356 + depends on KERNEL_MODE_NEON
1357 + select CRYPTO_SKCIPHER
1358 + select CRYPTO_LIB_AES
1359 ++ select CRYPTO_AES
1360 ++ select CRYPTO_CBC
1361 + select CRYPTO_SIMD
1362 + help
1363 + Use a faster and more secure NEON based implementation of AES in CBC,
1364 +diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
1365 +index a74289ebc8036..5f1b1ce10473a 100644
1366 +--- a/arch/arm/kernel/entry-ftrace.S
1367 ++++ b/arch/arm/kernel/entry-ftrace.S
1368 +@@ -22,10 +22,7 @@
1369 + * mcount can be thought of as a function called in the middle of a subroutine
1370 + * call. As such, it needs to be transparent for both the caller and the
1371 + * callee: the original lr needs to be restored when leaving mcount, and no
1372 +- * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
1373 +- * clobber the ip register. This is OK because the ARM calling convention
1374 +- * allows it to be clobbered in subroutines and doesn't use it to hold
1375 +- * parameters.)
1376 ++ * registers should be clobbered.
1377 + *
1378 + * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
1379 + * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
1380 +@@ -70,26 +67,25 @@
1381 +
1382 + .macro __ftrace_regs_caller
1383 +
1384 +- sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
1385 ++ str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
1386 + @ OLD_R0 will overwrite previous LR
1387 +
1388 +- add ip, sp, #12 @ move in IP the value of SP as it was
1389 +- @ before the push {lr} of the mcount mechanism
1390 ++ ldr lr, [sp, #8] @ get previous LR
1391 +
1392 +- str lr, [sp, #0] @ store LR instead of PC
1393 ++ str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
1394 +
1395 +- ldr lr, [sp, #8] @ get previous LR
1396 ++ str lr, [sp, #-4]! @ store previous LR as LR
1397 +
1398 +- str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
1399 ++ add lr, sp, #16 @ move in LR the value of SP as it was
1400 ++ @ before the push {lr} of the mcount mechanism
1401 +
1402 +- stmdb sp!, {ip, lr}
1403 +- stmdb sp!, {r0-r11, lr}
1404 ++ push {r0-r11, ip, lr}
1405 +
1406 + @ stack content at this point:
1407 + @ 0 4 48 52 56 60 64 68 72
1408 +- @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
1409 ++ @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
1410 +
1411 +- mov r3, sp @ struct pt_regs*
1412 ++ mov r3, sp @ struct pt_regs*
1413 +
1414 + ldr r2, =function_trace_op
1415 + ldr r2, [r2] @ pointer to the current
1416 +@@ -112,11 +108,9 @@ ftrace_graph_regs_call:
1417 + #endif
1418 +
1419 + @ pop saved regs
1420 +- ldmia sp!, {r0-r12} @ restore r0 through r12
1421 +- ldr ip, [sp, #8] @ restore PC
1422 +- ldr lr, [sp, #4] @ restore LR
1423 +- ldr sp, [sp, #0] @ restore SP
1424 +- mov pc, ip @ return
1425 ++ pop {r0-r11, ip, lr} @ restore r0 through r12
1426 ++ ldr lr, [sp], #4 @ restore LR
1427 ++ ldr pc, [sp], #12
1428 + .endm
1429 +
1430 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1431 +@@ -132,11 +126,9 @@ ftrace_graph_regs_call:
1432 + bl prepare_ftrace_return
1433 +
1434 + @ pop registers saved in ftrace_regs_caller
1435 +- ldmia sp!, {r0-r12} @ restore r0 through r12
1436 +- ldr ip, [sp, #8] @ restore PC
1437 +- ldr lr, [sp, #4] @ restore LR
1438 +- ldr sp, [sp, #0] @ restore SP
1439 +- mov pc, ip @ return
1440 ++ pop {r0-r11, ip, lr} @ restore r0 through r12
1441 ++ ldr lr, [sp], #4 @ restore LR
1442 ++ ldr pc, [sp], #12
1443 +
1444 + .endm
1445 + #endif
1446 +@@ -202,16 +194,17 @@ ftrace_graph_call\suffix:
1447 + .endm
1448 +
1449 + .macro mcount_exit
1450 +- ldmia sp!, {r0-r3, ip, lr}
1451 +- ret ip
1452 ++ ldmia sp!, {r0-r3}
1453 ++ ldr lr, [sp, #4]
1454 ++ ldr pc, [sp], #8
1455 + .endm
1456 +
1457 + ENTRY(__gnu_mcount_nc)
1458 + UNWIND(.fnstart)
1459 + #ifdef CONFIG_DYNAMIC_FTRACE
1460 +- mov ip, lr
1461 +- ldmia sp!, {lr}
1462 +- ret ip
1463 ++ push {lr}
1464 ++ ldr lr, [sp, #4]
1465 ++ ldr pc, [sp], #8
1466 + #else
1467 + __mcount
1468 + #endif
1469 +diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
1470 +index 6166ba38bf994..b74bfcf94fb1a 100644
1471 +--- a/arch/arm/kernel/swp_emulate.c
1472 ++++ b/arch/arm/kernel/swp_emulate.c
1473 +@@ -195,7 +195,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
1474 + destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
1475 +
1476 + /* Check access in reasonable access range for both SWP and SWPB */
1477 +- if (!access_ok((address & ~3), 4)) {
1478 ++ if (!access_ok((void __user *)(address & ~3), 4)) {
1479 + pr_debug("SWP{B} emulation: access to %p not allowed!\n",
1480 + (void *)address);
1481 + res = -EFAULT;
1482 +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1483 +index cae4a748811f8..5d58aee24087f 100644
1484 +--- a/arch/arm/kernel/traps.c
1485 ++++ b/arch/arm/kernel/traps.c
1486 +@@ -577,7 +577,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
1487 + if (end < start || flags)
1488 + return -EINVAL;
1489 +
1490 +- if (!access_ok(start, end - start))
1491 ++ if (!access_ok((void __user *)start, end - start))
1492 + return -EFAULT;
1493 +
1494 + return __do_cache_op(start, end);
1495 +diff --git a/arch/arm/mach-iop32x/include/mach/entry-macro.S b/arch/arm/mach-iop32x/include/mach/entry-macro.S
1496 +index 8e6766d4621eb..341e5d9a6616d 100644
1497 +--- a/arch/arm/mach-iop32x/include/mach/entry-macro.S
1498 ++++ b/arch/arm/mach-iop32x/include/mach/entry-macro.S
1499 +@@ -20,7 +20,7 @@
1500 + mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC
1501 + cmp \irqstat, #0
1502 + clzne \irqnr, \irqstat
1503 +- rsbne \irqnr, \irqnr, #31
1504 ++ rsbne \irqnr, \irqnr, #32
1505 + .endm
1506 +
1507 + .macro arch_ret_to_user, tmp1, tmp2
1508 +diff --git a/arch/arm/mach-iop32x/include/mach/irqs.h b/arch/arm/mach-iop32x/include/mach/irqs.h
1509 +index c4e78df428e86..e09ae5f48aec5 100644
1510 +--- a/arch/arm/mach-iop32x/include/mach/irqs.h
1511 ++++ b/arch/arm/mach-iop32x/include/mach/irqs.h
1512 +@@ -9,6 +9,6 @@
1513 + #ifndef __IRQS_H
1514 + #define __IRQS_H
1515 +
1516 +-#define NR_IRQS 32
1517 ++#define NR_IRQS 33
1518 +
1519 + #endif
1520 +diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c
1521 +index 2d48bf1398c10..d1e8824cbd824 100644
1522 +--- a/arch/arm/mach-iop32x/irq.c
1523 ++++ b/arch/arm/mach-iop32x/irq.c
1524 +@@ -32,14 +32,14 @@ static void intstr_write(u32 val)
1525 + static void
1526 + iop32x_irq_mask(struct irq_data *d)
1527 + {
1528 +- iop32x_mask &= ~(1 << d->irq);
1529 ++ iop32x_mask &= ~(1 << (d->irq - 1));
1530 + intctl_write(iop32x_mask);
1531 + }
1532 +
1533 + static void
1534 + iop32x_irq_unmask(struct irq_data *d)
1535 + {
1536 +- iop32x_mask |= 1 << d->irq;
1537 ++ iop32x_mask |= 1 << (d->irq - 1);
1538 + intctl_write(iop32x_mask);
1539 + }
1540 +
1541 +@@ -65,7 +65,7 @@ void __init iop32x_init_irq(void)
1542 + machine_is_em7210())
1543 + *IOP3XX_PCIIRSR = 0x0f;
1544 +
1545 +- for (i = 0; i < NR_IRQS; i++) {
1546 ++ for (i = 1; i < NR_IRQS; i++) {
1547 + irq_set_chip_and_handler(i, &ext_chip, handle_level_irq);
1548 + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
1549 + }
1550 +diff --git a/arch/arm/mach-iop32x/irqs.h b/arch/arm/mach-iop32x/irqs.h
1551 +index 69858e4e905d1..e1dfc8b4e7d7e 100644
1552 +--- a/arch/arm/mach-iop32x/irqs.h
1553 ++++ b/arch/arm/mach-iop32x/irqs.h
1554 +@@ -7,36 +7,40 @@
1555 + #ifndef __IOP32X_IRQS_H
1556 + #define __IOP32X_IRQS_H
1557 +
1558 ++/* Interrupts in Linux start at 1, hardware starts at 0 */
1559 ++
1560 ++#define IOP_IRQ(x) ((x) + 1)
1561 ++
1562 + /*
1563 + * IOP80321 chipset interrupts
1564 + */
1565 +-#define IRQ_IOP32X_DMA0_EOT 0
1566 +-#define IRQ_IOP32X_DMA0_EOC 1
1567 +-#define IRQ_IOP32X_DMA1_EOT 2
1568 +-#define IRQ_IOP32X_DMA1_EOC 3
1569 +-#define IRQ_IOP32X_AA_EOT 6
1570 +-#define IRQ_IOP32X_AA_EOC 7
1571 +-#define IRQ_IOP32X_CORE_PMON 8
1572 +-#define IRQ_IOP32X_TIMER0 9
1573 +-#define IRQ_IOP32X_TIMER1 10
1574 +-#define IRQ_IOP32X_I2C_0 11
1575 +-#define IRQ_IOP32X_I2C_1 12
1576 +-#define IRQ_IOP32X_MESSAGING 13
1577 +-#define IRQ_IOP32X_ATU_BIST 14
1578 +-#define IRQ_IOP32X_PERFMON 15
1579 +-#define IRQ_IOP32X_CORE_PMU 16
1580 +-#define IRQ_IOP32X_BIU_ERR 17
1581 +-#define IRQ_IOP32X_ATU_ERR 18
1582 +-#define IRQ_IOP32X_MCU_ERR 19
1583 +-#define IRQ_IOP32X_DMA0_ERR 20
1584 +-#define IRQ_IOP32X_DMA1_ERR 21
1585 +-#define IRQ_IOP32X_AA_ERR 23
1586 +-#define IRQ_IOP32X_MSG_ERR 24
1587 +-#define IRQ_IOP32X_SSP 25
1588 +-#define IRQ_IOP32X_XINT0 27
1589 +-#define IRQ_IOP32X_XINT1 28
1590 +-#define IRQ_IOP32X_XINT2 29
1591 +-#define IRQ_IOP32X_XINT3 30
1592 +-#define IRQ_IOP32X_HPI 31
1593 ++#define IRQ_IOP32X_DMA0_EOT IOP_IRQ(0)
1594 ++#define IRQ_IOP32X_DMA0_EOC IOP_IRQ(1)
1595 ++#define IRQ_IOP32X_DMA1_EOT IOP_IRQ(2)
1596 ++#define IRQ_IOP32X_DMA1_EOC IOP_IRQ(3)
1597 ++#define IRQ_IOP32X_AA_EOT IOP_IRQ(6)
1598 ++#define IRQ_IOP32X_AA_EOC IOP_IRQ(7)
1599 ++#define IRQ_IOP32X_CORE_PMON IOP_IRQ(8)
1600 ++#define IRQ_IOP32X_TIMER0 IOP_IRQ(9)
1601 ++#define IRQ_IOP32X_TIMER1 IOP_IRQ(10)
1602 ++#define IRQ_IOP32X_I2C_0 IOP_IRQ(11)
1603 ++#define IRQ_IOP32X_I2C_1 IOP_IRQ(12)
1604 ++#define IRQ_IOP32X_MESSAGING IOP_IRQ(13)
1605 ++#define IRQ_IOP32X_ATU_BIST IOP_IRQ(14)
1606 ++#define IRQ_IOP32X_PERFMON IOP_IRQ(15)
1607 ++#define IRQ_IOP32X_CORE_PMU IOP_IRQ(16)
1608 ++#define IRQ_IOP32X_BIU_ERR IOP_IRQ(17)
1609 ++#define IRQ_IOP32X_ATU_ERR IOP_IRQ(18)
1610 ++#define IRQ_IOP32X_MCU_ERR IOP_IRQ(19)
1611 ++#define IRQ_IOP32X_DMA0_ERR IOP_IRQ(20)
1612 ++#define IRQ_IOP32X_DMA1_ERR IOP_IRQ(21)
1613 ++#define IRQ_IOP32X_AA_ERR IOP_IRQ(23)
1614 ++#define IRQ_IOP32X_MSG_ERR IOP_IRQ(24)
1615 ++#define IRQ_IOP32X_SSP IOP_IRQ(25)
1616 ++#define IRQ_IOP32X_XINT0 IOP_IRQ(27)
1617 ++#define IRQ_IOP32X_XINT1 IOP_IRQ(28)
1618 ++#define IRQ_IOP32X_XINT2 IOP_IRQ(29)
1619 ++#define IRQ_IOP32X_XINT3 IOP_IRQ(30)
1620 ++#define IRQ_IOP32X_HPI IOP_IRQ(31)
1621 +
1622 + #endif
1623 +diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
1624 +index 6794e2db1ad5f..ecc46c31004f6 100644
1625 +--- a/arch/arm/mach-mmp/sram.c
1626 ++++ b/arch/arm/mach-mmp/sram.c
1627 +@@ -72,6 +72,8 @@ static int sram_probe(struct platform_device *pdev)
1628 + if (!info)
1629 + return -ENOMEM;
1630 +
1631 ++ platform_set_drvdata(pdev, info);
1632 ++
1633 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1634 + if (res == NULL) {
1635 + dev_err(&pdev->dev, "no memory resource defined\n");
1636 +@@ -107,8 +109,6 @@ static int sram_probe(struct platform_device *pdev)
1637 + list_add(&info->node, &sram_bank_list);
1638 + mutex_unlock(&sram_lock);
1639 +
1640 +- platform_set_drvdata(pdev, info);
1641 +-
1642 + dev_info(&pdev->dev, "initialized\n");
1643 + return 0;
1644 +
1645 +@@ -127,17 +127,19 @@ static int sram_remove(struct platform_device *pdev)
1646 + struct sram_bank_info *info;
1647 +
1648 + info = platform_get_drvdata(pdev);
1649 +- if (info == NULL)
1650 +- return -ENODEV;
1651 +
1652 +- mutex_lock(&sram_lock);
1653 +- list_del(&info->node);
1654 +- mutex_unlock(&sram_lock);
1655 ++ if (info->sram_size) {
1656 ++ mutex_lock(&sram_lock);
1657 ++ list_del(&info->node);
1658 ++ mutex_unlock(&sram_lock);
1659 ++
1660 ++ gen_pool_destroy(info->gpool);
1661 ++ iounmap(info->sram_virt);
1662 ++ kfree(info->pool_name);
1663 ++ }
1664 +
1665 +- gen_pool_destroy(info->gpool);
1666 +- iounmap(info->sram_virt);
1667 +- kfree(info->pool_name);
1668 + kfree(info);
1669 ++
1670 + return 0;
1671 + }
1672 +
1673 +diff --git a/arch/arm/mach-s3c/mach-jive.c b/arch/arm/mach-s3c/mach-jive.c
1674 +index 285e1f0f4145a..0d7d408c37291 100644
1675 +--- a/arch/arm/mach-s3c/mach-jive.c
1676 ++++ b/arch/arm/mach-s3c/mach-jive.c
1677 +@@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options)
1678 + unsigned long set;
1679 +
1680 + if (options == NULL || options[0] == '\0')
1681 +- return 0;
1682 ++ return 1;
1683 +
1684 + if (kstrtoul(options, 10, &set)) {
1685 + printk(KERN_ERR "failed to parse mtdset=%s\n", options);
1686 +- return 0;
1687 ++ return 1;
1688 + }
1689 +
1690 + switch (set) {
1691 +@@ -256,7 +256,7 @@ static int __init jive_mtdset(char *options)
1692 + "using default.", set);
1693 + }
1694 +
1695 +- return 0;
1696 ++ return 1;
1697 + }
1698 +
1699 + /* parse the mtdset= option given to the kernel command line */
1700 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
1701 +index c842878f81331..baa0e9bbe7547 100644
1702 +--- a/arch/arm64/Kconfig
1703 ++++ b/arch/arm64/Kconfig
1704 +@@ -683,6 +683,7 @@ config ARM64_ERRATUM_2051678
1705 +
1706 + config ARM64_ERRATUM_2077057
1707 + bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
1708 ++ default y
1709 + help
1710 + This option adds the workaround for ARM Cortex-A510 erratum 2077057.
1711 + Affected Cortex-A510 may corrupt SPSR_EL2 when the a step exception is
1712 +diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
1713 +index 984c737fa627a..6e738f2a37013 100644
1714 +--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
1715 ++++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
1716 +@@ -273,9 +273,9 @@
1717 + #size-cells = <1>;
1718 + ranges = <0x00 0x00 0xff800000 0x3000>;
1719 +
1720 +- timer: timer@400 {
1721 +- compatible = "brcm,bcm6328-timer", "syscon";
1722 +- reg = <0x400 0x3c>;
1723 ++ twd: timer-mfd@400 {
1724 ++ compatible = "brcm,bcm4908-twd", "simple-mfd", "syscon";
1725 ++ reg = <0x400 0x4c>;
1726 + };
1727 +
1728 + gpio0: gpio-controller@500 {
1729 +@@ -330,7 +330,7 @@
1730 +
1731 + reboot {
1732 + compatible = "syscon-reboot";
1733 +- regmap = <&timer>;
1734 ++ regmap = <&twd>;
1735 + offset = <0x34>;
1736 + mask = <1>;
1737 + };
1738 +diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
1739 +index ec19fbf928a14..12a4b1c03390c 100644
1740 +--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
1741 ++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts
1742 +@@ -111,8 +111,8 @@
1743 + compatible = "silabs,si3226x";
1744 + reg = <0>;
1745 + spi-max-frequency = <5000000>;
1746 +- spi-cpha = <1>;
1747 +- spi-cpol = <1>;
1748 ++ spi-cpha;
1749 ++ spi-cpol;
1750 + pl022,hierarchy = <0>;
1751 + pl022,interface = <0>;
1752 + pl022,slave-tx-disable = <0>;
1753 +@@ -135,8 +135,8 @@
1754 + at25,byte-len = <0x8000>;
1755 + at25,addr-mode = <2>;
1756 + at25,page-size = <64>;
1757 +- spi-cpha = <1>;
1758 +- spi-cpol = <1>;
1759 ++ spi-cpha;
1760 ++ spi-cpol;
1761 + pl022,hierarchy = <0>;
1762 + pl022,interface = <0>;
1763 + pl022,slave-tx-disable = <0>;
1764 +diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
1765 +index 2cfeaf3b0a876..8c218689fef70 100644
1766 +--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
1767 ++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
1768 +@@ -687,7 +687,7 @@
1769 + };
1770 + };
1771 +
1772 +- sata: ahci@663f2000 {
1773 ++ sata: sata@663f2000 {
1774 + compatible = "brcm,iproc-ahci", "generic-ahci";
1775 + reg = <0x663f2000 0x1000>;
1776 + dma-coherent;
1777 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
1778 +index 01b01e3204118..35d1939e690b0 100644
1779 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
1780 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
1781 +@@ -536,9 +536,9 @@
1782 + clock-names = "i2c";
1783 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
1784 + QORIQ_CLK_PLL_DIV(1)>;
1785 +- dmas = <&edma0 1 39>,
1786 +- <&edma0 1 38>;
1787 +- dma-names = "tx", "rx";
1788 ++ dmas = <&edma0 1 38>,
1789 ++ <&edma0 1 39>;
1790 ++ dma-names = "rx", "tx";
1791 + status = "disabled";
1792 + };
1793 +
1794 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
1795 +index 687fea6d8afa4..4e7bd04d97984 100644
1796 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
1797 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
1798 +@@ -499,9 +499,9 @@
1799 + interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
1800 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
1801 + QORIQ_CLK_PLL_DIV(2)>;
1802 +- dmas = <&edma0 1 39>,
1803 +- <&edma0 1 38>;
1804 +- dma-names = "tx", "rx";
1805 ++ dmas = <&edma0 1 38>,
1806 ++ <&edma0 1 39>;
1807 ++ dma-names = "rx", "tx";
1808 + status = "disabled";
1809 + };
1810 +
1811 +diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
1812 +index 66ec5615651d4..5dea37651adfc 100644
1813 +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
1814 ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
1815 +@@ -748,7 +748,7 @@
1816 + snps,hird-threshold = /bits/ 8 <0x0>;
1817 + snps,dis_u2_susphy_quirk;
1818 + snps,dis_u3_susphy_quirk;
1819 +- snps,ref-clock-period-ns = <0x32>;
1820 ++ snps,ref-clock-period-ns = <0x29>;
1821 + dr_mode = "host";
1822 + };
1823 + };
1824 +diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5.dts
1825 +index 687bea438a571..6c408d61de75a 100644
1826 +--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-j5.dts
1827 ++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-j5.dts
1828 +@@ -41,7 +41,7 @@
1829 + };
1830 +
1831 + home-key {
1832 +- lable = "Home Key";
1833 ++ label = "Home Key";
1834 + gpios = <&msmgpio 109 GPIO_ACTIVE_LOW>;
1835 + linux,code = <KEY_HOMEPAGE>;
1836 + };
1837 +diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
1838 +index 5a9a5ed0565f6..215f56daa26c2 100644
1839 +--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
1840 ++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
1841 +@@ -713,6 +713,9 @@
1842 + #reset-cells = <1>;
1843 + #power-domain-cells = <1>;
1844 + reg = <0xfc400000 0x2000>;
1845 ++
1846 ++ clock-names = "xo", "sleep_clk";
1847 ++ clocks = <&xo_board>, <&sleep_clk>;
1848 + };
1849 +
1850 + rpm_msg_ram: sram@fc428000 {
1851 +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
1852 +index 937c2e0e93eb9..eab7a85050531 100644
1853 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
1854 ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
1855 +@@ -1790,7 +1790,7 @@
1856 + };
1857 + };
1858 +
1859 +- gmu: gmu@3d69000 {
1860 ++ gmu: gmu@3d6a000 {
1861 + compatible="qcom,adreno-gmu-635.0", "qcom,adreno-gmu";
1862 + reg = <0 0x03d6a000 0 0x34000>,
1863 + <0 0x3de0000 0 0x10000>,
1864 +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
1865 +index cfdeaa81f1bbc..1bb4d98db96fa 100644
1866 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
1867 ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
1868 +@@ -3613,10 +3613,10 @@
1869 + #clock-cells = <0>;
1870 + clock-frequency = <9600000>;
1871 + clock-output-names = "mclk";
1872 +- qcom,micbias1-millivolt = <1800>;
1873 +- qcom,micbias2-millivolt = <1800>;
1874 +- qcom,micbias3-millivolt = <1800>;
1875 +- qcom,micbias4-millivolt = <1800>;
1876 ++ qcom,micbias1-microvolt = <1800000>;
1877 ++ qcom,micbias2-microvolt = <1800000>;
1878 ++ qcom,micbias3-microvolt = <1800000>;
1879 ++ qcom,micbias4-microvolt = <1800000>;
1880 +
1881 + #address-cells = <1>;
1882 + #size-cells = <1>;
1883 +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
1884 +index 6012322a59846..78265646feff7 100644
1885 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
1886 ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
1887 +@@ -3556,9 +3556,9 @@
1888 + qcom,tcs-offset = <0xd00>;
1889 + qcom,drv-id = <2>;
1890 + qcom,tcs-config = <ACTIVE_TCS 2>,
1891 +- <SLEEP_TCS 1>,
1892 +- <WAKE_TCS 1>,
1893 +- <CONTROL_TCS 0>;
1894 ++ <SLEEP_TCS 3>,
1895 ++ <WAKE_TCS 3>,
1896 ++ <CONTROL_TCS 1>;
1897 +
1898 + rpmhcc: clock-controller {
1899 + compatible = "qcom,sm8150-rpmh-clk";
1900 +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
1901 +index 5617a46e5ccdd..a92230bec1ddb 100644
1902 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
1903 ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
1904 +@@ -1740,8 +1740,8 @@
1905 + phys = <&pcie0_lane>;
1906 + phy-names = "pciephy";
1907 +
1908 +- perst-gpio = <&tlmm 79 GPIO_ACTIVE_LOW>;
1909 +- enable-gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>;
1910 ++ perst-gpios = <&tlmm 79 GPIO_ACTIVE_LOW>;
1911 ++ wake-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
1912 +
1913 + pinctrl-names = "default";
1914 + pinctrl-0 = <&pcie0_default_state>;
1915 +@@ -1801,7 +1801,7 @@
1916 + ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
1917 + <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
1918 +
1919 +- interrupts = <GIC_SPI 306 IRQ_TYPE_EDGE_RISING>;
1920 ++ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
1921 + interrupt-names = "msi";
1922 + #interrupt-cells = <1>;
1923 + interrupt-map-mask = <0 0 0 0x7>;
1924 +@@ -1844,8 +1844,8 @@
1925 + phys = <&pcie1_lane>;
1926 + phy-names = "pciephy";
1927 +
1928 +- perst-gpio = <&tlmm 82 GPIO_ACTIVE_LOW>;
1929 +- enable-gpio = <&tlmm 84 GPIO_ACTIVE_HIGH>;
1930 ++ perst-gpios = <&tlmm 82 GPIO_ACTIVE_LOW>;
1931 ++ wake-gpios = <&tlmm 84 GPIO_ACTIVE_HIGH>;
1932 +
1933 + pinctrl-names = "default";
1934 + pinctrl-0 = <&pcie1_default_state>;
1935 +@@ -1907,7 +1907,7 @@
1936 + ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>,
1937 + <0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>;
1938 +
1939 +- interrupts = <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
1940 ++ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
1941 + interrupt-names = "msi";
1942 + #interrupt-cells = <1>;
1943 + interrupt-map-mask = <0 0 0 0x7>;
1944 +@@ -1950,8 +1950,8 @@
1945 + phys = <&pcie2_lane>;
1946 + phy-names = "pciephy";
1947 +
1948 +- perst-gpio = <&tlmm 85 GPIO_ACTIVE_LOW>;
1949 +- enable-gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>;
1950 ++ perst-gpios = <&tlmm 85 GPIO_ACTIVE_LOW>;
1951 ++ wake-gpios = <&tlmm 87 GPIO_ACTIVE_HIGH>;
1952 +
1953 + pinctrl-names = "default";
1954 + pinctrl-0 = <&pcie2_default_state>;
1955 +diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
1956 +index 4b19744bcfb34..765d018e6306c 100644
1957 +--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
1958 ++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
1959 +@@ -1820,7 +1820,7 @@
1960 + qcom,tcs-offset = <0xd00>;
1961 + qcom,drv-id = <2>;
1962 + qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
1963 +- <WAKE_TCS 3>, <CONTROL_TCS 1>;
1964 ++ <WAKE_TCS 3>, <CONTROL_TCS 0>;
1965 +
1966 + rpmhcc: clock-controller {
1967 + compatible = "qcom,sm8350-rpmh-clk";
1968 +diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
1969 +index 02b97e838c474..9ee055143f8a8 100644
1970 +--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
1971 ++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
1972 +@@ -203,9 +203,9 @@
1973 + compatible = "arm,idle-state";
1974 + idle-state-name = "silver-rail-power-collapse";
1975 + arm,psci-suspend-param = <0x40000004>;
1976 +- entry-latency-us = <274>;
1977 +- exit-latency-us = <480>;
1978 +- min-residency-us = <3934>;
1979 ++ entry-latency-us = <800>;
1980 ++ exit-latency-us = <750>;
1981 ++ min-residency-us = <4090>;
1982 + local-timer-stop;
1983 + };
1984 +
1985 +@@ -213,9 +213,9 @@
1986 + compatible = "arm,idle-state";
1987 + idle-state-name = "gold-rail-power-collapse";
1988 + arm,psci-suspend-param = <0x40000004>;
1989 +- entry-latency-us = <327>;
1990 +- exit-latency-us = <1502>;
1991 +- min-residency-us = <4488>;
1992 ++ entry-latency-us = <600>;
1993 ++ exit-latency-us = <1550>;
1994 ++ min-residency-us = <4791>;
1995 + local-timer-stop;
1996 + };
1997 + };
1998 +@@ -224,10 +224,10 @@
1999 + CLUSTER_SLEEP_0: cluster-sleep-0 {
2000 + compatible = "domain-idle-state";
2001 + idle-state-name = "cluster-l3-off";
2002 +- arm,psci-suspend-param = <0x4100c344>;
2003 +- entry-latency-us = <584>;
2004 +- exit-latency-us = <2332>;
2005 +- min-residency-us = <6118>;
2006 ++ arm,psci-suspend-param = <0x41000044>;
2007 ++ entry-latency-us = <1050>;
2008 ++ exit-latency-us = <2500>;
2009 ++ min-residency-us = <5309>;
2010 + local-timer-stop;
2011 + };
2012 +
2013 +@@ -235,9 +235,9 @@
2014 + compatible = "domain-idle-state";
2015 + idle-state-name = "cluster-power-collapse";
2016 + arm,psci-suspend-param = <0x4100c344>;
2017 +- entry-latency-us = <2893>;
2018 +- exit-latency-us = <4023>;
2019 +- min-residency-us = <9987>;
2020 ++ entry-latency-us = <2700>;
2021 ++ exit-latency-us = <3500>;
2022 ++ min-residency-us = <13959>;
2023 + local-timer-stop;
2024 + };
2025 + };
2026 +@@ -315,7 +315,7 @@
2027 +
2028 + CLUSTER_PD: cpu-cluster0 {
2029 + #power-domain-cells = <0>;
2030 +- domain-idle-states = <&CLUSTER_SLEEP_0>;
2031 ++ domain-idle-states = <&CLUSTER_SLEEP_0>, <&CLUSTER_SLEEP_1>;
2032 + };
2033 + };
2034 +
2035 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
2036 +index c4dd2a6b48368..f81ce3240342c 100644
2037 +--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
2038 ++++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
2039 +@@ -770,8 +770,8 @@
2040 + sd-uhs-sdr104;
2041 +
2042 + /* Power supply */
2043 +- vqmmc-supply = &vcc1v8_s3; /* IO line */
2044 +- vmmc-supply = &vcc_sdio; /* card's power */
2045 ++ vqmmc-supply = <&vcc1v8_s3>; /* IO line */
2046 ++ vmmc-supply = <&vcc_sdio>; /* card's power */
2047 +
2048 + #address-cells = <1>;
2049 + #size-cells = <0>;
2050 +diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
2051 +index 012011dc619a5..ce4daff758e7e 100644
2052 +--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
2053 ++++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
2054 +@@ -59,7 +59,10 @@
2055 + #interrupt-cells = <3>;
2056 + interrupt-controller;
2057 + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
2058 +- <0x00 0x01840000 0x00 0xC0000>; /* GICR */
2059 ++ <0x00 0x01840000 0x00 0xC0000>, /* GICR */
2060 ++ <0x01 0x00000000 0x00 0x2000>, /* GICC */
2061 ++ <0x01 0x00010000 0x00 0x1000>, /* GICH */
2062 ++ <0x01 0x00020000 0x00 0x2000>; /* GICV */
2063 + /*
2064 + * vcpumntirq:
2065 + * virtual CPU interface maintenance interrupt
2066 +diff --git a/arch/arm64/boot/dts/ti/k3-am64.dtsi b/arch/arm64/boot/dts/ti/k3-am64.dtsi
2067 +index 120974726be81..19684865d0d68 100644
2068 +--- a/arch/arm64/boot/dts/ti/k3-am64.dtsi
2069 ++++ b/arch/arm64/boot/dts/ti/k3-am64.dtsi
2070 +@@ -87,6 +87,7 @@
2071 + <0x00 0x68000000 0x00 0x68000000 0x00 0x08000000>, /* PCIe DAT0 */
2072 + <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>, /* OC SRAM */
2073 + <0x00 0x78000000 0x00 0x78000000 0x00 0x00800000>, /* Main R5FSS */
2074 ++ <0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */
2075 + <0x06 0x00000000 0x06 0x00000000 0x01 0x00000000>, /* PCIe DAT1 */
2076 + <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS0 DAT3 */
2077 +
2078 +diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
2079 +index ce8bb4a61011e..e749343accedd 100644
2080 +--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
2081 ++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
2082 +@@ -35,7 +35,10 @@
2083 + #interrupt-cells = <3>;
2084 + interrupt-controller;
2085 + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
2086 +- <0x00 0x01880000 0x00 0x90000>; /* GICR */
2087 ++ <0x00 0x01880000 0x00 0x90000>, /* GICR */
2088 ++ <0x00 0x6f000000 0x00 0x2000>, /* GICC */
2089 ++ <0x00 0x6f010000 0x00 0x1000>, /* GICH */
2090 ++ <0x00 0x6f020000 0x00 0x2000>; /* GICV */
2091 + /*
2092 + * vcpumntirq:
2093 + * virtual CPU interface maintenance interrupt
2094 +diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
2095 +index a58a39fa42dbc..c538a0bf3cdda 100644
2096 +--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
2097 ++++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
2098 +@@ -86,6 +86,7 @@
2099 + <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>,
2100 + <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>,
2101 + <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>,
2102 ++ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */
2103 + <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>,
2104 + <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>,
2105 + <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>;
2106 +diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
2107 +index 05a627ad6cdc4..16684a2f054d9 100644
2108 +--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
2109 ++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
2110 +@@ -54,7 +54,10 @@
2111 + #interrupt-cells = <3>;
2112 + interrupt-controller;
2113 + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
2114 +- <0x00 0x01900000 0x00 0x100000>; /* GICR */
2115 ++ <0x00 0x01900000 0x00 0x100000>, /* GICR */
2116 ++ <0x00 0x6f000000 0x00 0x2000>, /* GICC */
2117 ++ <0x00 0x6f010000 0x00 0x1000>, /* GICH */
2118 ++ <0x00 0x6f020000 0x00 0x2000>; /* GICV */
2119 +
2120 + /* vcpumntirq: virtual CPU interface maintenance interrupt */
2121 + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
2122 +diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi
2123 +index 64fef4e67d76a..b6da0454cc5bd 100644
2124 +--- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi
2125 ++++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi
2126 +@@ -129,6 +129,7 @@
2127 + <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* timesync router */
2128 + <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */
2129 + <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
2130 ++ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
2131 + <0x00 0x70000000 0x00 0x70000000 0x00 0x00800000>, /* MSMC RAM */
2132 + <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
2133 + <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
2134 +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
2135 +index 599861259a30f..db0669985e42a 100644
2136 +--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
2137 ++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
2138 +@@ -76,7 +76,10 @@
2139 + #interrupt-cells = <3>;
2140 + interrupt-controller;
2141 + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
2142 +- <0x00 0x01900000 0x00 0x100000>; /* GICR */
2143 ++ <0x00 0x01900000 0x00 0x100000>, /* GICR */
2144 ++ <0x00 0x6f000000 0x00 0x2000>, /* GICC */
2145 ++ <0x00 0x6f010000 0x00 0x1000>, /* GICH */
2146 ++ <0x00 0x6f020000 0x00 0x2000>; /* GICV */
2147 +
2148 + /* vcpumntirq: virtual CPU interface maintenance interrupt */
2149 + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
2150 +diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
2151 +index 4a3872fce5339..0e23886c9fd1d 100644
2152 +--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
2153 ++++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
2154 +@@ -139,6 +139,7 @@
2155 + <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01800000>, /* PCIe Core*/
2156 + <0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */
2157 + <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */
2158 ++ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
2159 + <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT */
2160 + <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT */
2161 + <0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */
2162 +diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
2163 +index b04db1d3ab617..be7f39299894e 100644
2164 +--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
2165 ++++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
2166 +@@ -34,7 +34,10 @@
2167 + #interrupt-cells = <3>;
2168 + interrupt-controller;
2169 + reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */
2170 +- <0x00 0x01900000 0x00 0x100000>; /* GICR */
2171 ++ <0x00 0x01900000 0x00 0x100000>, /* GICR */
2172 ++ <0x00 0x6f000000 0x00 0x2000>, /* GICC */
2173 ++ <0x00 0x6f010000 0x00 0x1000>, /* GICH */
2174 ++ <0x00 0x6f020000 0x00 0x2000>; /* GICV */
2175 +
2176 + /* vcpumntirq: virtual CPU interface maintenance interrupt */
2177 + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
2178 +diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
2179 +index 7521963719ff9..6c5c02edb375d 100644
2180 +--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
2181 ++++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
2182 +@@ -108,7 +108,7 @@
2183 + reg = <0x00 0x42110000 0x00 0x100>;
2184 + gpio-controller;
2185 + #gpio-cells = <2>;
2186 +- interrupt-parent = <&main_gpio_intr>;
2187 ++ interrupt-parent = <&wkup_gpio_intr>;
2188 + interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
2189 + interrupt-controller;
2190 + #interrupt-cells = <2>;
2191 +@@ -124,7 +124,7 @@
2192 + reg = <0x00 0x42100000 0x00 0x100>;
2193 + gpio-controller;
2194 + #gpio-cells = <2>;
2195 +- interrupt-parent = <&main_gpio_intr>;
2196 ++ interrupt-parent = <&wkup_gpio_intr>;
2197 + interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
2198 + interrupt-controller;
2199 + #interrupt-cells = <2>;
2200 +diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
2201 +index fe5234c40f6ce..7b930a85a29d6 100644
2202 +--- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
2203 ++++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
2204 +@@ -119,6 +119,7 @@
2205 + <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */
2206 + <0x00 0x64800000 0x00 0x64800000 0x00 0x0070c000>, /* C71_1 */
2207 + <0x00 0x65800000 0x00 0x65800000 0x00 0x0070c000>, /* C71_2 */
2208 ++ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */
2209 + <0x00 0x70000000 0x00 0x70000000 0x00 0x00400000>, /* MSMC RAM */
2210 + <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */
2211 + <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */
2212 +diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
2213 +index 30516dc0b70ec..7411e4f9b5545 100644
2214 +--- a/arch/arm64/configs/defconfig
2215 ++++ b/arch/arm64/configs/defconfig
2216 +@@ -939,7 +939,7 @@ CONFIG_DMADEVICES=y
2217 + CONFIG_DMA_BCM2835=y
2218 + CONFIG_DMA_SUN6I=m
2219 + CONFIG_FSL_EDMA=y
2220 +-CONFIG_IMX_SDMA=y
2221 ++CONFIG_IMX_SDMA=m
2222 + CONFIG_K3_DMA=y
2223 + CONFIG_MV_XOR=y
2224 + CONFIG_MV_XOR_V2=y
2225 +diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
2226 +index a11ccadd47d29..094701ec5500b 100644
2227 +--- a/arch/arm64/include/asm/module.lds.h
2228 ++++ b/arch/arm64/include/asm/module.lds.h
2229 +@@ -1,8 +1,8 @@
2230 + SECTIONS {
2231 + #ifdef CONFIG_ARM64_MODULE_PLTS
2232 +- .plt 0 (NOLOAD) : { BYTE(0) }
2233 +- .init.plt 0 (NOLOAD) : { BYTE(0) }
2234 +- .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
2235 ++ .plt 0 : { BYTE(0) }
2236 ++ .init.plt 0 : { BYTE(0) }
2237 ++ .text.ftrace_trampoline 0 : { BYTE(0) }
2238 + #endif
2239 +
2240 + #ifdef CONFIG_KASAN_SW_TAGS
2241 +diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
2242 +index 86e0cc9b9c685..aa3d3607d5c8d 100644
2243 +--- a/arch/arm64/include/asm/spectre.h
2244 ++++ b/arch/arm64/include/asm/spectre.h
2245 +@@ -67,7 +67,8 @@ struct bp_hardening_data {
2246 +
2247 + DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
2248 +
2249 +-static inline void arm64_apply_bp_hardening(void)
2250 ++/* Called during entry so must be __always_inline */
2251 ++static __always_inline void arm64_apply_bp_hardening(void)
2252 + {
2253 + struct bp_hardening_data *d;
2254 +
2255 +diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
2256 +index 6d45c63c64548..5777929d35bf4 100644
2257 +--- a/arch/arm64/kernel/proton-pack.c
2258 ++++ b/arch/arm64/kernel/proton-pack.c
2259 +@@ -233,17 +233,20 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
2260 + __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
2261 + }
2262 +
2263 +-static void call_smc_arch_workaround_1(void)
2264 ++/* Called during entry so must be noinstr */
2265 ++static noinstr void call_smc_arch_workaround_1(void)
2266 + {
2267 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
2268 + }
2269 +
2270 +-static void call_hvc_arch_workaround_1(void)
2271 ++/* Called during entry so must be noinstr */
2272 ++static noinstr void call_hvc_arch_workaround_1(void)
2273 + {
2274 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
2275 + }
2276 +
2277 +-static void qcom_link_stack_sanitisation(void)
2278 ++/* Called during entry so must be noinstr */
2279 ++static noinstr void qcom_link_stack_sanitisation(void)
2280 + {
2281 + u64 tmp;
2282 +
2283 +diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
2284 +index d8aaf4b6f4320..3d66fba69016f 100644
2285 +--- a/arch/arm64/kernel/signal.c
2286 ++++ b/arch/arm64/kernel/signal.c
2287 +@@ -577,10 +577,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
2288 + {
2289 + int err;
2290 +
2291 +- err = sigframe_alloc(user, &user->fpsimd_offset,
2292 +- sizeof(struct fpsimd_context));
2293 +- if (err)
2294 +- return err;
2295 ++ if (system_supports_fpsimd()) {
2296 ++ err = sigframe_alloc(user, &user->fpsimd_offset,
2297 ++ sizeof(struct fpsimd_context));
2298 ++ if (err)
2299 ++ return err;
2300 ++ }
2301 +
2302 + /* fault information, if valid */
2303 + if (add_all || current->thread.fault_code) {
2304 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
2305 +index db63cc885771a..9e26ec80d3175 100644
2306 +--- a/arch/arm64/mm/init.c
2307 ++++ b/arch/arm64/mm/init.c
2308 +@@ -61,8 +61,34 @@ EXPORT_SYMBOL(memstart_addr);
2309 + * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
2310 + * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
2311 + * otherwise it is empty.
2312 ++ *
2313 ++ * Memory reservation for crash kernel either done early or deferred
2314 ++ * depending on DMA memory zones configs (ZONE_DMA) --
2315 ++ *
2316 ++ * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
2317 ++ * here instead of max_zone_phys(). This lets early reservation of
2318 ++ * crash kernel memory which has a dependency on arm64_dma_phys_limit.
2319 ++ * Reserving memory early for crash kernel allows linear creation of block
2320 ++ * mappings (greater than page-granularity) for all the memory bank rangs.
2321 ++ * In this scheme a comparatively quicker boot is observed.
2322 ++ *
2323 ++ * If ZONE_DMA configs are defined, crash kernel memory reservation
2324 ++ * is delayed until DMA zone memory range size initilazation performed in
2325 ++ * zone_sizes_init(). The defer is necessary to steer clear of DMA zone
2326 ++ * memory range to avoid overlap allocation. So crash kernel memory boundaries
2327 ++ * are not known when mapping all bank memory ranges, which otherwise means
2328 ++ * not possible to exclude crash kernel range from creating block mappings
2329 ++ * so page-granularity mappings are created for the entire memory range.
2330 ++ * Hence a slightly slower boot is observed.
2331 ++ *
2332 ++ * Note: Page-granularity mapppings are necessary for crash kernel memory
2333 ++ * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
2334 + */
2335 +-phys_addr_t arm64_dma_phys_limit __ro_after_init;
2336 ++#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
2337 ++phys_addr_t __ro_after_init arm64_dma_phys_limit;
2338 ++#else
2339 ++phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
2340 ++#endif
2341 +
2342 + #ifdef CONFIG_KEXEC_CORE
2343 + /*
2344 +@@ -153,8 +179,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
2345 + if (!arm64_dma_phys_limit)
2346 + arm64_dma_phys_limit = dma32_phys_limit;
2347 + #endif
2348 +- if (!arm64_dma_phys_limit)
2349 +- arm64_dma_phys_limit = PHYS_MASK + 1;
2350 + max_zone_pfns[ZONE_NORMAL] = max;
2351 +
2352 + free_area_init(max_zone_pfns);
2353 +@@ -315,6 +339,9 @@ void __init arm64_memblock_init(void)
2354 +
2355 + early_init_fdt_scan_reserved_mem();
2356 +
2357 ++ if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
2358 ++ reserve_crashkernel();
2359 ++
2360 + high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
2361 + }
2362 +
2363 +@@ -361,7 +388,8 @@ void __init bootmem_init(void)
2364 + * request_standard_resources() depends on crashkernel's memory being
2365 + * reserved, so do it here.
2366 + */
2367 +- reserve_crashkernel();
2368 ++ if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
2369 ++ reserve_crashkernel();
2370 +
2371 + memblock_dump_all();
2372 + }
2373 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
2374 +index 49abbf43bf355..37b8230cda6a8 100644
2375 +--- a/arch/arm64/mm/mmu.c
2376 ++++ b/arch/arm64/mm/mmu.c
2377 +@@ -63,6 +63,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
2378 + static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
2379 +
2380 + static DEFINE_SPINLOCK(swapper_pgdir_lock);
2381 ++static DEFINE_MUTEX(fixmap_lock);
2382 +
2383 + void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
2384 + {
2385 +@@ -329,6 +330,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
2386 + }
2387 + BUG_ON(p4d_bad(p4d));
2388 +
2389 ++ /*
2390 ++ * No need for locking during early boot. And it doesn't work as
2391 ++ * expected with KASLR enabled.
2392 ++ */
2393 ++ if (system_state != SYSTEM_BOOTING)
2394 ++ mutex_lock(&fixmap_lock);
2395 + pudp = pud_set_fixmap_offset(p4dp, addr);
2396 + do {
2397 + pud_t old_pud = READ_ONCE(*pudp);
2398 +@@ -359,6 +366,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
2399 + } while (pudp++, addr = next, addr != end);
2400 +
2401 + pud_clear_fixmap();
2402 ++ if (system_state != SYSTEM_BOOTING)
2403 ++ mutex_unlock(&fixmap_lock);
2404 + }
2405 +
2406 + static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
2407 +@@ -517,7 +526,7 @@ static void __init map_mem(pgd_t *pgdp)
2408 + */
2409 + BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
2410 +
2411 +- if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
2412 ++ if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
2413 + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
2414 +
2415 + /*
2416 +@@ -528,6 +537,17 @@ static void __init map_mem(pgd_t *pgdp)
2417 + */
2418 + memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
2419 +
2420 ++#ifdef CONFIG_KEXEC_CORE
2421 ++ if (crash_mem_map) {
2422 ++ if (IS_ENABLED(CONFIG_ZONE_DMA) ||
2423 ++ IS_ENABLED(CONFIG_ZONE_DMA32))
2424 ++ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
2425 ++ else if (crashk_res.end)
2426 ++ memblock_mark_nomap(crashk_res.start,
2427 ++ resource_size(&crashk_res));
2428 ++ }
2429 ++#endif
2430 ++
2431 + /* map all the memory banks */
2432 + for_each_mem_range(i, &start, &end) {
2433 + if (start >= end)
2434 +@@ -554,6 +574,25 @@ static void __init map_mem(pgd_t *pgdp)
2435 + __map_memblock(pgdp, kernel_start, kernel_end,
2436 + PAGE_KERNEL, NO_CONT_MAPPINGS);
2437 + memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
2438 ++
2439 ++ /*
2440 ++ * Use page-level mappings here so that we can shrink the region
2441 ++ * in page granularity and put back unused memory to buddy system
2442 ++ * through /sys/kernel/kexec_crash_size interface.
2443 ++ */
2444 ++#ifdef CONFIG_KEXEC_CORE
2445 ++ if (crash_mem_map &&
2446 ++ !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
2447 ++ if (crashk_res.end) {
2448 ++ __map_memblock(pgdp, crashk_res.start,
2449 ++ crashk_res.end + 1,
2450 ++ PAGE_KERNEL,
2451 ++ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
2452 ++ memblock_clear_nomap(crashk_res.start,
2453 ++ resource_size(&crashk_res));
2454 ++ }
2455 ++ }
2456 ++#endif
2457 + }
2458 +
2459 + void mark_rodata_ro(void)
2460 +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
2461 +index e96d4d87291f3..cbc41e261f1e7 100644
2462 +--- a/arch/arm64/net/bpf_jit_comp.c
2463 ++++ b/arch/arm64/net/bpf_jit_comp.c
2464 +@@ -1049,15 +1049,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2465 + goto out_off;
2466 + }
2467 +
2468 +- /* 1. Initial fake pass to compute ctx->idx. */
2469 +-
2470 +- /* Fake pass to fill in ctx->offset. */
2471 +- if (build_body(&ctx, extra_pass)) {
2472 ++ /*
2473 ++ * 1. Initial fake pass to compute ctx->idx and ctx->offset.
2474 ++ *
2475 ++ * BPF line info needs ctx->offset[i] to be the offset of
2476 ++ * instruction[i] in jited image, so build prologue first.
2477 ++ */
2478 ++ if (build_prologue(&ctx, was_classic)) {
2479 + prog = orig_prog;
2480 + goto out_off;
2481 + }
2482 +
2483 +- if (build_prologue(&ctx, was_classic)) {
2484 ++ if (build_body(&ctx, extra_pass)) {
2485 + prog = orig_prog;
2486 + goto out_off;
2487 + }
2488 +@@ -1130,6 +1133,11 @@ skip_init_ctx:
2489 + prog->jited_len = prog_size;
2490 +
2491 + if (!prog->is_func || extra_pass) {
2492 ++ int i;
2493 ++
2494 ++ /* offset[prog->len] is the size of program */
2495 ++ for (i = 0; i <= prog->len; i++)
2496 ++ ctx.offset[i] *= AARCH64_INSN_SIZE;
2497 + bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
2498 + out_off:
2499 + kfree(ctx.offset);
2500 +diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
2501 +index 92057de08f4f0..1612f43540877 100644
2502 +--- a/arch/csky/kernel/perf_callchain.c
2503 ++++ b/arch/csky/kernel/perf_callchain.c
2504 +@@ -49,7 +49,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
2505 + {
2506 + struct stackframe buftail;
2507 + unsigned long lr = 0;
2508 +- unsigned long *user_frame_tail = (unsigned long *)fp;
2509 ++ unsigned long __user *user_frame_tail = (unsigned long __user *)fp;
2510 +
2511 + /* Check accessibility of one struct frame_tail beyond */
2512 + if (!access_ok(user_frame_tail, sizeof(buftail)))
2513 +diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
2514 +index c7b763d2f526e..8867ddf3e6c77 100644
2515 +--- a/arch/csky/kernel/signal.c
2516 ++++ b/arch/csky/kernel/signal.c
2517 +@@ -136,7 +136,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
2518 + static int
2519 + setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
2520 + {
2521 +- struct rt_sigframe *frame;
2522 ++ struct rt_sigframe __user *frame;
2523 + int err = 0;
2524 +
2525 + frame = get_sigframe(ksig, regs, sizeof(*frame));
2526 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
2527 +index 0386252e9d043..4218750414bbf 100644
2528 +--- a/arch/m68k/coldfire/device.c
2529 ++++ b/arch/m68k/coldfire/device.c
2530 +@@ -480,7 +480,7 @@ static struct platform_device mcf_i2c5 = {
2531 + #endif /* MCFI2C_BASE5 */
2532 + #endif /* IS_ENABLED(CONFIG_I2C_IMX) */
2533 +
2534 +-#if IS_ENABLED(CONFIG_MCF_EDMA)
2535 ++#ifdef MCFEDMA_BASE
2536 +
2537 + static const struct dma_slave_map mcf_edma_map[] = {
2538 + { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) },
2539 +@@ -552,7 +552,7 @@ static struct platform_device mcf_edma = {
2540 + .platform_data = &mcf_edma_data,
2541 + }
2542 + };
2543 +-#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
2544 ++#endif /* MCFEDMA_BASE */
2545 +
2546 + #ifdef MCFSDHC_BASE
2547 + static struct mcf_esdhc_platform_data mcf_esdhc_data = {
2548 +@@ -651,7 +651,7 @@ static struct platform_device *mcf_devices[] __initdata = {
2549 + &mcf_i2c5,
2550 + #endif
2551 + #endif
2552 +-#if IS_ENABLED(CONFIG_MCF_EDMA)
2553 ++#ifdef MCFEDMA_BASE
2554 + &mcf_edma,
2555 + #endif
2556 + #ifdef MCFSDHC_BASE
2557 +diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
2558 +index 5b6e0e7788f44..3fe96979d2c62 100644
2559 +--- a/arch/microblaze/include/asm/uaccess.h
2560 ++++ b/arch/microblaze/include/asm/uaccess.h
2561 +@@ -130,27 +130,27 @@ extern long __user_bad(void);
2562 +
2563 + #define __get_user(x, ptr) \
2564 + ({ \
2565 +- unsigned long __gu_val = 0; \
2566 + long __gu_err; \
2567 + switch (sizeof(*(ptr))) { \
2568 + case 1: \
2569 +- __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
2570 ++ __get_user_asm("lbu", (ptr), x, __gu_err); \
2571 + break; \
2572 + case 2: \
2573 +- __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
2574 ++ __get_user_asm("lhu", (ptr), x, __gu_err); \
2575 + break; \
2576 + case 4: \
2577 +- __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
2578 ++ __get_user_asm("lw", (ptr), x, __gu_err); \
2579 + break; \
2580 +- case 8: \
2581 +- __gu_err = __copy_from_user(&__gu_val, ptr, 8); \
2582 +- if (__gu_err) \
2583 +- __gu_err = -EFAULT; \
2584 ++ case 8: { \
2585 ++ __u64 __x = 0; \
2586 ++ __gu_err = raw_copy_from_user(&__x, ptr, 8) ? \
2587 ++ -EFAULT : 0; \
2588 ++ (x) = (typeof(x))(typeof((x) - (x)))__x; \
2589 + break; \
2590 ++ } \
2591 + default: \
2592 + /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
2593 + } \
2594 +- x = (__force __typeof__(*(ptr))) __gu_val; \
2595 + __gu_err; \
2596 + })
2597 +
2598 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2599 +index 058446f01487c..651d4fe355da6 100644
2600 +--- a/arch/mips/Kconfig
2601 ++++ b/arch/mips/Kconfig
2602 +@@ -101,6 +101,7 @@ config MIPS
2603 + select TRACE_IRQFLAGS_SUPPORT
2604 + select VIRT_TO_BUS
2605 + select ARCH_HAS_ELFCORE_COMPAT
2606 ++ select HAVE_ARCH_KCSAN if 64BIT
2607 +
2608 + config MIPS_FIXUP_BIGPHYS_ADDR
2609 + bool
2610 +diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2611 +index e036fc025cccb..4478c5661d61d 100644
2612 +--- a/arch/mips/Makefile
2613 ++++ b/arch/mips/Makefile
2614 +@@ -340,14 +340,12 @@ drivers-$(CONFIG_PM) += arch/mips/power/
2615 + boot-y := vmlinux.bin
2616 + boot-y += vmlinux.ecoff
2617 + boot-y += vmlinux.srec
2618 +-ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
2619 + boot-y += uImage
2620 + boot-y += uImage.bin
2621 + boot-y += uImage.bz2
2622 + boot-y += uImage.gz
2623 + boot-y += uImage.lzma
2624 + boot-y += uImage.lzo
2625 +-endif
2626 + boot-y += vmlinux.itb
2627 + boot-y += vmlinux.gz.itb
2628 + boot-y += vmlinux.bz2.itb
2629 +@@ -359,9 +357,7 @@ bootz-y := vmlinuz
2630 + bootz-y += vmlinuz.bin
2631 + bootz-y += vmlinuz.ecoff
2632 + bootz-y += vmlinuz.srec
2633 +-ifeq ($(shell expr $(zload-y) \< 0xffffffff80000000 2> /dev/null), 0)
2634 + bootz-y += uzImage.bin
2635 +-endif
2636 + bootz-y += vmlinuz.itb
2637 +
2638 + #
2639 +diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
2640 +index 5a15d51e88841..6cc28173bee89 100644
2641 +--- a/arch/mips/boot/compressed/Makefile
2642 ++++ b/arch/mips/boot/compressed/Makefile
2643 +@@ -38,6 +38,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
2644 + KCOV_INSTRUMENT := n
2645 + GCOV_PROFILE := n
2646 + UBSAN_SANITIZE := n
2647 ++KCSAN_SANITIZE := n
2648 +
2649 + # decompressor objects (linked with vmlinuz)
2650 + vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o
2651 +diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
2652 +index 0a03529cf3178..3e4f5ba104f89 100644
2653 +--- a/arch/mips/crypto/crc32-mips.c
2654 ++++ b/arch/mips/crypto/crc32-mips.c
2655 +@@ -28,7 +28,7 @@ enum crc_type {
2656 + };
2657 +
2658 + #ifndef TOOLCHAIN_SUPPORTS_CRC
2659 +-#define _ASM_MACRO_CRC32(OP, SZ, TYPE) \
2660 ++#define _ASM_SET_CRC(OP, SZ, TYPE) \
2661 + _ASM_MACRO_3R(OP, rt, rs, rt2, \
2662 + ".ifnc \\rt, \\rt2\n\t" \
2663 + ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
2664 +@@ -37,30 +37,36 @@ _ASM_MACRO_3R(OP, rt, rs, rt2, \
2665 + ((SZ) << 6) | ((TYPE) << 8)) \
2666 + _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
2667 + ((SZ) << 14) | ((TYPE) << 3)))
2668 +-_ASM_MACRO_CRC32(crc32b, 0, 0);
2669 +-_ASM_MACRO_CRC32(crc32h, 1, 0);
2670 +-_ASM_MACRO_CRC32(crc32w, 2, 0);
2671 +-_ASM_MACRO_CRC32(crc32d, 3, 0);
2672 +-_ASM_MACRO_CRC32(crc32cb, 0, 1);
2673 +-_ASM_MACRO_CRC32(crc32ch, 1, 1);
2674 +-_ASM_MACRO_CRC32(crc32cw, 2, 1);
2675 +-_ASM_MACRO_CRC32(crc32cd, 3, 1);
2676 +-#define _ASM_SET_CRC ""
2677 ++#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
2678 + #else /* !TOOLCHAIN_SUPPORTS_CRC */
2679 +-#define _ASM_SET_CRC ".set\tcrc\n\t"
2680 ++#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
2681 ++#define _ASM_UNSET_CRC(op, SZ, TYPE)
2682 + #endif
2683 +
2684 +-#define _CRC32(crc, value, size, type) \
2685 +-do { \
2686 +- __asm__ __volatile__( \
2687 +- ".set push\n\t" \
2688 +- _ASM_SET_CRC \
2689 +- #type #size " %0, %1, %0\n\t" \
2690 +- ".set pop" \
2691 +- : "+r" (crc) \
2692 +- : "r" (value)); \
2693 ++#define __CRC32(crc, value, op, SZ, TYPE) \
2694 ++do { \
2695 ++ __asm__ __volatile__( \
2696 ++ ".set push\n\t" \
2697 ++ _ASM_SET_CRC(op, SZ, TYPE) \
2698 ++ #op " %0, %1, %0\n\t" \
2699 ++ _ASM_UNSET_CRC(op, SZ, TYPE) \
2700 ++ ".set pop" \
2701 ++ : "+r" (crc) \
2702 ++ : "r" (value)); \
2703 + } while (0)
2704 +
2705 ++#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0)
2706 ++#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0)
2707 ++#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0)
2708 ++#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0)
2709 ++#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1)
2710 ++#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1)
2711 ++#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1)
2712 ++#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1)
2713 ++
2714 ++#define _CRC32(crc, value, size, op) \
2715 ++ _CRC32_##op##size(crc, value)
2716 ++
2717 + #define CRC32(crc, value, size) \
2718 + _CRC32(crc, value, size, crc32)
2719 +
2720 +diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
2721 +index ea5b5a83f1e11..011d1d678840a 100644
2722 +--- a/arch/mips/dec/int-handler.S
2723 ++++ b/arch/mips/dec/int-handler.S
2724 +@@ -131,7 +131,7 @@
2725 + */
2726 + mfc0 t0,CP0_CAUSE # get pending interrupts
2727 + mfc0 t1,CP0_STATUS
2728 +-#ifdef CONFIG_32BIT
2729 ++#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
2730 + lw t2,cpu_fpu_mask
2731 + #endif
2732 + andi t0,ST0_IM # CAUSE.CE may be non-zero!
2733 +@@ -139,7 +139,7 @@
2734 +
2735 + beqz t0,spurious
2736 +
2737 +-#ifdef CONFIG_32BIT
2738 ++#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
2739 + and t2,t0
2740 + bnez t2,fpu # handle FPU immediately
2741 + #endif
2742 +@@ -280,7 +280,7 @@ handle_it:
2743 + j dec_irq_dispatch
2744 + nop
2745 +
2746 +-#ifdef CONFIG_32BIT
2747 ++#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
2748 + fpu:
2749 + lw t0,fpu_kstat_irq
2750 + nop
2751 +diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile
2752 +index d95016016b42b..2bad87551203b 100644
2753 +--- a/arch/mips/dec/prom/Makefile
2754 ++++ b/arch/mips/dec/prom/Makefile
2755 +@@ -6,4 +6,4 @@
2756 +
2757 + lib-y += init.o memory.o cmdline.o identify.o console.o
2758 +
2759 +-lib-$(CONFIG_32BIT) += locore.o
2760 ++lib-$(CONFIG_CPU_R3000) += locore.o
2761 +diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
2762 +index a8a30bb1dee8c..82b00e45ce50a 100644
2763 +--- a/arch/mips/dec/setup.c
2764 ++++ b/arch/mips/dec/setup.c
2765 +@@ -746,7 +746,8 @@ void __init arch_init_irq(void)
2766 + dec_interrupt[DEC_IRQ_HALT] = -1;
2767 +
2768 + /* Register board interrupts: FPU and cascade. */
2769 +- if (dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) {
2770 ++ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) &&
2771 ++ dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) {
2772 + struct irq_desc *desc_fpu;
2773 + int irq_fpu;
2774 +
2775 +diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
2776 +index 62c7dfb90e06c..1e1247add1cf8 100644
2777 +--- a/arch/mips/include/asm/dec/prom.h
2778 ++++ b/arch/mips/include/asm/dec/prom.h
2779 +@@ -43,16 +43,11 @@
2780 + */
2781 + #define REX_PROM_MAGIC 0x30464354
2782 +
2783 +-#ifdef CONFIG_64BIT
2784 +-
2785 +-#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
2786 +-
2787 +-#else /* !CONFIG_64BIT */
2788 +-
2789 +-#define prom_is_rex(magic) ((magic) == REX_PROM_MAGIC)
2790 +-
2791 +-#endif /* !CONFIG_64BIT */
2792 +-
2793 ++/* KN04 and KN05 are REX PROMs, so only do the check for R3k systems. */
2794 ++static inline bool prom_is_rex(u32 magic)
2795 ++{
2796 ++ return !IS_ENABLED(CONFIG_CPU_R3000) || magic == REX_PROM_MAGIC;
2797 ++}
2798 +
2799 + /*
2800 + * 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and
2801 +diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2802 +index c7925d0e98746..867e9c3db76e9 100644
2803 +--- a/arch/mips/include/asm/pgalloc.h
2804 ++++ b/arch/mips/include/asm/pgalloc.h
2805 +@@ -15,6 +15,7 @@
2806 +
2807 + #define __HAVE_ARCH_PMD_ALLOC_ONE
2808 + #define __HAVE_ARCH_PUD_ALLOC_ONE
2809 ++#define __HAVE_ARCH_PGD_FREE
2810 + #include <asm-generic/pgalloc.h>
2811 +
2812 + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
2813 +@@ -48,6 +49,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2814 + extern void pgd_init(unsigned long page);
2815 + extern pgd_t *pgd_alloc(struct mm_struct *mm);
2816 +
2817 ++static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
2818 ++{
2819 ++ free_pages((unsigned long)pgd, PGD_ORDER);
2820 ++}
2821 ++
2822 + #define __pte_free_tlb(tlb,pte,address) \
2823 + do { \
2824 + pgtable_pte_page_dtor(pte); \
2825 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
2826 +index b131e6a773832..5cda07688f67a 100644
2827 +--- a/arch/mips/mm/tlbex.c
2828 ++++ b/arch/mips/mm/tlbex.c
2829 +@@ -2160,16 +2160,14 @@ static void build_r4000_tlb_load_handler(void)
2830 + uasm_i_tlbr(&p);
2831 +
2832 + switch (current_cpu_type()) {
2833 +- default:
2834 +- if (cpu_has_mips_r2_exec_hazard) {
2835 +- uasm_i_ehb(&p);
2836 +- fallthrough;
2837 +-
2838 + case CPU_CAVIUM_OCTEON:
2839 + case CPU_CAVIUM_OCTEON_PLUS:
2840 + case CPU_CAVIUM_OCTEON2:
2841 +- break;
2842 +- }
2843 ++ break;
2844 ++ default:
2845 ++ if (cpu_has_mips_r2_exec_hazard)
2846 ++ uasm_i_ehb(&p);
2847 ++ break;
2848 + }
2849 +
2850 + /* Examine entrylo 0 or 1 based on ptr. */
2851 +@@ -2236,15 +2234,14 @@ static void build_r4000_tlb_load_handler(void)
2852 + uasm_i_tlbr(&p);
2853 +
2854 + switch (current_cpu_type()) {
2855 +- default:
2856 +- if (cpu_has_mips_r2_exec_hazard) {
2857 +- uasm_i_ehb(&p);
2858 +-
2859 + case CPU_CAVIUM_OCTEON:
2860 + case CPU_CAVIUM_OCTEON_PLUS:
2861 + case CPU_CAVIUM_OCTEON2:
2862 +- break;
2863 +- }
2864 ++ break;
2865 ++ default:
2866 ++ if (cpu_has_mips_r2_exec_hazard)
2867 ++ uasm_i_ehb(&p);
2868 ++ break;
2869 + }
2870 +
2871 + /* Examine entrylo 0 or 1 based on ptr. */
2872 +diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
2873 +index 04684990e28ef..b7f6f782d9a13 100644
2874 +--- a/arch/mips/rb532/devices.c
2875 ++++ b/arch/mips/rb532/devices.c
2876 +@@ -301,11 +301,9 @@ static int __init plat_setup_devices(void)
2877 + static int __init setup_kmac(char *s)
2878 + {
2879 + printk(KERN_INFO "korina mac = %s\n", s);
2880 +- if (!mac_pton(s, korina_dev0_data.mac)) {
2881 ++ if (!mac_pton(s, korina_dev0_data.mac))
2882 + printk(KERN_ERR "Invalid mac\n");
2883 +- return -EINVAL;
2884 +- }
2885 +- return 0;
2886 ++ return 1;
2887 + }
2888 +
2889 + __setup("kmac=", setup_kmac);
2890 +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
2891 +index d65f55f67e19b..f72658b3a53f7 100644
2892 +--- a/arch/mips/vdso/Makefile
2893 ++++ b/arch/mips/vdso/Makefile
2894 +@@ -1,6 +1,9 @@
2895 + # SPDX-License-Identifier: GPL-2.0
2896 + # Objects to go into the VDSO.
2897 +
2898 ++# Sanitizer runtimes are unavailable and cannot be linked here.
2899 ++ KCSAN_SANITIZE := n
2900 ++
2901 + # Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
2902 + # the inclusion of generic Makefile.
2903 + ARCH_REL_TYPE_ABS := R_MIPS_JUMP_SLOT|R_MIPS_GLOB_DAT
2904 +diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
2905 +index ba9340e96fd4c..ca9285a915efa 100644
2906 +--- a/arch/nios2/include/asm/uaccess.h
2907 ++++ b/arch/nios2/include/asm/uaccess.h
2908 +@@ -88,6 +88,7 @@ extern __must_check long strnlen_user(const char __user *s, long n);
2909 + /* Optimized macros */
2910 + #define __get_user_asm(val, insn, addr, err) \
2911 + { \
2912 ++ unsigned long __gu_val; \
2913 + __asm__ __volatile__( \
2914 + " movi %0, %3\n" \
2915 + "1: " insn " %1, 0(%2)\n" \
2916 +@@ -96,14 +97,20 @@ extern __must_check long strnlen_user(const char __user *s, long n);
2917 + " .section __ex_table,\"a\"\n" \
2918 + " .word 1b, 2b\n" \
2919 + " .previous" \
2920 +- : "=&r" (err), "=r" (val) \
2921 ++ : "=&r" (err), "=r" (__gu_val) \
2922 + : "r" (addr), "i" (-EFAULT)); \
2923 ++ val = (__force __typeof__(*(addr)))__gu_val; \
2924 + }
2925 +
2926 +-#define __get_user_unknown(val, size, ptr, err) do { \
2927 ++extern void __get_user_unknown(void);
2928 ++
2929 ++#define __get_user_8(val, ptr, err) do { \
2930 ++ u64 __val = 0; \
2931 + err = 0; \
2932 +- if (__copy_from_user(&(val), ptr, size)) { \
2933 ++ if (raw_copy_from_user(&(__val), ptr, sizeof(val))) { \
2934 + err = -EFAULT; \
2935 ++ } else { \
2936 ++ val = (typeof(val))(typeof((val) - (val)))__val; \
2937 + } \
2938 + } while (0)
2939 +
2940 +@@ -119,8 +126,11 @@ do { \
2941 + case 4: \
2942 + __get_user_asm(val, "ldw", ptr, err); \
2943 + break; \
2944 ++ case 8: \
2945 ++ __get_user_8(val, ptr, err); \
2946 ++ break; \
2947 + default: \
2948 +- __get_user_unknown(val, size, ptr, err); \
2949 ++ __get_user_unknown(); \
2950 + break; \
2951 + } \
2952 + } while (0)
2953 +@@ -129,9 +139,7 @@ do { \
2954 + ({ \
2955 + long __gu_err = -EFAULT; \
2956 + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
2957 +- unsigned long __gu_val = 0; \
2958 +- __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
2959 +- (x) = (__force __typeof__(x))__gu_val; \
2960 ++ __get_user_common(x, sizeof(*(ptr)), __gu_ptr, __gu_err); \
2961 + __gu_err; \
2962 + })
2963 +
2964 +@@ -139,11 +147,9 @@ do { \
2965 + ({ \
2966 + long __gu_err = -EFAULT; \
2967 + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
2968 +- unsigned long __gu_val = 0; \
2969 + if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \
2970 +- __get_user_common(__gu_val, sizeof(*__gu_ptr), \
2971 ++ __get_user_common(x, sizeof(*__gu_ptr), \
2972 + __gu_ptr, __gu_err); \
2973 +- (x) = (__force __typeof__(x))__gu_val; \
2974 + __gu_err; \
2975 + })
2976 +
2977 +diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
2978 +index 2009ae2d3c3bb..386e46443b605 100644
2979 +--- a/arch/nios2/kernel/signal.c
2980 ++++ b/arch/nios2/kernel/signal.c
2981 +@@ -36,10 +36,10 @@ struct rt_sigframe {
2982 +
2983 + static inline int rt_restore_ucontext(struct pt_regs *regs,
2984 + struct switch_stack *sw,
2985 +- struct ucontext *uc, int *pr2)
2986 ++ struct ucontext __user *uc, int *pr2)
2987 + {
2988 + int temp;
2989 +- unsigned long *gregs = uc->uc_mcontext.gregs;
2990 ++ unsigned long __user *gregs = uc->uc_mcontext.gregs;
2991 + int err;
2992 +
2993 + /* Always make any pending restarted system calls return -EINTR */
2994 +@@ -102,10 +102,11 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw)
2995 + {
2996 + struct pt_regs *regs = (struct pt_regs *)(sw + 1);
2997 + /* Verify, can we follow the stack back */
2998 +- struct rt_sigframe *frame = (struct rt_sigframe *) regs->sp;
2999 ++ struct rt_sigframe __user *frame;
3000 + sigset_t set;
3001 + int rval;
3002 +
3003 ++ frame = (struct rt_sigframe __user *) regs->sp;
3004 + if (!access_ok(frame, sizeof(*frame)))
3005 + goto badframe;
3006 +
3007 +@@ -124,10 +125,10 @@ badframe:
3008 + return 0;
3009 + }
3010 +
3011 +-static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
3012 ++static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
3013 + {
3014 + struct switch_stack *sw = (struct switch_stack *)regs - 1;
3015 +- unsigned long *gregs = uc->uc_mcontext.gregs;
3016 ++ unsigned long __user *gregs = uc->uc_mcontext.gregs;
3017 + int err = 0;
3018 +
3019 + err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
3020 +@@ -162,8 +163,9 @@ static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
3021 + return err;
3022 + }
3023 +
3024 +-static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
3025 +- size_t frame_size)
3026 ++static inline void __user *get_sigframe(struct ksignal *ksig,
3027 ++ struct pt_regs *regs,
3028 ++ size_t frame_size)
3029 + {
3030 + unsigned long usp;
3031 +
3032 +@@ -174,13 +176,13 @@ static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
3033 + usp = sigsp(usp, ksig);
3034 +
3035 + /* Verify, is it 32 or 64 bit aligned */
3036 +- return (void *)((usp - frame_size) & -8UL);
3037 ++ return (void __user *)((usp - frame_size) & -8UL);
3038 + }
3039 +
3040 + static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
3041 + struct pt_regs *regs)
3042 + {
3043 +- struct rt_sigframe *frame;
3044 ++ struct rt_sigframe __user *frame;
3045 + int err = 0;
3046 +
3047 + frame = get_sigframe(ksig, regs, sizeof(*frame));
3048 +diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
3049 +index 34619f010c631..0ccdb738a9a36 100644
3050 +--- a/arch/parisc/include/asm/traps.h
3051 ++++ b/arch/parisc/include/asm/traps.h
3052 +@@ -18,6 +18,7 @@ unsigned long parisc_acctyp(unsigned long code, unsigned int inst);
3053 + const char *trap_name(unsigned long code);
3054 + void do_page_fault(struct pt_regs *regs, unsigned long code,
3055 + unsigned long address);
3056 ++int handle_nadtlb_fault(struct pt_regs *regs);
3057 + #endif
3058 +
3059 + #endif
3060 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
3061 +index 94150b91c96fb..bce71cefe5724 100644
3062 +--- a/arch/parisc/kernel/cache.c
3063 ++++ b/arch/parisc/kernel/cache.c
3064 +@@ -558,15 +558,6 @@ static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
3065 + }
3066 + }
3067 +
3068 +-static void flush_user_cache_tlb(struct vm_area_struct *vma,
3069 +- unsigned long start, unsigned long end)
3070 +-{
3071 +- flush_user_dcache_range_asm(start, end);
3072 +- if (vma->vm_flags & VM_EXEC)
3073 +- flush_user_icache_range_asm(start, end);
3074 +- flush_tlb_range(vma, start, end);
3075 +-}
3076 +-
3077 + void flush_cache_mm(struct mm_struct *mm)
3078 + {
3079 + struct vm_area_struct *vma;
3080 +@@ -581,17 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
3081 + return;
3082 + }
3083 +
3084 +- preempt_disable();
3085 +- if (mm->context == mfsp(3)) {
3086 +- for (vma = mm->mmap; vma; vma = vma->vm_next)
3087 +- flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
3088 +- preempt_enable();
3089 +- return;
3090 +- }
3091 +-
3092 + for (vma = mm->mmap; vma; vma = vma->vm_next)
3093 + flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
3094 +- preempt_enable();
3095 + }
3096 +
3097 + void flush_cache_range(struct vm_area_struct *vma,
3098 +@@ -605,15 +587,7 @@ void flush_cache_range(struct vm_area_struct *vma,
3099 + return;
3100 + }
3101 +
3102 +- preempt_disable();
3103 +- if (vma->vm_mm->context == mfsp(3)) {
3104 +- flush_user_cache_tlb(vma, start, end);
3105 +- preempt_enable();
3106 +- return;
3107 +- }
3108 +-
3109 +- flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
3110 +- preempt_enable();
3111 ++ flush_cache_pages(vma, vma->vm_mm, start, end);
3112 + }
3113 +
3114 + void
3115 +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3116 +index b6fdebddc8e99..39576a9245c7f 100644
3117 +--- a/arch/parisc/kernel/traps.c
3118 ++++ b/arch/parisc/kernel/traps.c
3119 +@@ -662,6 +662,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3120 + by hand. Technically we need to emulate:
3121 + fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
3122 + */
3123 ++ if (code == 17 && handle_nadtlb_fault(regs))
3124 ++ return;
3125 + fault_address = regs->ior;
3126 + fault_space = regs->isr;
3127 + break;
3128 +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3129 +index e9eabf8f14d7e..f114e102aaf21 100644
3130 +--- a/arch/parisc/mm/fault.c
3131 ++++ b/arch/parisc/mm/fault.c
3132 +@@ -425,3 +425,92 @@ out_of_memory:
3133 + }
3134 + pagefault_out_of_memory();
3135 + }
3136 ++
3137 ++/* Handle non-access data TLB miss faults.
3138 ++ *
3139 ++ * For probe instructions, accesses to userspace are considered allowed
3140 ++ * if they lie in a valid VMA and the access type matches. We are not
3141 ++ * allowed to handle MM faults here so there may be situations where an
3142 ++ * actual access would fail even though a probe was successful.
3143 ++ */
3144 ++int
3145 ++handle_nadtlb_fault(struct pt_regs *regs)
3146 ++{
3147 ++ unsigned long insn = regs->iir;
3148 ++ int breg, treg, xreg, val = 0;
3149 ++ struct vm_area_struct *vma, *prev_vma;
3150 ++ struct task_struct *tsk;
3151 ++ struct mm_struct *mm;
3152 ++ unsigned long address;
3153 ++ unsigned long acc_type;
3154 ++
3155 ++ switch (insn & 0x380) {
3156 ++ case 0x280:
3157 ++ /* FDC instruction */
3158 ++ fallthrough;
3159 ++ case 0x380:
3160 ++ /* PDC and FIC instructions */
3161 ++ if (printk_ratelimit()) {
3162 ++ pr_warn("BUG: nullifying cache flush/purge instruction\n");
3163 ++ show_regs(regs);
3164 ++ }
3165 ++ if (insn & 0x20) {
3166 ++ /* Base modification */
3167 ++ breg = (insn >> 21) & 0x1f;
3168 ++ xreg = (insn >> 16) & 0x1f;
3169 ++ if (breg && xreg)
3170 ++ regs->gr[breg] += regs->gr[xreg];
3171 ++ }
3172 ++ regs->gr[0] |= PSW_N;
3173 ++ return 1;
3174 ++
3175 ++ case 0x180:
3176 ++ /* PROBE instruction */
3177 ++ treg = insn & 0x1f;
3178 ++ if (regs->isr) {
3179 ++ tsk = current;
3180 ++ mm = tsk->mm;
3181 ++ if (mm) {
3182 ++ /* Search for VMA */
3183 ++ address = regs->ior;
3184 ++ mmap_read_lock(mm);
3185 ++ vma = find_vma_prev(mm, address, &prev_vma);
3186 ++ mmap_read_unlock(mm);
3187 ++
3188 ++ /*
3189 ++ * Check if access to the VMA is okay.
3190 ++ * We don't allow for stack expansion.
3191 ++ */
3192 ++ acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
3193 ++ if (vma
3194 ++ && address >= vma->vm_start
3195 ++ && (vma->vm_flags & acc_type) == acc_type)
3196 ++ val = 1;
3197 ++ }
3198 ++ }
3199 ++ if (treg)
3200 ++ regs->gr[treg] = val;
3201 ++ regs->gr[0] |= PSW_N;
3202 ++ return 1;
3203 ++
3204 ++ case 0x300:
3205 ++ /* LPA instruction */
3206 ++ if (insn & 0x20) {
3207 ++ /* Base modification */
3208 ++ breg = (insn >> 21) & 0x1f;
3209 ++ xreg = (insn >> 16) & 0x1f;
3210 ++ if (breg && xreg)
3211 ++ regs->gr[breg] += regs->gr[xreg];
3212 ++ }
3213 ++ treg = insn & 0x1f;
3214 ++ if (treg)
3215 ++ regs->gr[treg] = 0;
3216 ++ regs->gr[0] |= PSW_N;
3217 ++ return 1;
3218 ++
3219 ++ default:
3220 ++ break;
3221 ++ }
3222 ++
3223 ++ return 0;
3224 ++}
3225 +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3226 +index 5f16ac1583c5d..887efa31f60ab 100644
3227 +--- a/arch/powerpc/Makefile
3228 ++++ b/arch/powerpc/Makefile
3229 +@@ -171,7 +171,7 @@ else
3230 + CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
3231 + CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
3232 + endif
3233 +-else
3234 ++else ifdef CONFIG_PPC_BOOK3E_64
3235 + CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
3236 + endif
3237 +
3238 +diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
3239 +new file mode 100644
3240 +index 0000000000000..73f8c998c64df
3241 +--- /dev/null
3242 ++++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
3243 +@@ -0,0 +1,30 @@
3244 ++// SPDX-License-Identifier: GPL-2.0-or-later
3245 ++/*
3246 ++ * T1040RDB-REV-A Device Tree Source
3247 ++ *
3248 ++ * Copyright 2014 - 2015 Freescale Semiconductor Inc.
3249 ++ *
3250 ++ */
3251 ++
3252 ++#include "t1040rdb.dts"
3253 ++
3254 ++/ {
3255 ++ model = "fsl,T1040RDB-REV-A";
3256 ++ compatible = "fsl,T1040RDB-REV-A";
3257 ++};
3258 ++
3259 ++&seville_port0 {
3260 ++ label = "ETH5";
3261 ++};
3262 ++
3263 ++&seville_port2 {
3264 ++ label = "ETH7";
3265 ++};
3266 ++
3267 ++&seville_port4 {
3268 ++ label = "ETH9";
3269 ++};
3270 ++
3271 ++&seville_port6 {
3272 ++ label = "ETH11";
3273 ++};
3274 +diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
3275 +index af0c8a6f56138..b6733e7e65805 100644
3276 +--- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts
3277 ++++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
3278 +@@ -119,7 +119,7 @@
3279 + managed = "in-band-status";
3280 + phy-handle = <&phy_qsgmii_0>;
3281 + phy-mode = "qsgmii";
3282 +- label = "ETH5";
3283 ++ label = "ETH3";
3284 + status = "okay";
3285 + };
3286 +
3287 +@@ -135,7 +135,7 @@
3288 + managed = "in-band-status";
3289 + phy-handle = <&phy_qsgmii_2>;
3290 + phy-mode = "qsgmii";
3291 +- label = "ETH7";
3292 ++ label = "ETH5";
3293 + status = "okay";
3294 + };
3295 +
3296 +@@ -151,7 +151,7 @@
3297 + managed = "in-band-status";
3298 + phy-handle = <&phy_qsgmii_4>;
3299 + phy-mode = "qsgmii";
3300 +- label = "ETH9";
3301 ++ label = "ETH7";
3302 + status = "okay";
3303 + };
3304 +
3305 +@@ -167,7 +167,7 @@
3306 + managed = "in-band-status";
3307 + phy-handle = <&phy_qsgmii_6>;
3308 + phy-mode = "qsgmii";
3309 +- label = "ETH11";
3310 ++ label = "ETH9";
3311 + status = "okay";
3312 + };
3313 +
3314 +diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
3315 +index beba4979bff93..fee979d3a1aa4 100644
3316 +--- a/arch/powerpc/include/asm/io.h
3317 ++++ b/arch/powerpc/include/asm/io.h
3318 +@@ -359,25 +359,37 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
3319 + */
3320 + static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
3321 + {
3322 +- __asm__ __volatile__("stbcix %0,0,%1"
3323 ++ __asm__ __volatile__(".machine push; \
3324 ++ .machine power6; \
3325 ++ stbcix %0,0,%1; \
3326 ++ .machine pop;"
3327 + : : "r" (val), "r" (paddr) : "memory");
3328 + }
3329 +
3330 + static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
3331 + {
3332 +- __asm__ __volatile__("sthcix %0,0,%1"
3333 ++ __asm__ __volatile__(".machine push; \
3334 ++ .machine power6; \
3335 ++ sthcix %0,0,%1; \
3336 ++ .machine pop;"
3337 + : : "r" (val), "r" (paddr) : "memory");
3338 + }
3339 +
3340 + static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
3341 + {
3342 +- __asm__ __volatile__("stwcix %0,0,%1"
3343 ++ __asm__ __volatile__(".machine push; \
3344 ++ .machine power6; \
3345 ++ stwcix %0,0,%1; \
3346 ++ .machine pop;"
3347 + : : "r" (val), "r" (paddr) : "memory");
3348 + }
3349 +
3350 + static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
3351 + {
3352 +- __asm__ __volatile__("stdcix %0,0,%1"
3353 ++ __asm__ __volatile__(".machine push; \
3354 ++ .machine power6; \
3355 ++ stdcix %0,0,%1; \
3356 ++ .machine pop;"
3357 + : : "r" (val), "r" (paddr) : "memory");
3358 + }
3359 +
3360 +@@ -389,7 +401,10 @@ static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
3361 + static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
3362 + {
3363 + u8 ret;
3364 +- __asm__ __volatile__("lbzcix %0,0, %1"
3365 ++ __asm__ __volatile__(".machine push; \
3366 ++ .machine power6; \
3367 ++ lbzcix %0,0, %1; \
3368 ++ .machine pop;"
3369 + : "=r" (ret) : "r" (paddr) : "memory");
3370 + return ret;
3371 + }
3372 +@@ -397,7 +412,10 @@ static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
3373 + static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
3374 + {
3375 + u16 ret;
3376 +- __asm__ __volatile__("lhzcix %0,0, %1"
3377 ++ __asm__ __volatile__(".machine push; \
3378 ++ .machine power6; \
3379 ++ lhzcix %0,0, %1; \
3380 ++ .machine pop;"
3381 + : "=r" (ret) : "r" (paddr) : "memory");
3382 + return ret;
3383 + }
3384 +@@ -405,7 +423,10 @@ static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
3385 + static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
3386 + {
3387 + u32 ret;
3388 +- __asm__ __volatile__("lwzcix %0,0, %1"
3389 ++ __asm__ __volatile__(".machine push; \
3390 ++ .machine power6; \
3391 ++ lwzcix %0,0, %1; \
3392 ++ .machine pop;"
3393 + : "=r" (ret) : "r" (paddr) : "memory");
3394 + return ret;
3395 + }
3396 +@@ -413,7 +434,10 @@ static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
3397 + static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
3398 + {
3399 + u64 ret;
3400 +- __asm__ __volatile__("ldcix %0,0, %1"
3401 ++ __asm__ __volatile__(".machine push; \
3402 ++ .machine power6; \
3403 ++ ldcix %0,0, %1; \
3404 ++ .machine pop;"
3405 + : "=r" (ret) : "r" (paddr) : "memory");
3406 + return ret;
3407 + }
3408 +diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
3409 +index b040094f79202..7ebc807aa8cc8 100644
3410 +--- a/arch/powerpc/include/asm/set_memory.h
3411 ++++ b/arch/powerpc/include/asm/set_memory.h
3412 +@@ -6,6 +6,8 @@
3413 + #define SET_MEMORY_RW 1
3414 + #define SET_MEMORY_NX 2
3415 + #define SET_MEMORY_X 3
3416 ++#define SET_MEMORY_NP 4 /* Set memory non present */
3417 ++#define SET_MEMORY_P 5 /* Set memory present */
3418 +
3419 + int change_memory_attr(unsigned long addr, int numpages, long action);
3420 +
3421 +@@ -29,6 +31,14 @@ static inline int set_memory_x(unsigned long addr, int numpages)
3422 + return change_memory_attr(addr, numpages, SET_MEMORY_X);
3423 + }
3424 +
3425 +-int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
3426 ++static inline int set_memory_np(unsigned long addr, int numpages)
3427 ++{
3428 ++ return change_memory_attr(addr, numpages, SET_MEMORY_NP);
3429 ++}
3430 ++
3431 ++static inline int set_memory_p(unsigned long addr, int numpages)
3432 ++{
3433 ++ return change_memory_attr(addr, numpages, SET_MEMORY_P);
3434 ++}
3435 +
3436 + #endif
3437 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3438 +index 63316100080c1..4a35423f766db 100644
3439 +--- a/arch/powerpc/include/asm/uaccess.h
3440 ++++ b/arch/powerpc/include/asm/uaccess.h
3441 +@@ -125,8 +125,11 @@ do { \
3442 + */
3443 + #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
3444 + __asm__ __volatile__( \
3445 ++ ".machine push\n" \
3446 ++ ".machine altivec\n" \
3447 + "1: lvx 0,0,%1 # get user\n" \
3448 + " stvx 0,0,%2 # put kernel\n" \
3449 ++ ".machine pop\n" \
3450 + "2:\n" \
3451 + ".section .fixup,\"ax\"\n" \
3452 + "3: li %0,%3\n" \
3453 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
3454 +index cd0b8b71ecddc..384f58a3f373f 100644
3455 +--- a/arch/powerpc/kernel/time.c
3456 ++++ b/arch/powerpc/kernel/time.c
3457 +@@ -582,8 +582,9 @@ void timer_rearm_host_dec(u64 now)
3458 + local_paca->irq_happened |= PACA_IRQ_DEC;
3459 + } else {
3460 + now = *next_tb - now;
3461 +- if (now <= decrementer_max)
3462 +- set_dec_or_work(now);
3463 ++ if (now > decrementer_max)
3464 ++ now = decrementer_max;
3465 ++ set_dec_or_work(now);
3466 + }
3467 + }
3468 + EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
3469 +diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
3470 +index 3beecc32940bc..5a0f023a26e90 100644
3471 +--- a/arch/powerpc/kernel/tm.S
3472 ++++ b/arch/powerpc/kernel/tm.S
3473 +@@ -443,7 +443,8 @@ restore_gprs:
3474 +
3475 + REST_GPR(0, r7) /* GPR0 */
3476 + REST_GPRS(2, 4, r7) /* GPR2-4 */
3477 +- REST_GPRS(8, 31, r7) /* GPR8-31 */
3478 ++ REST_GPRS(8, 12, r7) /* GPR8-12 */
3479 ++ REST_GPRS(14, 31, r7) /* GPR14-31 */
3480 +
3481 + /* Load up PPR and DSCR here so we don't run with user values for long */
3482 + mtspr SPRN_DSCR, r5
3483 +@@ -479,18 +480,24 @@ restore_gprs:
3484 + REST_GPR(6, r7)
3485 +
3486 + /*
3487 +- * Store r1 and r5 on the stack so that we can access them after we
3488 +- * clear MSR RI.
3489 ++ * Store user r1 and r5 and r13 on the stack (in the unused save
3490 ++ * areas / compiler reserved areas), so that we can access them after
3491 ++ * we clear MSR RI.
3492 + */
3493 +
3494 + REST_GPR(5, r7)
3495 + std r5, -8(r1)
3496 +- ld r5, GPR1(r7)
3497 ++ ld r5, GPR13(r7)
3498 + std r5, -16(r1)
3499 ++ ld r5, GPR1(r7)
3500 ++ std r5, -24(r1)
3501 +
3502 + REST_GPR(7, r7)
3503 +
3504 +- /* Clear MSR RI since we are about to use SCRATCH0. EE is already off */
3505 ++ /* Stash the stack pointer away for use after recheckpoint */
3506 ++ std r1, PACAR1(r13)
3507 ++
3508 ++ /* Clear MSR RI since we are about to clobber r13. EE is already off */
3509 + li r5, 0
3510 + mtmsrd r5, 1
3511 +
3512 +@@ -501,9 +508,9 @@ restore_gprs:
3513 + * until we turn MSR RI back on.
3514 + */
3515 +
3516 +- SET_SCRATCH0(r1)
3517 + ld r5, -8(r1)
3518 +- ld r1, -16(r1)
3519 ++ ld r13, -16(r1)
3520 ++ ld r1, -24(r1)
3521 +
3522 + /* Commit register state as checkpointed state: */
3523 + TRECHKPT
3524 +@@ -519,9 +526,9 @@ restore_gprs:
3525 + */
3526 +
3527 + GET_PACA(r13)
3528 +- GET_SCRATCH0(r1)
3529 ++ ld r1, PACAR1(r13)
3530 +
3531 +- /* R1 is restored, so we are recoverable again. EE is still off */
3532 ++ /* R13, R1 is restored, so we are recoverable again. EE is still off */
3533 + li r4, MSR_RI
3534 + mtmsrd r4, 1
3535 +
3536 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
3537 +index 84c89f08ae9aa..791db769080d2 100644
3538 +--- a/arch/powerpc/kvm/book3s_hv.c
3539 ++++ b/arch/powerpc/kvm/book3s_hv.c
3540 +@@ -6137,8 +6137,11 @@ static int kvmppc_book3s_init_hv(void)
3541 + if (r)
3542 + return r;
3543 +
3544 +- if (kvmppc_radix_possible())
3545 ++ if (kvmppc_radix_possible()) {
3546 + r = kvmppc_radix_init();
3547 ++ if (r)
3548 ++ return r;
3549 ++ }
3550 +
3551 + r = kvmppc_uvmem_init();
3552 + if (r < 0)
3553 +diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
3554 +index 2ad0ccd202d5d..f0c4545dc3ab8 100644
3555 +--- a/arch/powerpc/kvm/powerpc.c
3556 ++++ b/arch/powerpc/kvm/powerpc.c
3557 +@@ -1499,7 +1499,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
3558 + {
3559 + enum emulation_result emulated = EMULATE_DONE;
3560 +
3561 +- if (vcpu->arch.mmio_vsx_copy_nums > 2)
3562 ++ if (vcpu->arch.mmio_vmx_copy_nums > 2)
3563 + return EMULATE_FAIL;
3564 +
3565 + while (vcpu->arch.mmio_vmx_copy_nums) {
3566 +@@ -1596,7 +1596,7 @@ int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
3567 + unsigned int index = rs & KVM_MMIO_REG_MASK;
3568 + enum emulation_result emulated = EMULATE_DONE;
3569 +
3570 +- if (vcpu->arch.mmio_vsx_copy_nums > 2)
3571 ++ if (vcpu->arch.mmio_vmx_copy_nums > 2)
3572 + return EMULATE_FAIL;
3573 +
3574 + vcpu->arch.io_gpr = rs;
3575 +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
3576 +index bd3734d5be892..bf755a7be5147 100644
3577 +--- a/arch/powerpc/lib/sstep.c
3578 ++++ b/arch/powerpc/lib/sstep.c
3579 +@@ -112,9 +112,9 @@ static nokprobe_inline long address_ok(struct pt_regs *regs,
3580 + {
3581 + if (!user_mode(regs))
3582 + return 1;
3583 +- if (__access_ok(ea, nb))
3584 ++ if (access_ok((void __user *)ea, nb))
3585 + return 1;
3586 +- if (__access_ok(ea, 1))
3587 ++ if (access_ok((void __user *)ea, 1))
3588 + /* Access overlaps the end of the user region */
3589 + regs->dar = TASK_SIZE_MAX - 1;
3590 + else
3591 +@@ -1097,7 +1097,10 @@ NOKPROBE_SYMBOL(emulate_dcbz);
3592 +
3593 + #define __put_user_asmx(x, addr, err, op, cr) \
3594 + __asm__ __volatile__( \
3595 ++ ".machine push\n" \
3596 ++ ".machine power8\n" \
3597 + "1: " op " %2,0,%3\n" \
3598 ++ ".machine pop\n" \
3599 + " mfcr %1\n" \
3600 + "2:\n" \
3601 + ".section .fixup,\"ax\"\n" \
3602 +@@ -1110,7 +1113,10 @@ NOKPROBE_SYMBOL(emulate_dcbz);
3603 +
3604 + #define __get_user_asmx(x, addr, err, op) \
3605 + __asm__ __volatile__( \
3606 ++ ".machine push\n" \
3607 ++ ".machine power8\n" \
3608 + "1: "op" %1,0,%2\n" \
3609 ++ ".machine pop\n" \
3610 + "2:\n" \
3611 + ".section .fixup,\"ax\"\n" \
3612 + "3: li %0,%3\n" \
3613 +@@ -3389,7 +3395,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3614 + __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3615 + break;
3616 + case 2:
3617 +- __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3618 ++ __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3619 + break;
3620 + #endif
3621 + case 4:
3622 +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3623 +index eb8ecd7343a99..7ba6d3eff636d 100644
3624 +--- a/arch/powerpc/mm/fault.c
3625 ++++ b/arch/powerpc/mm/fault.c
3626 +@@ -567,18 +567,24 @@ NOKPROBE_SYMBOL(hash__do_page_fault);
3627 + static void __bad_page_fault(struct pt_regs *regs, int sig)
3628 + {
3629 + int is_write = page_fault_is_write(regs->dsisr);
3630 ++ const char *msg;
3631 +
3632 + /* kernel has accessed a bad area */
3633 +
3634 ++ if (regs->dar < PAGE_SIZE)
3635 ++ msg = "Kernel NULL pointer dereference";
3636 ++ else
3637 ++ msg = "Unable to handle kernel data access";
3638 ++
3639 + switch (TRAP(regs)) {
3640 + case INTERRUPT_DATA_STORAGE:
3641 +- case INTERRUPT_DATA_SEGMENT:
3642 + case INTERRUPT_H_DATA_STORAGE:
3643 +- pr_alert("BUG: %s on %s at 0x%08lx\n",
3644 +- regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
3645 +- "Unable to handle kernel data access",
3646 ++ pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
3647 + is_write ? "write" : "read", regs->dar);
3648 + break;
3649 ++ case INTERRUPT_DATA_SEGMENT:
3650 ++ pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
3651 ++ break;
3652 + case INTERRUPT_INST_STORAGE:
3653 + case INTERRUPT_INST_SEGMENT:
3654 + pr_alert("BUG: Unable to handle kernel instruction fetch%s",
3655 +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
3656 +index cf8770b1a692e..f3e4d069e0ba7 100644
3657 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c
3658 ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
3659 +@@ -83,13 +83,12 @@ void __init
3660 + kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
3661 + {
3662 + unsigned long k_cur;
3663 +- phys_addr_t pa = __pa(kasan_early_shadow_page);
3664 +
3665 + for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
3666 + pmd_t *pmd = pmd_off_k(k_cur);
3667 + pte_t *ptep = pte_offset_kernel(pmd, k_cur);
3668 +
3669 +- if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
3670 ++ if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page)))
3671 + continue;
3672 +
3673 + __set_pte_at(&init_mm, k_cur, ptep, pte, 0);
3674 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
3675 +index 9d5f710d2c205..b9b7fefbb64b9 100644
3676 +--- a/arch/powerpc/mm/numa.c
3677 ++++ b/arch/powerpc/mm/numa.c
3678 +@@ -956,7 +956,9 @@ static int __init parse_numa_properties(void)
3679 + of_node_put(cpu);
3680 + }
3681 +
3682 +- node_set_online(nid);
3683 ++ /* node_set_online() is an UB if 'nid' is negative */
3684 ++ if (likely(nid >= 0))
3685 ++ node_set_online(nid);
3686 + }
3687 +
3688 + get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
3689 +diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
3690 +index edea388e9d3fb..3bb9d168e3b31 100644
3691 +--- a/arch/powerpc/mm/pageattr.c
3692 ++++ b/arch/powerpc/mm/pageattr.c
3693 +@@ -48,6 +48,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
3694 + case SET_MEMORY_X:
3695 + pte = pte_mkexec(pte);
3696 + break;
3697 ++ case SET_MEMORY_NP:
3698 ++ pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
3699 ++ break;
3700 ++ case SET_MEMORY_P:
3701 ++ pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
3702 ++ break;
3703 + default:
3704 + WARN_ON_ONCE(1);
3705 + break;
3706 +@@ -96,36 +102,3 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
3707 + return apply_to_existing_page_range(&init_mm, start, size,
3708 + change_page_attr, (void *)action);
3709 + }
3710 +-
3711 +-/*
3712 +- * Set the attributes of a page:
3713 +- *
3714 +- * This function is used by PPC32 at the end of init to set final kernel memory
3715 +- * protection. It includes changing the maping of the page it is executing from
3716 +- * and data pages it is using.
3717 +- */
3718 +-static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
3719 +-{
3720 +- pgprot_t prot = __pgprot((unsigned long)data);
3721 +-
3722 +- spin_lock(&init_mm.page_table_lock);
3723 +-
3724 +- set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
3725 +- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
3726 +-
3727 +- spin_unlock(&init_mm.page_table_lock);
3728 +-
3729 +- return 0;
3730 +-}
3731 +-
3732 +-int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
3733 +-{
3734 +- unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
3735 +- unsigned long sz = numpages * PAGE_SIZE;
3736 +-
3737 +- if (numpages <= 0)
3738 +- return 0;
3739 +-
3740 +- return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
3741 +- (void *)pgprot_val(prot));
3742 +-}
3743 +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
3744 +index 906e4e4328b2e..f71ededdc02a5 100644
3745 +--- a/arch/powerpc/mm/pgtable_32.c
3746 ++++ b/arch/powerpc/mm/pgtable_32.c
3747 +@@ -135,10 +135,12 @@ void mark_initmem_nx(void)
3748 + unsigned long numpages = PFN_UP((unsigned long)_einittext) -
3749 + PFN_DOWN((unsigned long)_sinittext);
3750 +
3751 +- if (v_block_mapped((unsigned long)_sinittext))
3752 ++ if (v_block_mapped((unsigned long)_sinittext)) {
3753 + mmu_mark_initmem_nx();
3754 +- else
3755 +- set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
3756 ++ } else {
3757 ++ set_memory_nx((unsigned long)_sinittext, numpages);
3758 ++ set_memory_rw((unsigned long)_sinittext, numpages);
3759 ++ }
3760 + }
3761 +
3762 + #ifdef CONFIG_STRICT_KERNEL_RWX
3763 +@@ -152,18 +154,14 @@ void mark_rodata_ro(void)
3764 + return;
3765 + }
3766 +
3767 +- numpages = PFN_UP((unsigned long)_etext) -
3768 +- PFN_DOWN((unsigned long)_stext);
3769 +-
3770 +- set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
3771 + /*
3772 +- * mark .rodata as read only. Use __init_begin rather than __end_rodata
3773 +- * to cover NOTES and EXCEPTION_TABLE.
3774 ++ * mark .text and .rodata as read only. Use __init_begin rather than
3775 ++ * __end_rodata to cover NOTES and EXCEPTION_TABLE.
3776 + */
3777 + numpages = PFN_UP((unsigned long)__init_begin) -
3778 +- PFN_DOWN((unsigned long)__start_rodata);
3779 ++ PFN_DOWN((unsigned long)_stext);
3780 +
3781 +- set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
3782 ++ set_memory_ro((unsigned long)_stext, numpages);
3783 +
3784 + // mark_initmem_nx() should have already run by now
3785 + ptdump_check_wx();
3786 +@@ -179,8 +177,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
3787 + return;
3788 +
3789 + if (enable)
3790 +- set_memory_attr(addr, numpages, PAGE_KERNEL);
3791 ++ set_memory_p(addr, numpages);
3792 + else
3793 +- set_memory_attr(addr, numpages, __pgprot(0));
3794 ++ set_memory_np(addr, numpages);
3795 + }
3796 + #endif /* CONFIG_DEBUG_PAGEALLOC */
3797 +diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
3798 +index e106909ff9c37..e7583fbcc8fa1 100644
3799 +--- a/arch/powerpc/perf/imc-pmu.c
3800 ++++ b/arch/powerpc/perf/imc-pmu.c
3801 +@@ -1457,7 +1457,11 @@ static int trace_imc_event_init(struct perf_event *event)
3802 +
3803 + event->hw.idx = -1;
3804 +
3805 +- event->pmu->task_ctx_nr = perf_hw_context;
3806 ++ /*
3807 ++ * There can only be a single PMU for perf_hw_context events which is assigned to
3808 ++ * core PMU. Hence use "perf_sw_context" for trace_imc.
3809 ++ */
3810 ++ event->pmu->task_ctx_nr = perf_sw_context;
3811 + event->destroy = reset_global_refc;
3812 + return 0;
3813 + }
3814 +diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
3815 +index f2ba837249d69..04a6abf14c295 100644
3816 +--- a/arch/powerpc/platforms/8xx/pic.c
3817 ++++ b/arch/powerpc/platforms/8xx/pic.c
3818 +@@ -153,6 +153,7 @@ int __init mpc8xx_pic_init(void)
3819 + if (mpc8xx_pic_host == NULL) {
3820 + printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
3821 + ret = -ENOMEM;
3822 ++ goto out;
3823 + }
3824 +
3825 + ret = 0;
3826 +diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
3827 +index b4386714494a6..e3d44b36ae98f 100644
3828 +--- a/arch/powerpc/platforms/powernv/rng.c
3829 ++++ b/arch/powerpc/platforms/powernv/rng.c
3830 +@@ -43,7 +43,11 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
3831 + unsigned long parity;
3832 +
3833 + /* Calculate the parity of the value */
3834 +- asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
3835 ++ asm (".machine push; \
3836 ++ .machine power7; \
3837 ++ popcntd %0,%1; \
3838 ++ .machine pop;"
3839 ++ : "=r" (parity) : "r" (val));
3840 +
3841 + /* xor our value with the previous mask */
3842 + val ^= rng->mask;
3843 +diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
3844 +index 90c9d3531694b..4ba8245681192 100644
3845 +--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
3846 ++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
3847 +@@ -78,6 +78,9 @@ int remove_phb_dynamic(struct pci_controller *phb)
3848 +
3849 + pseries_msi_free_domains(phb);
3850 +
3851 ++ /* Keep a reference so phb isn't freed yet */
3852 ++ get_device(&host_bridge->dev);
3853 ++
3854 + /* Remove the PCI bus and unregister the bridge device from sysfs */
3855 + phb->bus = NULL;
3856 + pci_remove_bus(b);
3857 +@@ -101,6 +104,7 @@ int remove_phb_dynamic(struct pci_controller *phb)
3858 + * the pcibios_free_controller_deferred() callback;
3859 + * see pseries_root_bridge_prepare().
3860 + */
3861 ++ put_device(&host_bridge->dev);
3862 +
3863 + return 0;
3864 + }
3865 +diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
3866 +index 8963eaffb1b7b..39186ad6b3c3a 100644
3867 +--- a/arch/powerpc/sysdev/fsl_gtm.c
3868 ++++ b/arch/powerpc/sysdev/fsl_gtm.c
3869 +@@ -86,7 +86,7 @@ static LIST_HEAD(gtms);
3870 + */
3871 + struct gtm_timer *gtm_get_timer16(void)
3872 + {
3873 +- struct gtm *gtm = NULL;
3874 ++ struct gtm *gtm;
3875 + int i;
3876 +
3877 + list_for_each_entry(gtm, &gtms, list_node) {
3878 +@@ -103,7 +103,7 @@ struct gtm_timer *gtm_get_timer16(void)
3879 + spin_unlock_irq(&gtm->lock);
3880 + }
3881 +
3882 +- if (gtm)
3883 ++ if (!list_empty(&gtms))
3884 + return ERR_PTR(-EBUSY);
3885 + return ERR_PTR(-ENODEV);
3886 + }
3887 +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
3888 +index 1ca5564bda9d0..89c86f32aff86 100644
3889 +--- a/arch/powerpc/sysdev/xive/common.c
3890 ++++ b/arch/powerpc/sysdev/xive/common.c
3891 +@@ -1708,20 +1708,20 @@ __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
3892 + static int __init xive_off(char *arg)
3893 + {
3894 + xive_cmdline_disabled = true;
3895 +- return 0;
3896 ++ return 1;
3897 + }
3898 + __setup("xive=off", xive_off);
3899 +
3900 + static int __init xive_store_eoi_cmdline(char *arg)
3901 + {
3902 + if (!arg)
3903 +- return -EINVAL;
3904 ++ return 1;
3905 +
3906 + if (strncmp(arg, "off", 3) == 0) {
3907 + pr_info("StoreEOI disabled on kernel command line\n");
3908 + xive_store_eoi = false;
3909 + }
3910 +- return 0;
3911 ++ return 1;
3912 + }
3913 + __setup("xive.store-eoi=", xive_store_eoi_cmdline);
3914 +
3915 +diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
3916 +index 984872f3d3a9b..b9e30df127fef 100644
3917 +--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
3918 ++++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
3919 +@@ -203,6 +203,8 @@
3920 + compatible = "jedec,spi-nor";
3921 + reg = <0>;
3922 + spi-max-frequency = <50000000>;
3923 ++ spi-tx-bus-width = <4>;
3924 ++ spi-rx-bus-width = <4>;
3925 + m25p,fast-read;
3926 + broken-flash-reset;
3927 + };
3928 +diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
3929 +index 7ba99b4da3042..8d23401b0bbb6 100644
3930 +--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
3931 ++++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
3932 +@@ -205,6 +205,8 @@
3933 + compatible = "jedec,spi-nor";
3934 + reg = <0>;
3935 + spi-max-frequency = <50000000>;
3936 ++ spi-tx-bus-width = <4>;
3937 ++ spi-rx-bus-width = <4>;
3938 + m25p,fast-read;
3939 + broken-flash-reset;
3940 + };
3941 +diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
3942 +index be9b12c9b374a..24fd83b43d9d5 100644
3943 +--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
3944 ++++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
3945 +@@ -213,6 +213,8 @@
3946 + compatible = "jedec,spi-nor";
3947 + reg = <0>;
3948 + spi-max-frequency = <50000000>;
3949 ++ spi-tx-bus-width = <4>;
3950 ++ spi-rx-bus-width = <4>;
3951 + m25p,fast-read;
3952 + broken-flash-reset;
3953 + };
3954 +diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
3955 +index 031c0c28f8195..25341f38292aa 100644
3956 +--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
3957 ++++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
3958 +@@ -178,6 +178,8 @@
3959 + compatible = "jedec,spi-nor";
3960 + reg = <0>;
3961 + spi-max-frequency = <50000000>;
3962 ++ spi-tx-bus-width = <4>;
3963 ++ spi-rx-bus-width = <4>;
3964 + m25p,fast-read;
3965 + broken-flash-reset;
3966 + };
3967 +diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h
3968 +index 4254ff2ff0494..1075beae1ac64 100644
3969 +--- a/arch/riscv/include/asm/module.lds.h
3970 ++++ b/arch/riscv/include/asm/module.lds.h
3971 +@@ -2,8 +2,8 @@
3972 + /* Copyright (C) 2017 Andes Technology Corporation */
3973 + #ifdef CONFIG_MODULE_SECTIONS
3974 + SECTIONS {
3975 +- .plt (NOLOAD) : { BYTE(0) }
3976 +- .got (NOLOAD) : { BYTE(0) }
3977 +- .got.plt (NOLOAD) : { BYTE(0) }
3978 ++ .plt : { BYTE(0) }
3979 ++ .got : { BYTE(0) }
3980 ++ .got.plt : { BYTE(0) }
3981 + }
3982 + #endif
3983 +diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
3984 +index 60da0dcacf145..74d888c8d631a 100644
3985 +--- a/arch/riscv/include/asm/thread_info.h
3986 ++++ b/arch/riscv/include/asm/thread_info.h
3987 +@@ -11,11 +11,17 @@
3988 + #include <asm/page.h>
3989 + #include <linux/const.h>
3990 +
3991 ++#ifdef CONFIG_KASAN
3992 ++#define KASAN_STACK_ORDER 1
3993 ++#else
3994 ++#define KASAN_STACK_ORDER 0
3995 ++#endif
3996 ++
3997 + /* thread information allocation */
3998 + #ifdef CONFIG_64BIT
3999 +-#define THREAD_SIZE_ORDER (2)
4000 ++#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
4001 + #else
4002 +-#define THREAD_SIZE_ORDER (1)
4003 ++#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER)
4004 + #endif
4005 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
4006 +
4007 +diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
4008 +index dae29cbfe550b..7f2ad008274f3 100644
4009 +--- a/arch/riscv/kernel/cpu_ops_sbi.c
4010 ++++ b/arch/riscv/kernel/cpu_ops_sbi.c
4011 +@@ -21,7 +21,7 @@ const struct cpu_operations cpu_ops_sbi;
4012 + * be invoked from multiple threads in parallel. Define a per cpu data
4013 + * to handle that.
4014 + */
4015 +-DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
4016 ++static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
4017 +
4018 + static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
4019 + unsigned long priv)
4020 +diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
4021 +index 1fc075b8f764a..3348a61de7d99 100644
4022 +--- a/arch/riscv/kernel/perf_callchain.c
4023 ++++ b/arch/riscv/kernel/perf_callchain.c
4024 +@@ -15,8 +15,8 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
4025 + {
4026 + struct stackframe buftail;
4027 + unsigned long ra = 0;
4028 +- unsigned long *user_frame_tail =
4029 +- (unsigned long *)(fp - sizeof(struct stackframe));
4030 ++ unsigned long __user *user_frame_tail =
4031 ++ (unsigned long __user *)(fp - sizeof(struct stackframe));
4032 +
4033 + /* Check accessibility of one struct frame_tail beyond */
4034 + if (!access_ok(user_frame_tail, sizeof(buftail)))
4035 +@@ -68,7 +68,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
4036 +
4037 + static bool fill_callchain(void *entry, unsigned long pc)
4038 + {
4039 +- return perf_callchain_store(entry, pc);
4040 ++ return perf_callchain_store(entry, pc) == 0;
4041 + }
4042 +
4043 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
4044 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
4045 +index 2296b1ff1e023..4e3db4004bfdc 100644
4046 +--- a/arch/s390/kvm/kvm-s390.c
4047 ++++ b/arch/s390/kvm/kvm-s390.c
4048 +@@ -3869,14 +3869,12 @@ retry:
4049 + return 0;
4050 + }
4051 +
4052 +-void kvm_s390_set_tod_clock(struct kvm *kvm,
4053 +- const struct kvm_s390_vm_tod_clock *gtod)
4054 ++static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4055 + {
4056 + struct kvm_vcpu *vcpu;
4057 + union tod_clock clk;
4058 + unsigned long i;
4059 +
4060 +- mutex_lock(&kvm->lock);
4061 + preempt_disable();
4062 +
4063 + store_tod_clock_ext(&clk);
4064 +@@ -3897,7 +3895,22 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
4065 +
4066 + kvm_s390_vcpu_unblock_all(kvm);
4067 + preempt_enable();
4068 ++}
4069 ++
4070 ++void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4071 ++{
4072 ++ mutex_lock(&kvm->lock);
4073 ++ __kvm_s390_set_tod_clock(kvm, gtod);
4074 ++ mutex_unlock(&kvm->lock);
4075 ++}
4076 ++
4077 ++int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4078 ++{
4079 ++ if (!mutex_trylock(&kvm->lock))
4080 ++ return 0;
4081 ++ __kvm_s390_set_tod_clock(kvm, gtod);
4082 + mutex_unlock(&kvm->lock);
4083 ++ return 1;
4084 + }
4085 +
4086 + /**
4087 +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
4088 +index 098831e815e6c..f2c910763d7fa 100644
4089 +--- a/arch/s390/kvm/kvm-s390.h
4090 ++++ b/arch/s390/kvm/kvm-s390.h
4091 +@@ -349,8 +349,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
4092 + int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
4093 +
4094 + /* implemented in kvm-s390.c */
4095 +-void kvm_s390_set_tod_clock(struct kvm *kvm,
4096 +- const struct kvm_s390_vm_tod_clock *gtod);
4097 ++void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
4098 ++int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
4099 + long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
4100 + int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
4101 + int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
4102 +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
4103 +index 417154b314a64..6a765fe22eafc 100644
4104 +--- a/arch/s390/kvm/priv.c
4105 ++++ b/arch/s390/kvm/priv.c
4106 +@@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
4107 + return kvm_s390_inject_prog_cond(vcpu, rc);
4108 +
4109 + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
4110 +- kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
4111 ++ /*
4112 ++ * To set the TOD clock the kvm lock must be taken, but the vcpu lock
4113 ++ * is already held in handle_set_clock. The usual lock order is the
4114 ++ * opposite. As SCK is deprecated and should not be used in several
4115 ++ * cases, for example when the multiple epoch facility or TOD clock
4116 ++ * steering facility is installed (see Principles of Operation), a
4117 ++ * slow path can be used. If the lock can not be taken via try_lock,
4118 ++ * the instruction will be retried via -EAGAIN at a later point in
4119 ++ * time.
4120 ++ */
4121 ++ if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
4122 ++ kvm_s390_retry_instr(vcpu);
4123 ++ return -EAGAIN;
4124 ++ }
4125 +
4126 + kvm_s390_set_psw_cc(vcpu, 0);
4127 + return 0;
4128 +diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
4129 +index ffab16369beac..74f80443b195f 100644
4130 +--- a/arch/sparc/kernel/signal_32.c
4131 ++++ b/arch/sparc/kernel/signal_32.c
4132 +@@ -65,7 +65,7 @@ struct rt_signal_frame {
4133 + */
4134 + static inline bool invalid_frame_pointer(void __user *fp, int fplen)
4135 + {
4136 +- if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
4137 ++ if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen))
4138 + return true;
4139 +
4140 + return false;
4141 +diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
4142 +index 6ead1e2404576..8ca67a6926830 100644
4143 +--- a/arch/um/drivers/mconsole_kern.c
4144 ++++ b/arch/um/drivers/mconsole_kern.c
4145 +@@ -224,7 +224,7 @@ void mconsole_go(struct mc_request *req)
4146 +
4147 + void mconsole_stop(struct mc_request *req)
4148 + {
4149 +- deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
4150 ++ block_signals();
4151 + os_set_fd_block(req->originating_fd, 1);
4152 + mconsole_reply(req, "stopped", 0, 0);
4153 + for (;;) {
4154 +@@ -247,6 +247,7 @@ void mconsole_stop(struct mc_request *req)
4155 + }
4156 + os_set_fd_block(req->originating_fd, 0);
4157 + mconsole_reply(req, "", 0, 0);
4158 ++ unblock_signals();
4159 + }
4160 +
4161 + static DEFINE_SPINLOCK(mc_devices_lock);
4162 +diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
4163 +index 71fae5a09e56d..2077ce7a56479 100644
4164 +--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
4165 ++++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
4166 +@@ -297,7 +297,7 @@ ___
4167 + $code.=<<___;
4168 + mov \$1,%eax
4169 + .Lno_key:
4170 +- ret
4171 ++ RET
4172 + ___
4173 + &end_function("poly1305_init_x86_64");
4174 +
4175 +@@ -373,7 +373,7 @@ $code.=<<___;
4176 + .cfi_adjust_cfa_offset -48
4177 + .Lno_data:
4178 + .Lblocks_epilogue:
4179 +- ret
4180 ++ RET
4181 + .cfi_endproc
4182 + ___
4183 + &end_function("poly1305_blocks_x86_64");
4184 +@@ -399,7 +399,7 @@ $code.=<<___;
4185 + mov %rax,0($mac) # write result
4186 + mov %rcx,8($mac)
4187 +
4188 +- ret
4189 ++ RET
4190 + ___
4191 + &end_function("poly1305_emit_x86_64");
4192 + if ($avx) {
4193 +@@ -429,7 +429,7 @@ ___
4194 + &poly1305_iteration();
4195 + $code.=<<___;
4196 + pop $ctx
4197 +- ret
4198 ++ RET
4199 + .size __poly1305_block,.-__poly1305_block
4200 +
4201 + .type __poly1305_init_avx,\@abi-omnipotent
4202 +@@ -594,7 +594,7 @@ __poly1305_init_avx:
4203 +
4204 + lea -48-64($ctx),$ctx # size [de-]optimization
4205 + pop %rbp
4206 +- ret
4207 ++ RET
4208 + .size __poly1305_init_avx,.-__poly1305_init_avx
4209 + ___
4210 +
4211 +@@ -747,7 +747,7 @@ $code.=<<___;
4212 + .cfi_restore %rbp
4213 + .Lno_data_avx:
4214 + .Lblocks_avx_epilogue:
4215 +- ret
4216 ++ RET
4217 + .cfi_endproc
4218 +
4219 + .align 32
4220 +@@ -1452,7 +1452,7 @@ $code.=<<___ if (!$win64);
4221 + ___
4222 + $code.=<<___;
4223 + vzeroupper
4224 +- ret
4225 ++ RET
4226 + .cfi_endproc
4227 + ___
4228 + &end_function("poly1305_blocks_avx");
4229 +@@ -1508,7 +1508,7 @@ $code.=<<___;
4230 + mov %rax,0($mac) # write result
4231 + mov %rcx,8($mac)
4232 +
4233 +- ret
4234 ++ RET
4235 + ___
4236 + &end_function("poly1305_emit_avx");
4237 +
4238 +@@ -1675,7 +1675,7 @@ $code.=<<___;
4239 + .cfi_restore %rbp
4240 + .Lno_data_avx2$suffix:
4241 + .Lblocks_avx2_epilogue$suffix:
4242 +- ret
4243 ++ RET
4244 + .cfi_endproc
4245 +
4246 + .align 32
4247 +@@ -2201,7 +2201,7 @@ $code.=<<___ if (!$win64);
4248 + ___
4249 + $code.=<<___;
4250 + vzeroupper
4251 +- ret
4252 ++ RET
4253 + .cfi_endproc
4254 + ___
4255 + if($avx > 2 && $avx512) {
4256 +@@ -2792,7 +2792,7 @@ $code.=<<___ if (!$win64);
4257 + .cfi_def_cfa_register %rsp
4258 + ___
4259 + $code.=<<___;
4260 +- ret
4261 ++ RET
4262 + .cfi_endproc
4263 + ___
4264 +
4265 +@@ -2893,7 +2893,7 @@ $code.=<<___ if ($flavour =~ /elf32/);
4266 + ___
4267 + $code.=<<___;
4268 + mov \$1,%eax
4269 +- ret
4270 ++ RET
4271 + .size poly1305_init_base2_44,.-poly1305_init_base2_44
4272 + ___
4273 + {
4274 +@@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52:
4275 + jnz .Lblocks_vpmadd52_4x
4276 +
4277 + .Lno_data_vpmadd52:
4278 +- ret
4279 ++ RET
4280 + .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
4281 + ___
4282 + }
4283 +@@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x:
4284 + vzeroall
4285 +
4286 + .Lno_data_vpmadd52_4x:
4287 +- ret
4288 ++ RET
4289 + .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
4290 + ___
4291 + }
4292 +@@ -3824,7 +3824,7 @@ $code.=<<___;
4293 + vzeroall
4294 +
4295 + .Lno_data_vpmadd52_8x:
4296 +- ret
4297 ++ RET
4298 + .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
4299 + ___
4300 + }
4301 +@@ -3861,7 +3861,7 @@ poly1305_emit_base2_44:
4302 + mov %rax,0($mac) # write result
4303 + mov %rcx,8($mac)
4304 +
4305 +- ret
4306 ++ RET
4307 + .size poly1305_emit_base2_44,.-poly1305_emit_base2_44
4308 + ___
4309 + } } }
4310 +@@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad:
4311 +
4312 + .Ldone_enc:
4313 + mov $otp,%rax
4314 +- ret
4315 ++ RET
4316 + .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
4317 +
4318 + .globl xor128_decrypt_n_pad
4319 +@@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad:
4320 +
4321 + .Ldone_dec:
4322 + mov $otp,%rax
4323 +- ret
4324 ++ RET
4325 + .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
4326 + ___
4327 + }
4328 +@@ -4109,7 +4109,7 @@ avx_handler:
4329 + pop %rbx
4330 + pop %rdi
4331 + pop %rsi
4332 +- ret
4333 ++ RET
4334 + .size avx_handler,.-avx_handler
4335 +
4336 + .section .pdata
4337 +diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
4338 +index 2d33bba9a1440..215aed65e9782 100644
4339 +--- a/arch/x86/events/intel/pt.c
4340 ++++ b/arch/x86/events/intel/pt.c
4341 +@@ -472,7 +472,7 @@ static u64 pt_config_filters(struct perf_event *event)
4342 + pt->filters.filter[range].msr_b = filter->msr_b;
4343 + }
4344 +
4345 +- rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
4346 ++ rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
4347 + }
4348 +
4349 + return rtit_ctl;
4350 +diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
4351 +index bb2fb78523cee..ab572d8def2b7 100644
4352 +--- a/arch/x86/include/asm/svm.h
4353 ++++ b/arch/x86/include/asm/svm.h
4354 +@@ -222,17 +222,19 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
4355 +
4356 +
4357 + /* AVIC */
4358 +-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
4359 ++#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFFULL)
4360 + #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
4361 + #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
4362 +
4363 +-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
4364 ++#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
4365 + #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
4366 + #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
4367 + #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
4368 +-#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
4369 ++#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL)
4370 +
4371 +-#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
4372 ++#define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
4373 ++
4374 ++#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
4375 +
4376 + #define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
4377 + #define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
4378 +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
4379 +index 7c7824ae78622..dc6d5e98d2963 100644
4380 +--- a/arch/x86/kernel/fpu/xstate.c
4381 ++++ b/arch/x86/kernel/fpu/xstate.c
4382 +@@ -1639,7 +1639,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
4383 +
4384 + perm = guest ? &fpu->guest_perm : &fpu->perm;
4385 + /* Pairs with the READ_ONCE() in xstate_get_group_perm() */
4386 +- WRITE_ONCE(perm->__state_perm, requested);
4387 ++ WRITE_ONCE(perm->__state_perm, mask);
4388 + /* Protected by sighand lock */
4389 + perm->__state_size = ksize;
4390 + perm->__user_state_size = usize;
4391 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
4392 +index d77481ecb0d5f..ed8a13ac4ab23 100644
4393 +--- a/arch/x86/kernel/kvm.c
4394 ++++ b/arch/x86/kernel/kvm.c
4395 +@@ -517,7 +517,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
4396 + } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
4397 + ipi_bitmap <<= min - apic_id;
4398 + min = apic_id;
4399 +- } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
4400 ++ } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
4401 + max = apic_id < max ? max : apic_id;
4402 + } else {
4403 + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
4404 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
4405 +index e86d610dc6b7a..02d061a06aa19 100644
4406 +--- a/arch/x86/kvm/emulate.c
4407 ++++ b/arch/x86/kvm/emulate.c
4408 +@@ -1623,11 +1623,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
4409 + goto exception;
4410 + }
4411 +
4412 +- if (!seg_desc.p) {
4413 +- err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
4414 +- goto exception;
4415 +- }
4416 +-
4417 + dpl = seg_desc.dpl;
4418 +
4419 + switch (seg) {
4420 +@@ -1667,6 +1662,10 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
4421 + case VCPU_SREG_TR:
4422 + if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
4423 + goto exception;
4424 ++ if (!seg_desc.p) {
4425 ++ err_vec = NP_VECTOR;
4426 ++ goto exception;
4427 ++ }
4428 + old_desc = seg_desc;
4429 + seg_desc.type |= 2; /* busy */
4430 + ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
4431 +@@ -1691,6 +1690,11 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
4432 + break;
4433 + }
4434 +
4435 ++ if (!seg_desc.p) {
4436 ++ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
4437 ++ goto exception;
4438 ++ }
4439 ++
4440 + if (seg_desc.s) {
4441 + /* mark segment as accessed */
4442 + if (!(seg_desc.type & 1)) {
4443 +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
4444 +index 6e38a7d22e97a..10bc257d3803b 100644
4445 +--- a/arch/x86/kvm/hyperv.c
4446 ++++ b/arch/x86/kvm/hyperv.c
4447 +@@ -236,7 +236,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
4448 + struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
4449 + int ret;
4450 +
4451 +- if (!synic->active && !host)
4452 ++ if (!synic->active && (!host || data))
4453 + return 1;
4454 +
4455 + trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
4456 +@@ -282,6 +282,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
4457 + case HV_X64_MSR_EOM: {
4458 + int i;
4459 +
4460 ++ if (!synic->active)
4461 ++ break;
4462 ++
4463 + for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
4464 + kvm_hv_notify_acked_sint(vcpu, i);
4465 + break;
4466 +@@ -446,6 +449,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
4467 + struct kvm_lapic_irq irq;
4468 + int ret, vector;
4469 +
4470 ++ if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
4471 ++ return -EINVAL;
4472 ++
4473 + if (sint >= ARRAY_SIZE(synic->sint))
4474 + return -EINVAL;
4475 +
4476 +@@ -658,7 +664,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
4477 + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
4478 + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
4479 +
4480 +- if (!synic->active && !host)
4481 ++ if (!synic->active && (!host || config))
4482 + return 1;
4483 +
4484 + if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
4485 +@@ -687,7 +693,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
4486 + struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
4487 + struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
4488 +
4489 +- if (!synic->active && !host)
4490 ++ if (!synic->active && (!host || count))
4491 + return 1;
4492 +
4493 + trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
4494 +@@ -1750,7 +1756,7 @@ struct kvm_hv_hcall {
4495 + sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
4496 + };
4497 +
4498 +-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
4499 ++static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
4500 + {
4501 + int i;
4502 + gpa_t gpa;
4503 +@@ -1765,7 +1771,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4504 + int sparse_banks_len;
4505 + bool all_cpus;
4506 +
4507 +- if (!ex) {
4508 ++ if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
4509 ++ hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
4510 + if (hc->fast) {
4511 + flush.address_space = hc->ingpa;
4512 + flush.flags = hc->outgpa;
4513 +@@ -1819,7 +1826,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4514 +
4515 + if (!all_cpus) {
4516 + if (hc->fast) {
4517 +- if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
4518 ++ /* XMM0 is already consumed, each XMM holds two sparse banks. */
4519 ++ if (sparse_banks_len > 2 * (HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
4520 + return HV_STATUS_INVALID_HYPERCALL_INPUT;
4521 + for (i = 0; i < sparse_banks_len; i += 2) {
4522 + sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
4523 +@@ -1875,7 +1883,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
4524 + }
4525 + }
4526 +
4527 +-static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
4528 ++static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
4529 + {
4530 + struct kvm *kvm = vcpu->kvm;
4531 + struct hv_send_ipi_ex send_ipi_ex;
4532 +@@ -1888,8 +1896,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4533 + int sparse_banks_len;
4534 + u32 vector;
4535 + bool all_cpus;
4536 ++ int i;
4537 +
4538 +- if (!ex) {
4539 ++ if (hc->code == HVCALL_SEND_IPI) {
4540 + if (!hc->fast) {
4541 + if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
4542 + sizeof(send_ipi))))
4543 +@@ -1908,9 +1917,15 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4544 +
4545 + trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
4546 + } else {
4547 +- if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
4548 +- sizeof(send_ipi_ex))))
4549 +- return HV_STATUS_INVALID_HYPERCALL_INPUT;
4550 ++ if (!hc->fast) {
4551 ++ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
4552 ++ sizeof(send_ipi_ex))))
4553 ++ return HV_STATUS_INVALID_HYPERCALL_INPUT;
4554 ++ } else {
4555 ++ send_ipi_ex.vector = (u32)hc->ingpa;
4556 ++ send_ipi_ex.vp_set.format = hc->outgpa;
4557 ++ send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
4558 ++ }
4559 +
4560 + trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
4561 + send_ipi_ex.vp_set.format,
4562 +@@ -1918,8 +1933,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4563 +
4564 + vector = send_ipi_ex.vector;
4565 + valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
4566 +- sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
4567 +- sizeof(sparse_banks[0]);
4568 ++ sparse_banks_len = bitmap_weight(&valid_bank_mask, 64);
4569 +
4570 + all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
4571 +
4572 +@@ -1929,12 +1943,27 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
4573 + if (!sparse_banks_len)
4574 + goto ret_success;
4575 +
4576 +- if (kvm_read_guest(kvm,
4577 +- hc->ingpa + offsetof(struct hv_send_ipi_ex,
4578 +- vp_set.bank_contents),
4579 +- sparse_banks,
4580 +- sparse_banks_len))
4581 +- return HV_STATUS_INVALID_HYPERCALL_INPUT;
4582 ++ if (!hc->fast) {
4583 ++ if (kvm_read_guest(kvm,
4584 ++ hc->ingpa + offsetof(struct hv_send_ipi_ex,
4585 ++ vp_set.bank_contents),
4586 ++ sparse_banks,
4587 ++ sparse_banks_len * sizeof(sparse_banks[0])))
4588 ++ return HV_STATUS_INVALID_HYPERCALL_INPUT;
4589 ++ } else {
4590 ++ /*
4591 ++ * The lower half of XMM0 is already consumed, each XMM holds
4592 ++ * two sparse banks.
4593 ++ */
4594 ++ if (sparse_banks_len > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
4595 ++ return HV_STATUS_INVALID_HYPERCALL_INPUT;
4596 ++ for (i = 0; i < sparse_banks_len; i++) {
4597 ++ if (i % 2)
4598 ++ sparse_banks[i] = sse128_lo(hc->xmm[(i + 1) / 2]);
4599 ++ else
4600 ++ sparse_banks[i] = sse128_hi(hc->xmm[i / 2]);
4601 ++ }
4602 ++ }
4603 + }
4604 +
4605 + check_and_send_ipi:
4606 +@@ -2096,6 +2125,7 @@ static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
4607 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
4608 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
4609 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
4610 ++ case HVCALL_SEND_IPI_EX:
4611 + return true;
4612 + }
4613 +
4614 +@@ -2247,46 +2277,28 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4615 + kvm_hv_hypercall_complete_userspace;
4616 + return 0;
4617 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
4618 +- if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
4619 +- ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4620 +- break;
4621 +- }
4622 +- ret = kvm_hv_flush_tlb(vcpu, &hc, false);
4623 +- break;
4624 +- case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
4625 +- if (unlikely(hc.rep)) {
4626 +- ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4627 +- break;
4628 +- }
4629 +- ret = kvm_hv_flush_tlb(vcpu, &hc, false);
4630 +- break;
4631 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
4632 + if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
4633 + ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4634 + break;
4635 + }
4636 +- ret = kvm_hv_flush_tlb(vcpu, &hc, true);
4637 ++ ret = kvm_hv_flush_tlb(vcpu, &hc);
4638 + break;
4639 ++ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
4640 + case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
4641 + if (unlikely(hc.rep)) {
4642 + ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4643 + break;
4644 + }
4645 +- ret = kvm_hv_flush_tlb(vcpu, &hc, true);
4646 ++ ret = kvm_hv_flush_tlb(vcpu, &hc);
4647 + break;
4648 + case HVCALL_SEND_IPI:
4649 +- if (unlikely(hc.rep)) {
4650 +- ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4651 +- break;
4652 +- }
4653 +- ret = kvm_hv_send_ipi(vcpu, &hc, false);
4654 +- break;
4655 + case HVCALL_SEND_IPI_EX:
4656 +- if (unlikely(hc.fast || hc.rep)) {
4657 ++ if (unlikely(hc.rep)) {
4658 + ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
4659 + break;
4660 + }
4661 +- ret = kvm_hv_send_ipi(vcpu, &hc, true);
4662 ++ ret = kvm_hv_send_ipi(vcpu, &hc);
4663 + break;
4664 + case HVCALL_POST_DEBUG_DATA:
4665 + case HVCALL_RETRIEVE_DEBUG_DATA:
4666 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
4667 +index 9322e6340a742..2a10d0033c964 100644
4668 +--- a/arch/x86/kvm/lapic.c
4669 ++++ b/arch/x86/kvm/lapic.c
4670 +@@ -992,6 +992,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
4671 + *r = -1;
4672 +
4673 + if (irq->shorthand == APIC_DEST_SELF) {
4674 ++ if (KVM_BUG_ON(!src, kvm)) {
4675 ++ *r = 0;
4676 ++ return true;
4677 ++ }
4678 + *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
4679 + return true;
4680 + }
4681 +@@ -2242,10 +2246,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
4682 +
4683 + void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
4684 + {
4685 +- struct kvm_lapic *apic = vcpu->arch.apic;
4686 +-
4687 +- apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
4688 +- | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
4689 ++ apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
4690 + }
4691 +
4692 + u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
4693 +diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
4694 +index e9fbb2c8bbe2d..c7070973f0de1 100644
4695 +--- a/arch/x86/kvm/mmu.h
4696 ++++ b/arch/x86/kvm/mmu.h
4697 +@@ -48,6 +48,7 @@
4698 + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)
4699 +
4700 + #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
4701 ++#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)
4702 +
4703 + static __always_inline u64 rsvd_bits(int s, int e)
4704 + {
4705 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
4706 +index 5b5bdac97c7b9..3821d5140ea31 100644
4707 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
4708 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
4709 +@@ -34,9 +34,8 @@
4710 + #define PT_HAVE_ACCESSED_DIRTY(mmu) true
4711 + #ifdef CONFIG_X86_64
4712 + #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
4713 +- #define CMPXCHG cmpxchg
4714 ++ #define CMPXCHG "cmpxchgq"
4715 + #else
4716 +- #define CMPXCHG cmpxchg64
4717 + #define PT_MAX_FULL_LEVELS 2
4718 + #endif
4719 + #elif PTTYPE == 32
4720 +@@ -52,7 +51,7 @@
4721 + #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
4722 + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
4723 + #define PT_HAVE_ACCESSED_DIRTY(mmu) true
4724 +- #define CMPXCHG cmpxchg
4725 ++ #define CMPXCHG "cmpxchgl"
4726 + #elif PTTYPE == PTTYPE_EPT
4727 + #define pt_element_t u64
4728 + #define guest_walker guest_walkerEPT
4729 +@@ -65,7 +64,9 @@
4730 + #define PT_GUEST_DIRTY_SHIFT 9
4731 + #define PT_GUEST_ACCESSED_SHIFT 8
4732 + #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
4733 +- #define CMPXCHG cmpxchg64
4734 ++ #ifdef CONFIG_X86_64
4735 ++ #define CMPXCHG "cmpxchgq"
4736 ++ #endif
4737 + #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
4738 + #else
4739 + #error Invalid PTTYPE value
4740 +@@ -147,43 +148,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4741 + pt_element_t __user *ptep_user, unsigned index,
4742 + pt_element_t orig_pte, pt_element_t new_pte)
4743 + {
4744 +- int npages;
4745 +- pt_element_t ret;
4746 +- pt_element_t *table;
4747 +- struct page *page;
4748 +-
4749 +- npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
4750 +- if (likely(npages == 1)) {
4751 +- table = kmap_atomic(page);
4752 +- ret = CMPXCHG(&table[index], orig_pte, new_pte);
4753 +- kunmap_atomic(table);
4754 +-
4755 +- kvm_release_page_dirty(page);
4756 +- } else {
4757 +- struct vm_area_struct *vma;
4758 +- unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
4759 +- unsigned long pfn;
4760 +- unsigned long paddr;
4761 +-
4762 +- mmap_read_lock(current->mm);
4763 +- vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
4764 +- if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
4765 +- mmap_read_unlock(current->mm);
4766 +- return -EFAULT;
4767 +- }
4768 +- pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
4769 +- paddr = pfn << PAGE_SHIFT;
4770 +- table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
4771 +- if (!table) {
4772 +- mmap_read_unlock(current->mm);
4773 +- return -EFAULT;
4774 +- }
4775 +- ret = CMPXCHG(&table[index], orig_pte, new_pte);
4776 +- memunmap(table);
4777 +- mmap_read_unlock(current->mm);
4778 +- }
4779 ++ signed char r;
4780 +
4781 +- return (ret != orig_pte);
4782 ++ if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
4783 ++ return -EFAULT;
4784 ++
4785 ++#ifdef CMPXCHG
4786 ++ asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
4787 ++ "setnz %b[r]\n"
4788 ++ "2:"
4789 ++ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
4790 ++ : [ptr] "+m" (*ptep_user),
4791 ++ [old] "+a" (orig_pte),
4792 ++ [r] "=q" (r)
4793 ++ : [new] "r" (new_pte)
4794 ++ : "memory");
4795 ++#else
4796 ++ asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
4797 ++ "setnz %b[r]\n"
4798 ++ "2:"
4799 ++ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
4800 ++ : [ptr] "+m" (*ptep_user),
4801 ++ [old] "+A" (orig_pte),
4802 ++ [r] "=q" (r)
4803 ++ : [new_lo] "b" ((u32)new_pte),
4804 ++ [new_hi] "c" ((u32)(new_pte >> 32))
4805 ++ : "memory");
4806 ++#endif
4807 ++
4808 ++ user_access_end();
4809 ++ return r;
4810 + }
4811 +
4812 + static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
4813 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
4814 +index bc9e3553fba2d..d2e69b2ddbbee 100644
4815 +--- a/arch/x86/kvm/mmu/tdp_mmu.c
4816 ++++ b/arch/x86/kvm/mmu/tdp_mmu.c
4817 +@@ -99,15 +99,18 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
4818 + }
4819 +
4820 + /*
4821 +- * Finds the next valid root after root (or the first valid root if root
4822 +- * is NULL), takes a reference on it, and returns that next root. If root
4823 +- * is not NULL, this thread should have already taken a reference on it, and
4824 +- * that reference will be dropped. If no valid root is found, this
4825 +- * function will return NULL.
4826 ++ * Returns the next root after @prev_root (or the first root if @prev_root is
4827 ++ * NULL). A reference to the returned root is acquired, and the reference to
4828 ++ * @prev_root is released (the caller obviously must hold a reference to
4829 ++ * @prev_root if it's non-NULL).
4830 ++ *
4831 ++ * If @only_valid is true, invalid roots are skipped.
4832 ++ *
4833 ++ * Returns NULL if the end of tdp_mmu_roots was reached.
4834 + */
4835 + static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
4836 + struct kvm_mmu_page *prev_root,
4837 +- bool shared)
4838 ++ bool shared, bool only_valid)
4839 + {
4840 + struct kvm_mmu_page *next_root;
4841 +
4842 +@@ -121,9 +124,14 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
4843 + next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
4844 + typeof(*next_root), link);
4845 +
4846 +- while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
4847 ++ while (next_root) {
4848 ++ if ((!only_valid || !next_root->role.invalid) &&
4849 ++ kvm_tdp_mmu_get_root(kvm, next_root))
4850 ++ break;
4851 ++
4852 + next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
4853 + &next_root->link, typeof(*next_root), link);
4854 ++ }
4855 +
4856 + rcu_read_unlock();
4857 +
4858 +@@ -143,13 +151,19 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
4859 + * mode. In the unlikely event that this thread must free a root, the lock
4860 + * will be temporarily dropped and reacquired in write mode.
4861 + */
4862 +-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
4863 +- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \
4864 +- _root; \
4865 +- _root = tdp_mmu_next_root(_kvm, _root, _shared)) \
4866 +- if (kvm_mmu_page_as_id(_root) != _as_id) { \
4867 ++#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
4868 ++ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
4869 ++ _root; \
4870 ++ _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
4871 ++ if (kvm_mmu_page_as_id(_root) != _as_id) { \
4872 + } else
4873 +
4874 ++#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
4875 ++ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
4876 ++
4877 ++#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
4878 ++ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false)
4879 ++
4880 + #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
4881 + list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \
4882 + lockdep_is_held_type(&kvm->mmu_lock, 0) || \
4883 +@@ -200,7 +214,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
4884 +
4885 + role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
4886 +
4887 +- /* Check for an existing root before allocating a new one. */
4888 ++ /*
4889 ++ * Check for an existing root before allocating a new one. Note, the
4890 ++ * role check prevents consuming an invalid root.
4891 ++ */
4892 + for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
4893 + if (root->role.word == role.word &&
4894 + kvm_tdp_mmu_get_root(kvm, root))
4895 +@@ -1032,13 +1049,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
4896 + bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
4897 + bool flush)
4898 + {
4899 +- struct kvm_mmu_page *root;
4900 +-
4901 +- for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
4902 +- flush = zap_gfn_range(kvm, root, range->start, range->end,
4903 +- range->may_block, flush, false);
4904 +-
4905 +- return flush;
4906 ++ return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
4907 ++ range->end, range->may_block, flush);
4908 + }
4909 +
4910 + typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
4911 +@@ -1221,7 +1233,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
4912 +
4913 + lockdep_assert_held_read(&kvm->mmu_lock);
4914 +
4915 +- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4916 ++ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4917 + spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
4918 + slot->base_gfn + slot->npages, min_level);
4919 +
4920 +@@ -1249,6 +1261,9 @@ retry:
4921 + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
4922 + continue;
4923 +
4924 ++ if (!is_shadow_present_pte(iter.old_spte))
4925 ++ continue;
4926 ++
4927 + if (spte_ad_need_write_protect(iter.old_spte)) {
4928 + if (is_writable_pte(iter.old_spte))
4929 + new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
4930 +@@ -1291,7 +1306,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
4931 +
4932 + lockdep_assert_held_read(&kvm->mmu_lock);
4933 +
4934 +- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4935 ++ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4936 + spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
4937 + slot->base_gfn + slot->npages);
4938 +
4939 +@@ -1416,7 +1431,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
4940 +
4941 + lockdep_assert_held_read(&kvm->mmu_lock);
4942 +
4943 +- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4944 ++ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
4945 + zap_collapsible_spte_range(kvm, root, slot);
4946 + }
4947 +
4948 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
4949 +index 3899004a5d91e..08c917511fedd 100644
4950 +--- a/arch/x86/kvm/mmu/tdp_mmu.h
4951 ++++ b/arch/x86/kvm/mmu/tdp_mmu.h
4952 +@@ -10,9 +10,6 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
4953 + __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
4954 + struct kvm_mmu_page *root)
4955 + {
4956 +- if (root->role.invalid)
4957 +- return false;
4958 +-
4959 + return refcount_inc_not_zero(&root->tdp_mmu_root_count);
4960 + }
4961 +
4962 +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
4963 +index fb3e207913388..7ef229011db8a 100644
4964 +--- a/arch/x86/kvm/svm/avic.c
4965 ++++ b/arch/x86/kvm/svm/avic.c
4966 +@@ -783,7 +783,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4967 + {
4968 + struct kvm_kernel_irq_routing_entry *e;
4969 + struct kvm_irq_routing_table *irq_rt;
4970 +- int idx, ret = -EINVAL;
4971 ++ int idx, ret = 0;
4972 +
4973 + if (!kvm_arch_has_assigned_device(kvm) ||
4974 + !irq_remapping_cap(IRQ_POSTING_CAP))
4975 +@@ -794,7 +794,13 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4976 +
4977 + idx = srcu_read_lock(&kvm->irq_srcu);
4978 + irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4979 +- WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4980 ++
4981 ++ if (guest_irq >= irq_rt->nr_rt_entries ||
4982 ++ hlist_empty(&irq_rt->map[guest_irq])) {
4983 ++ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
4984 ++ guest_irq, irq_rt->nr_rt_entries);
4985 ++ goto out;
4986 ++ }
4987 +
4988 + hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4989 + struct vcpu_data vcpu_info;
4990 +@@ -927,17 +933,12 @@ out:
4991 + void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
4992 + {
4993 + u64 entry;
4994 +- /* ID = 0xff (broadcast), ID > 0xff (reserved) */
4995 + int h_physical_id = kvm_cpu_get_apicid(cpu);
4996 + struct vcpu_svm *svm = to_svm(vcpu);
4997 +
4998 + lockdep_assert_preemption_disabled();
4999 +
5000 +- /*
5001 +- * Since the host physical APIC id is 8 bits,
5002 +- * we can support host APIC ID upto 255.
5003 +- */
5004 +- if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
5005 ++ if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
5006 + return;
5007 +
5008 + /*
5009 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
5010 +index 17b53457d8664..fef9758525826 100644
5011 +--- a/arch/x86/kvm/svm/sev.c
5012 ++++ b/arch/x86/kvm/svm/sev.c
5013 +@@ -2358,7 +2358,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
5014 + memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
5015 + }
5016 +
5017 +-static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
5018 ++static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
5019 + {
5020 + struct kvm_vcpu *vcpu;
5021 + struct ghcb *ghcb;
5022 +@@ -2463,7 +2463,7 @@ static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
5023 + goto vmgexit_err;
5024 + }
5025 +
5026 +- return true;
5027 ++ return 0;
5028 +
5029 + vmgexit_err:
5030 + vcpu = &svm->vcpu;
5031 +@@ -2486,7 +2486,8 @@ vmgexit_err:
5032 + ghcb_set_sw_exit_info_1(ghcb, 2);
5033 + ghcb_set_sw_exit_info_2(ghcb, reason);
5034 +
5035 +- return false;
5036 ++ /* Resume the guest to "return" the error code. */
5037 ++ return 1;
5038 + }
5039 +
5040 + void sev_es_unmap_ghcb(struct vcpu_svm *svm)
5041 +@@ -2545,7 +2546,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
5042 + }
5043 +
5044 + #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
5045 +-static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
5046 ++static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
5047 + {
5048 + struct vmcb_control_area *control = &svm->vmcb->control;
5049 + struct ghcb *ghcb = svm->sev_es.ghcb;
5050 +@@ -2598,14 +2599,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
5051 + }
5052 + scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
5053 + if (!scratch_va)
5054 +- goto e_scratch;
5055 ++ return -ENOMEM;
5056 +
5057 + if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
5058 + /* Unable to copy scratch area from guest */
5059 + pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
5060 +
5061 + kvfree(scratch_va);
5062 +- goto e_scratch;
5063 ++ return -EFAULT;
5064 + }
5065 +
5066 + /*
5067 +@@ -2621,13 +2622,13 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
5068 + svm->sev_es.ghcb_sa = scratch_va;
5069 + svm->sev_es.ghcb_sa_len = len;
5070 +
5071 +- return true;
5072 ++ return 0;
5073 +
5074 + e_scratch:
5075 + ghcb_set_sw_exit_info_1(ghcb, 2);
5076 + ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
5077 +
5078 +- return false;
5079 ++ return 1;
5080 + }
5081 +
5082 + static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
5083 +@@ -2765,17 +2766,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
5084 +
5085 + exit_code = ghcb_get_sw_exit_code(ghcb);
5086 +
5087 +- if (!sev_es_validate_vmgexit(svm))
5088 +- return 1;
5089 ++ ret = sev_es_validate_vmgexit(svm);
5090 ++ if (ret)
5091 ++ return ret;
5092 +
5093 + sev_es_sync_from_ghcb(svm);
5094 + ghcb_set_sw_exit_info_1(ghcb, 0);
5095 + ghcb_set_sw_exit_info_2(ghcb, 0);
5096 +
5097 +- ret = 1;
5098 + switch (exit_code) {
5099 + case SVM_VMGEXIT_MMIO_READ:
5100 +- if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
5101 ++ ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
5102 ++ if (ret)
5103 + break;
5104 +
5105 + ret = kvm_sev_es_mmio_read(vcpu,
5106 +@@ -2784,7 +2786,8 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
5107 + svm->sev_es.ghcb_sa);
5108 + break;
5109 + case SVM_VMGEXIT_MMIO_WRITE:
5110 +- if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
5111 ++ ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
5112 ++ if (ret)
5113 + break;
5114 +
5115 + ret = kvm_sev_es_mmio_write(vcpu,
5116 +@@ -2817,6 +2820,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
5117 + ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
5118 + }
5119 +
5120 ++ ret = 1;
5121 + break;
5122 + }
5123 + case SVM_VMGEXIT_UNSUPPORTED_EVENT:
5124 +@@ -2836,6 +2840,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
5125 + {
5126 + int count;
5127 + int bytes;
5128 ++ int r;
5129 +
5130 + if (svm->vmcb->control.exit_info_2 > INT_MAX)
5131 + return -EINVAL;
5132 +@@ -2844,8 +2849,9 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
5133 + if (unlikely(check_mul_overflow(count, size, &bytes)))
5134 + return -EINVAL;
5135 +
5136 +- if (!setup_vmgexit_scratch(svm, in, bytes))
5137 +- return 1;
5138 ++ r = setup_vmgexit_scratch(svm, in, bytes);
5139 ++ if (r)
5140 ++ return r;
5141 +
5142 + return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
5143 + count, in);
5144 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
5145 +index eb4029660bd9f..9b6166348c94f 100644
5146 +--- a/arch/x86/kvm/x86.c
5147 ++++ b/arch/x86/kvm/x86.c
5148 +@@ -1656,8 +1656,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
5149 + return r;
5150 + }
5151 +
5152 +- /* Update reserved bits */
5153 +- if ((efer ^ old_efer) & EFER_NX)
5154 ++ if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
5155 + kvm_mmu_reset_context(vcpu);
5156 +
5157 + return 0;
5158 +diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
5159 +index df50451d94ef7..3e2f33fc33de2 100644
5160 +--- a/arch/x86/lib/iomem.c
5161 ++++ b/arch/x86/lib/iomem.c
5162 +@@ -22,7 +22,7 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
5163 + : "memory");
5164 + }
5165 +
5166 +-void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
5167 ++static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
5168 + {
5169 + if (unlikely(!n))
5170 + return;
5171 +@@ -38,9 +38,8 @@ void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
5172 + }
5173 + rep_movs(to, (const void *)from, n);
5174 + }
5175 +-EXPORT_SYMBOL(memcpy_fromio);
5176 +
5177 +-void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
5178 ++static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
5179 + {
5180 + if (unlikely(!n))
5181 + return;
5182 +@@ -56,14 +55,64 @@ void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
5183 + }
5184 + rep_movs((void *)to, (const void *) from, n);
5185 + }
5186 ++
5187 ++static void unrolled_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
5188 ++{
5189 ++ const volatile char __iomem *in = from;
5190 ++ char *out = to;
5191 ++ int i;
5192 ++
5193 ++ for (i = 0; i < n; ++i)
5194 ++ out[i] = readb(&in[i]);
5195 ++}
5196 ++
5197 ++static void unrolled_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
5198 ++{
5199 ++ volatile char __iomem *out = to;
5200 ++ const char *in = from;
5201 ++ int i;
5202 ++
5203 ++ for (i = 0; i < n; ++i)
5204 ++ writeb(in[i], &out[i]);
5205 ++}
5206 ++
5207 ++static void unrolled_memset_io(volatile void __iomem *a, int b, size_t c)
5208 ++{
5209 ++ volatile char __iomem *mem = a;
5210 ++ int i;
5211 ++
5212 ++ for (i = 0; i < c; ++i)
5213 ++ writeb(b, &mem[i]);
5214 ++}
5215 ++
5216 ++void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
5217 ++{
5218 ++ if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
5219 ++ unrolled_memcpy_fromio(to, from, n);
5220 ++ else
5221 ++ string_memcpy_fromio(to, from, n);
5222 ++}
5223 ++EXPORT_SYMBOL(memcpy_fromio);
5224 ++
5225 ++void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
5226 ++{
5227 ++ if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO))
5228 ++ unrolled_memcpy_toio(to, from, n);
5229 ++ else
5230 ++ string_memcpy_toio(to, from, n);
5231 ++}
5232 + EXPORT_SYMBOL(memcpy_toio);
5233 +
5234 + void memset_io(volatile void __iomem *a, int b, size_t c)
5235 + {
5236 +- /*
5237 +- * TODO: memset can mangle the IO patterns quite a bit.
5238 +- * perhaps it would be better to use a dumb one:
5239 +- */
5240 +- memset((void *)a, b, c);
5241 ++ if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {
5242 ++ unrolled_memset_io(a, b, c);
5243 ++ } else {
5244 ++ /*
5245 ++ * TODO: memset can mangle the IO patterns quite a bit.
5246 ++ * perhaps it would be better to use a dumb one:
5247 ++ */
5248 ++ memset((void *)a, b, c);
5249 ++ }
5250 + }
5251 + EXPORT_SYMBOL(memset_io);
5252 +diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
5253 +index 89dd6b1708b04..21ecbe754cb2f 100644
5254 +--- a/arch/x86/xen/pmu.c
5255 ++++ b/arch/x86/xen/pmu.c
5256 +@@ -506,10 +506,7 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
5257 + return ret;
5258 + }
5259 +
5260 +-bool is_xen_pmu(int cpu)
5261 +-{
5262 +- return (get_xenpmu_data() != NULL);
5263 +-}
5264 ++bool is_xen_pmu;
5265 +
5266 + void xen_pmu_init(int cpu)
5267 + {
5268 +@@ -520,7 +517,7 @@ void xen_pmu_init(int cpu)
5269 +
5270 + BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);
5271 +
5272 +- if (xen_hvm_domain())
5273 ++ if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
5274 + return;
5275 +
5276 + xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
5277 +@@ -541,7 +538,8 @@ void xen_pmu_init(int cpu)
5278 + per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
5279 + per_cpu(xenpmu_shared, cpu).flags = 0;
5280 +
5281 +- if (cpu == 0) {
5282 ++ if (!is_xen_pmu) {
5283 ++ is_xen_pmu = true;
5284 + perf_register_guest_info_callbacks(&xen_guest_cbs);
5285 + xen_pmu_arch_init();
5286 + }
5287 +diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
5288 +index 0e83a160589bc..65c58894fc79f 100644
5289 +--- a/arch/x86/xen/pmu.h
5290 ++++ b/arch/x86/xen/pmu.h
5291 +@@ -4,6 +4,8 @@
5292 +
5293 + #include <xen/interface/xenpmu.h>
5294 +
5295 ++extern bool is_xen_pmu;
5296 ++
5297 + irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
5298 + #ifdef CONFIG_XEN_HAVE_VPMU
5299 + void xen_pmu_init(int cpu);
5300 +@@ -12,7 +14,6 @@ void xen_pmu_finish(int cpu);
5301 + static inline void xen_pmu_init(int cpu) {}
5302 + static inline void xen_pmu_finish(int cpu) {}
5303 + #endif
5304 +-bool is_xen_pmu(int cpu);
5305 + bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
5306 + bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
5307 + int pmu_apic_update(uint32_t reg);
5308 +diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
5309 +index 4a6019238ee7d..688aa8b6ae29a 100644
5310 +--- a/arch/x86/xen/smp_pv.c
5311 ++++ b/arch/x86/xen/smp_pv.c
5312 +@@ -129,7 +129,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
5313 + per_cpu(xen_irq_work, cpu).irq = rc;
5314 + per_cpu(xen_irq_work, cpu).name = callfunc_name;
5315 +
5316 +- if (is_xen_pmu(cpu)) {
5317 ++ if (is_xen_pmu) {
5318 + pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
5319 + rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
5320 + xen_pmu_irq_handler,
5321 +diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
5322 +index bd5aeb7955675..a63eca1266577 100644
5323 +--- a/arch/xtensa/include/asm/pgtable.h
5324 ++++ b/arch/xtensa/include/asm/pgtable.h
5325 +@@ -411,6 +411,10 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
5326 +
5327 + typedef pte_t *pte_addr_t;
5328 +
5329 ++void update_mmu_tlb(struct vm_area_struct *vma,
5330 ++ unsigned long address, pte_t *ptep);
5331 ++#define __HAVE_ARCH_UPDATE_MMU_TLB
5332 ++
5333 + #endif /* !defined (__ASSEMBLY__) */
5334 +
5335 + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
5336 +diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
5337 +index 37d3e9887fe7b..d68987d703e75 100644
5338 +--- a/arch/xtensa/include/asm/processor.h
5339 ++++ b/arch/xtensa/include/asm/processor.h
5340 +@@ -246,8 +246,8 @@ extern unsigned long __get_wchan(struct task_struct *p);
5341 +
5342 + #define xtensa_set_sr(x, sr) \
5343 + ({ \
5344 +- unsigned int v = (unsigned int)(x); \
5345 +- __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
5346 ++ __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \
5347 ++ "a"((unsigned int)(x))); \
5348 + })
5349 +
5350 + #define xtensa_get_sr(sr) \
5351 +diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
5352 +index 61cf6497a646b..0dde21e0d3de4 100644
5353 +--- a/arch/xtensa/kernel/jump_label.c
5354 ++++ b/arch/xtensa/kernel/jump_label.c
5355 +@@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz)
5356 + .data = data,
5357 + };
5358 + stop_machine_cpuslocked(patch_text_stop_machine,
5359 +- &patch, NULL);
5360 ++ &patch, cpu_online_mask);
5361 + } else {
5362 + unsigned long flags;
5363 +
5364 +diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
5365 +index 9f38437427264..b702c0908b1f6 100644
5366 +--- a/arch/xtensa/kernel/mxhead.S
5367 ++++ b/arch/xtensa/kernel/mxhead.S
5368 +@@ -37,11 +37,13 @@ _SetupOCD:
5369 + * xt-gdb to single step via DEBUG exceptions received directly
5370 + * by ocd.
5371 + */
5372 ++#if XCHAL_HAVE_WINDOWED
5373 + movi a1, 1
5374 + movi a0, 0
5375 + wsr a1, windowstart
5376 + wsr a0, windowbase
5377 + rsync
5378 ++#endif
5379 +
5380 + movi a1, LOCKLEVEL
5381 + wsr a1, ps
5382 +diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
5383 +index f436cf2efd8b7..27a477dae2322 100644
5384 +--- a/arch/xtensa/mm/tlb.c
5385 ++++ b/arch/xtensa/mm/tlb.c
5386 +@@ -162,6 +162,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
5387 + }
5388 + }
5389 +
5390 ++void update_mmu_tlb(struct vm_area_struct *vma,
5391 ++ unsigned long address, pte_t *ptep)
5392 ++{
5393 ++ local_flush_tlb_page(vma, address);
5394 ++}
5395 ++
5396 + #ifdef CONFIG_DEBUG_TLB_SANITY
5397 +
5398 + static unsigned get_pte_for_vaddr(unsigned vaddr)
5399 +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
5400 +index 24a5c5329bcd0..809bc612d96b3 100644
5401 +--- a/block/bfq-cgroup.c
5402 ++++ b/block/bfq-cgroup.c
5403 +@@ -646,6 +646,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5404 + {
5405 + struct bfq_entity *entity = &bfqq->entity;
5406 +
5407 ++ /*
5408 ++ * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group
5409 ++ * until elevator exit.
5410 ++ */
5411 ++ if (bfqq == &bfqd->oom_bfqq)
5412 ++ return;
5413 + /*
5414 + * Get extra reference to prevent bfqq from being freed in
5415 + * next possible expire or deactivate.
5416 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
5417 +index 36a66e97e3c28..1dff82d34b44b 100644
5418 +--- a/block/bfq-iosched.c
5419 ++++ b/block/bfq-iosched.c
5420 +@@ -2782,6 +2782,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
5421 + * are likely to increase the throughput.
5422 + */
5423 + bfqq->new_bfqq = new_bfqq;
5424 ++ /*
5425 ++ * The above assignment schedules the following redirections:
5426 ++ * each time some I/O for bfqq arrives, the process that
5427 ++ * generated that I/O is disassociated from bfqq and
5428 ++ * associated with new_bfqq. Here we increases new_bfqq->ref
5429 ++ * in advance, adding the number of processes that are
5430 ++ * expected to be associated with new_bfqq as they happen to
5431 ++ * issue I/O.
5432 ++ */
5433 + new_bfqq->ref += process_refs;
5434 + return new_bfqq;
5435 + }
5436 +@@ -2844,6 +2853,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5437 + {
5438 + struct bfq_queue *in_service_bfqq, *new_bfqq;
5439 +
5440 ++ /* if a merge has already been setup, then proceed with that first */
5441 ++ if (bfqq->new_bfqq)
5442 ++ return bfqq->new_bfqq;
5443 ++
5444 + /*
5445 + * Check delayed stable merge for rotational or non-queueing
5446 + * devs. For this branch to be executed, bfqq must not be
5447 +@@ -2945,9 +2958,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5448 + if (bfq_too_late_for_merging(bfqq))
5449 + return NULL;
5450 +
5451 +- if (bfqq->new_bfqq)
5452 +- return bfqq->new_bfqq;
5453 +-
5454 + if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
5455 + return NULL;
5456 +
5457 +@@ -5181,7 +5191,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
5458 + struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
5459 + struct request *rq;
5460 + struct bfq_queue *in_serv_queue;
5461 +- bool waiting_rq, idle_timer_disabled;
5462 ++ bool waiting_rq, idle_timer_disabled = false;
5463 +
5464 + spin_lock_irq(&bfqd->lock);
5465 +
5466 +@@ -5189,14 +5199,15 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
5467 + waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
5468 +
5469 + rq = __bfq_dispatch_request(hctx);
5470 +-
5471 +- idle_timer_disabled =
5472 +- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
5473 ++ if (in_serv_queue == bfqd->in_service_queue) {
5474 ++ idle_timer_disabled =
5475 ++ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
5476 ++ }
5477 +
5478 + spin_unlock_irq(&bfqd->lock);
5479 +-
5480 +- bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
5481 +- idle_timer_disabled);
5482 ++ bfq_update_dispatch_stats(hctx->queue, rq,
5483 ++ idle_timer_disabled ? in_serv_queue : NULL,
5484 ++ idle_timer_disabled);
5485 +
5486 + return rq;
5487 + }
5488 +diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
5489 +index b74cc0da118ec..709b901de3ca9 100644
5490 +--- a/block/bfq-wf2q.c
5491 ++++ b/block/bfq-wf2q.c
5492 +@@ -519,7 +519,7 @@ unsigned short bfq_ioprio_to_weight(int ioprio)
5493 + static unsigned short bfq_weight_to_ioprio(int weight)
5494 + {
5495 + return max_t(int, 0,
5496 +- IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight);
5497 ++ IOPRIO_NR_LEVELS - weight / BFQ_WEIGHT_CONVERSION_COEFF);
5498 + }
5499 +
5500 + static void bfq_get_entity(struct bfq_entity *entity)
5501 +diff --git a/block/bio.c b/block/bio.c
5502 +index 4312a8085396b..1be1e360967d0 100644
5503 +--- a/block/bio.c
5504 ++++ b/block/bio.c
5505 +@@ -1486,8 +1486,7 @@ again:
5506 + if (!bio_integrity_endio(bio))
5507 + return;
5508 +
5509 +- if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
5510 +- rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio);
5511 ++ rq_qos_done_bio(bio);
5512 +
5513 + if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
5514 + trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
5515 +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
5516 +index 650f7e27989f1..87a1c0c3fa401 100644
5517 +--- a/block/blk-cgroup.c
5518 ++++ b/block/blk-cgroup.c
5519 +@@ -857,11 +857,11 @@ static void blkcg_fill_root_iostats(void)
5520 + blk_queue_root_blkg(bdev_get_queue(bdev));
5521 + struct blkg_iostat tmp;
5522 + int cpu;
5523 ++ unsigned long flags;
5524 +
5525 + memset(&tmp, 0, sizeof(tmp));
5526 + for_each_possible_cpu(cpu) {
5527 + struct disk_stats *cpu_dkstats;
5528 +- unsigned long flags;
5529 +
5530 + cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
5531 + tmp.ios[BLKG_IOSTAT_READ] +=
5532 +@@ -877,11 +877,11 @@ static void blkcg_fill_root_iostats(void)
5533 + cpu_dkstats->sectors[STAT_WRITE] << 9;
5534 + tmp.bytes[BLKG_IOSTAT_DISCARD] +=
5535 + cpu_dkstats->sectors[STAT_DISCARD] << 9;
5536 +-
5537 +- flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
5538 +- blkg_iostat_set(&blkg->iostat.cur, &tmp);
5539 +- u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
5540 + }
5541 ++
5542 ++ flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
5543 ++ blkg_iostat_set(&blkg->iostat.cur, &tmp);
5544 ++ u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
5545 + }
5546 + }
5547 +
5548 +diff --git a/block/blk-ioc.c b/block/blk-ioc.c
5549 +index 11f49f78db32b..df9cfe4ca5328 100644
5550 +--- a/block/blk-ioc.c
5551 ++++ b/block/blk-ioc.c
5552 +@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
5553 +
5554 + task_lock(task);
5555 + if (task->flags & PF_EXITING) {
5556 +- err = -ESRCH;
5557 + kmem_cache_free(iocontext_cachep, ioc);
5558 + goto out;
5559 + }
5560 +@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
5561 + task->io_context->ioprio = ioprio;
5562 + out:
5563 + task_unlock(task);
5564 +- return err;
5565 ++ return 0;
5566 + }
5567 + EXPORT_SYMBOL_GPL(set_task_ioprio);
5568 +
5569 +diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
5570 +index 6593c7123b97e..24d70e0555ddb 100644
5571 +--- a/block/blk-iolatency.c
5572 ++++ b/block/blk-iolatency.c
5573 +@@ -598,7 +598,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
5574 + int inflight = 0;
5575 +
5576 + blkg = bio->bi_blkg;
5577 +- if (!blkg || !bio_flagged(bio, BIO_TRACKED))
5578 ++ if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED))
5579 + return;
5580 +
5581 + iolat = blkg_to_lat(bio->bi_blkg);
5582 +diff --git a/block/blk-merge.c b/block/blk-merge.c
5583 +index 4de34a332c9fd..ea6968313b4a8 100644
5584 +--- a/block/blk-merge.c
5585 ++++ b/block/blk-merge.c
5586 +@@ -9,6 +9,7 @@
5587 + #include <linux/blk-integrity.h>
5588 + #include <linux/scatterlist.h>
5589 + #include <linux/part_stat.h>
5590 ++#include <linux/blk-cgroup.h>
5591 +
5592 + #include <trace/events/block.h>
5593 +
5594 +@@ -368,8 +369,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
5595 + trace_block_split(split, (*bio)->bi_iter.bi_sector);
5596 + submit_bio_noacct(*bio);
5597 + *bio = split;
5598 +-
5599 +- blk_throtl_charge_bio_split(*bio);
5600 + }
5601 + }
5602 +
5603 +@@ -600,6 +599,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
5604 + static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
5605 + unsigned int nr_phys_segs)
5606 + {
5607 ++ if (!blk_cgroup_mergeable(req, bio))
5608 ++ goto no_merge;
5609 ++
5610 + if (blk_integrity_merge_bio(req->q, req, bio) == false)
5611 + goto no_merge;
5612 +
5613 +@@ -696,6 +698,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
5614 + if (total_phys_segments > blk_rq_get_max_segments(req))
5615 + return 0;
5616 +
5617 ++ if (!blk_cgroup_mergeable(req, next->bio))
5618 ++ return 0;
5619 ++
5620 + if (blk_integrity_merge_rq(q, req, next) == false)
5621 + return 0;
5622 +
5623 +@@ -904,6 +909,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
5624 + if (bio_data_dir(bio) != rq_data_dir(rq))
5625 + return false;
5626 +
5627 ++ /* don't merge across cgroup boundaries */
5628 ++ if (!blk_cgroup_mergeable(rq, bio))
5629 ++ return false;
5630 ++
5631 + /* only merge integrity protected bio into ditto rq */
5632 + if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
5633 + return false;
5634 +@@ -1089,12 +1098,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
5635 + if (!plug || rq_list_empty(plug->mq_list))
5636 + return false;
5637 +
5638 +- /* check the previously added entry for a quick merge attempt */
5639 +- rq = rq_list_peek(&plug->mq_list);
5640 +- if (rq->q == q) {
5641 +- if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
5642 +- BIO_MERGE_OK)
5643 +- return true;
5644 ++ rq_list_for_each(&plug->mq_list, rq) {
5645 ++ if (rq->q == q) {
5646 ++ if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
5647 ++ BIO_MERGE_OK)
5648 ++ return true;
5649 ++ break;
5650 ++ }
5651 ++
5652 ++ /*
5653 ++ * Only keep iterating plug list for merges if we have multiple
5654 ++ * queues
5655 ++ */
5656 ++ if (!plug->multiple_queues)
5657 ++ break;
5658 + }
5659 + return false;
5660 + }
5661 +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
5662 +index 55488ba978232..80e0eb26b697c 100644
5663 +--- a/block/blk-mq-sched.c
5664 ++++ b/block/blk-mq-sched.c
5665 +@@ -180,11 +180,18 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
5666 +
5667 + static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
5668 + {
5669 ++ unsigned long end = jiffies + HZ;
5670 + int ret;
5671 +
5672 + do {
5673 + ret = __blk_mq_do_dispatch_sched(hctx);
5674 +- } while (ret == 1);
5675 ++ if (ret != 1)
5676 ++ break;
5677 ++ if (need_resched() || time_is_before_jiffies(end)) {
5678 ++ blk_mq_delay_run_hw_queue(hctx, 0);
5679 ++ break;
5680 ++ }
5681 ++ } while (1);
5682 +
5683 + return ret;
5684 + }
5685 +diff --git a/block/blk-mq.c b/block/blk-mq.c
5686 +index 9a9185a0a2d13..cb50097366cd4 100644
5687 +--- a/block/blk-mq.c
5688 ++++ b/block/blk-mq.c
5689 +@@ -2561,13 +2561,36 @@ static void __blk_mq_flush_plug_list(struct request_queue *q,
5690 + q->mq_ops->queue_rqs(&plug->mq_list);
5691 + }
5692 +
5693 ++static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
5694 ++{
5695 ++ struct blk_mq_hw_ctx *this_hctx = NULL;
5696 ++ struct blk_mq_ctx *this_ctx = NULL;
5697 ++ struct request *requeue_list = NULL;
5698 ++ unsigned int depth = 0;
5699 ++ LIST_HEAD(list);
5700 ++
5701 ++ do {
5702 ++ struct request *rq = rq_list_pop(&plug->mq_list);
5703 ++
5704 ++ if (!this_hctx) {
5705 ++ this_hctx = rq->mq_hctx;
5706 ++ this_ctx = rq->mq_ctx;
5707 ++ } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
5708 ++ rq_list_add(&requeue_list, rq);
5709 ++ continue;
5710 ++ }
5711 ++ list_add_tail(&rq->queuelist, &list);
5712 ++ depth++;
5713 ++ } while (!rq_list_empty(plug->mq_list));
5714 ++
5715 ++ plug->mq_list = requeue_list;
5716 ++ trace_block_unplug(this_hctx->queue, depth, !from_sched);
5717 ++ blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
5718 ++}
5719 ++
5720 + void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
5721 + {
5722 +- struct blk_mq_hw_ctx *this_hctx;
5723 +- struct blk_mq_ctx *this_ctx;
5724 + struct request *rq;
5725 +- unsigned int depth;
5726 +- LIST_HEAD(list);
5727 +
5728 + if (rq_list_empty(plug->mq_list))
5729 + return;
5730 +@@ -2603,35 +2626,9 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
5731 + return;
5732 + }
5733 +
5734 +- this_hctx = NULL;
5735 +- this_ctx = NULL;
5736 +- depth = 0;
5737 + do {
5738 +- rq = rq_list_pop(&plug->mq_list);
5739 +-
5740 +- if (!this_hctx) {
5741 +- this_hctx = rq->mq_hctx;
5742 +- this_ctx = rq->mq_ctx;
5743 +- } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
5744 +- trace_block_unplug(this_hctx->queue, depth,
5745 +- !from_schedule);
5746 +- blk_mq_sched_insert_requests(this_hctx, this_ctx,
5747 +- &list, from_schedule);
5748 +- depth = 0;
5749 +- this_hctx = rq->mq_hctx;
5750 +- this_ctx = rq->mq_ctx;
5751 +-
5752 +- }
5753 +-
5754 +- list_add(&rq->queuelist, &list);
5755 +- depth++;
5756 ++ blk_mq_dispatch_plug_list(plug, from_schedule);
5757 + } while (!rq_list_empty(plug->mq_list));
5758 +-
5759 +- if (!list_empty(&list)) {
5760 +- trace_block_unplug(this_hctx->queue, depth, !from_schedule);
5761 +- blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
5762 +- from_schedule);
5763 +- }
5764 + }
5765 +
5766 + void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
5767 +diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
5768 +index 3cfbc8668cba9..68267007da1c6 100644
5769 +--- a/block/blk-rq-qos.h
5770 ++++ b/block/blk-rq-qos.h
5771 +@@ -177,20 +177,20 @@ static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
5772 + __rq_qos_requeue(q->rq_qos, rq);
5773 + }
5774 +
5775 +-static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
5776 ++static inline void rq_qos_done_bio(struct bio *bio)
5777 + {
5778 +- if (q->rq_qos)
5779 +- __rq_qos_done_bio(q->rq_qos, bio);
5780 ++ if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
5781 ++ bio_flagged(bio, BIO_QOS_MERGED))) {
5782 ++ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
5783 ++ if (q->rq_qos)
5784 ++ __rq_qos_done_bio(q->rq_qos, bio);
5785 ++ }
5786 + }
5787 +
5788 + static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
5789 + {
5790 +- /*
5791 +- * BIO_TRACKED lets controllers know that a bio went through the
5792 +- * normal rq_qos path.
5793 +- */
5794 + if (q->rq_qos) {
5795 +- bio_set_flag(bio, BIO_TRACKED);
5796 ++ bio_set_flag(bio, BIO_QOS_THROTTLED);
5797 + __rq_qos_throttle(q->rq_qos, bio);
5798 + }
5799 + }
5800 +@@ -205,8 +205,10 @@ static inline void rq_qos_track(struct request_queue *q, struct request *rq,
5801 + static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
5802 + struct bio *bio)
5803 + {
5804 +- if (q->rq_qos)
5805 ++ if (q->rq_qos) {
5806 ++ bio_set_flag(bio, BIO_QOS_MERGED);
5807 + __rq_qos_merge(q->rq_qos, rq, bio);
5808 ++ }
5809 + }
5810 +
5811 + static inline void rq_qos_queue_depth_changed(struct request_queue *q)
5812 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
5813 +index 9f32882ceb2f6..7923f49f1046f 100644
5814 +--- a/block/blk-sysfs.c
5815 ++++ b/block/blk-sysfs.c
5816 +@@ -954,9 +954,6 @@ void blk_unregister_queue(struct gendisk *disk)
5817 + */
5818 + if (queue_is_mq(q))
5819 + blk_mq_unregister_dev(disk_to_dev(disk), q);
5820 +-
5821 +- kobject_uevent(&q->kobj, KOBJ_REMOVE);
5822 +- kobject_del(&q->kobj);
5823 + blk_trace_remove_sysfs(disk_to_dev(disk));
5824 +
5825 + mutex_lock(&q->sysfs_lock);
5826 +@@ -964,6 +961,11 @@ void blk_unregister_queue(struct gendisk *disk)
5827 + elv_unregister_queue(q);
5828 + disk_unregister_independent_access_ranges(disk);
5829 + mutex_unlock(&q->sysfs_lock);
5830 ++
5831 ++ /* Now that we've deleted all child objects, we can delete the queue. */
5832 ++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
5833 ++ kobject_del(&q->kobj);
5834 ++
5835 + mutex_unlock(&q->sysfs_dir_lock);
5836 +
5837 + kobject_put(&disk_to_dev(disk)->kobj);
5838 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
5839 +index 7c462c006b269..87769b337fc55 100644
5840 +--- a/block/blk-throttle.c
5841 ++++ b/block/blk-throttle.c
5842 +@@ -808,7 +808,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
5843 + unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
5844 + unsigned int bio_size = throtl_bio_data_size(bio);
5845 +
5846 +- if (bps_limit == U64_MAX) {
5847 ++ /* no need to throttle if this bio's bytes have been accounted */
5848 ++ if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED)) {
5849 + if (wait)
5850 + *wait = 0;
5851 + return true;
5852 +@@ -920,9 +921,12 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
5853 + unsigned int bio_size = throtl_bio_data_size(bio);
5854 +
5855 + /* Charge the bio to the group */
5856 +- tg->bytes_disp[rw] += bio_size;
5857 ++ if (!bio_flagged(bio, BIO_THROTTLED)) {
5858 ++ tg->bytes_disp[rw] += bio_size;
5859 ++ tg->last_bytes_disp[rw] += bio_size;
5860 ++ }
5861 ++
5862 + tg->io_disp[rw]++;
5863 +- tg->last_bytes_disp[rw] += bio_size;
5864 + tg->last_io_disp[rw]++;
5865 +
5866 + /*
5867 +diff --git a/block/blk-throttle.h b/block/blk-throttle.h
5868 +index 175f03abd9e41..cb43f4417d6ea 100644
5869 +--- a/block/blk-throttle.h
5870 ++++ b/block/blk-throttle.h
5871 +@@ -170,8 +170,6 @@ static inline bool blk_throtl_bio(struct bio *bio)
5872 + {
5873 + struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
5874 +
5875 +- if (bio_flagged(bio, BIO_THROTTLED))
5876 +- return false;
5877 + if (!tg->has_rules[bio_data_dir(bio)])
5878 + return false;
5879 +
5880 +diff --git a/block/genhd.c b/block/genhd.c
5881 +index 9eca1f7d35c97..9d9d702d07787 100644
5882 +--- a/block/genhd.c
5883 ++++ b/block/genhd.c
5884 +@@ -330,7 +330,7 @@ int blk_alloc_ext_minor(void)
5885 + {
5886 + int idx;
5887 +
5888 +- idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
5889 ++ idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
5890 + if (idx == -ENOSPC)
5891 + return -EBUSY;
5892 + return idx;
5893 +@@ -927,12 +927,17 @@ ssize_t part_stat_show(struct device *dev,
5894 + struct disk_stats stat;
5895 + unsigned int inflight;
5896 +
5897 +- part_stat_read_all(bdev, &stat);
5898 + if (queue_is_mq(q))
5899 + inflight = blk_mq_in_flight(q, bdev);
5900 + else
5901 + inflight = part_in_flight(bdev);
5902 +
5903 ++ if (inflight) {
5904 ++ part_stat_lock();
5905 ++ update_io_ticks(bdev, jiffies, true);
5906 ++ part_stat_unlock();
5907 ++ }
5908 ++ part_stat_read_all(bdev, &stat);
5909 + return sprintf(buf,
5910 + "%8lu %8lu %8llu %8u "
5911 + "%8lu %8lu %8llu %8u "
5912 +@@ -1188,12 +1193,17 @@ static int diskstats_show(struct seq_file *seqf, void *v)
5913 + xa_for_each(&gp->part_tbl, idx, hd) {
5914 + if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
5915 + continue;
5916 +- part_stat_read_all(hd, &stat);
5917 + if (queue_is_mq(gp->queue))
5918 + inflight = blk_mq_in_flight(gp->queue, hd);
5919 + else
5920 + inflight = part_in_flight(hd);
5921 +
5922 ++ if (inflight) {
5923 ++ part_stat_lock();
5924 ++ update_io_ticks(hd, jiffies, true);
5925 ++ part_stat_unlock();
5926 ++ }
5927 ++ part_stat_read_all(hd, &stat);
5928 + seq_printf(seqf, "%4d %7d %pg "
5929 + "%lu %lu %lu %u "
5930 + "%lu %lu %lu %u "
5931 +diff --git a/crypto/Kconfig b/crypto/Kconfig
5932 +index 442765219c375..2cca54c59fecd 100644
5933 +--- a/crypto/Kconfig
5934 ++++ b/crypto/Kconfig
5935 +@@ -1847,6 +1847,7 @@ config CRYPTO_JITTERENTROPY
5936 +
5937 + config CRYPTO_KDF800108_CTR
5938 + tristate
5939 ++ select CRYPTO_HMAC
5940 + select CRYPTO_SHA256
5941 +
5942 + config CRYPTO_USER_API
5943 +diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
5944 +index 0b4d07aa88111..f94a1d1ad3a6c 100644
5945 +--- a/crypto/asymmetric_keys/pkcs7_verify.c
5946 ++++ b/crypto/asymmetric_keys/pkcs7_verify.c
5947 +@@ -174,12 +174,6 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
5948 + pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
5949 + sinfo->index, certix);
5950 +
5951 +- if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
5952 +- pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
5953 +- sinfo->index);
5954 +- continue;
5955 +- }
5956 +-
5957 + sinfo->signer = x509;
5958 + return 0;
5959 + }
5960 +diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
5961 +index 4fefb219bfdc8..7c9e6be35c30c 100644
5962 +--- a/crypto/asymmetric_keys/public_key.c
5963 ++++ b/crypto/asymmetric_keys/public_key.c
5964 +@@ -60,39 +60,83 @@ static void public_key_destroy(void *payload0, void *payload3)
5965 + }
5966 +
5967 + /*
5968 +- * Determine the crypto algorithm name.
5969 ++ * Given a public_key, and an encoding and hash_algo to be used for signing
5970 ++ * and/or verification with that key, determine the name of the corresponding
5971 ++ * akcipher algorithm. Also check that encoding and hash_algo are allowed.
5972 + */
5973 +-static
5974 +-int software_key_determine_akcipher(const char *encoding,
5975 +- const char *hash_algo,
5976 +- const struct public_key *pkey,
5977 +- char alg_name[CRYPTO_MAX_ALG_NAME])
5978 ++static int
5979 ++software_key_determine_akcipher(const struct public_key *pkey,
5980 ++ const char *encoding, const char *hash_algo,
5981 ++ char alg_name[CRYPTO_MAX_ALG_NAME])
5982 + {
5983 + int n;
5984 +
5985 +- if (strcmp(encoding, "pkcs1") == 0) {
5986 +- /* The data wangled by the RSA algorithm is typically padded
5987 +- * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
5988 +- * sec 8.2].
5989 ++ if (!encoding)
5990 ++ return -EINVAL;
5991 ++
5992 ++ if (strcmp(pkey->pkey_algo, "rsa") == 0) {
5993 ++ /*
5994 ++ * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
5995 ++ */
5996 ++ if (strcmp(encoding, "pkcs1") == 0) {
5997 ++ if (!hash_algo)
5998 ++ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
5999 ++ "pkcs1pad(%s)",
6000 ++ pkey->pkey_algo);
6001 ++ else
6002 ++ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
6003 ++ "pkcs1pad(%s,%s)",
6004 ++ pkey->pkey_algo, hash_algo);
6005 ++ return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
6006 ++ }
6007 ++ if (strcmp(encoding, "raw") != 0)
6008 ++ return -EINVAL;
6009 ++ /*
6010 ++ * Raw RSA cannot differentiate between different hash
6011 ++ * algorithms.
6012 ++ */
6013 ++ if (hash_algo)
6014 ++ return -EINVAL;
6015 ++ } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
6016 ++ if (strcmp(encoding, "x962") != 0)
6017 ++ return -EINVAL;
6018 ++ /*
6019 ++ * ECDSA signatures are taken over a raw hash, so they don't
6020 ++ * differentiate between different hash algorithms. That means
6021 ++ * that the verifier should hard-code a specific hash algorithm.
6022 ++ * Unfortunately, in practice ECDSA is used with multiple SHAs,
6023 ++ * so we have to allow all of them and not just one.
6024 + */
6025 + if (!hash_algo)
6026 +- n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
6027 +- "pkcs1pad(%s)",
6028 +- pkey->pkey_algo);
6029 +- else
6030 +- n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
6031 +- "pkcs1pad(%s,%s)",
6032 +- pkey->pkey_algo, hash_algo);
6033 +- return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
6034 +- }
6035 +-
6036 +- if (strcmp(encoding, "raw") == 0 ||
6037 +- strcmp(encoding, "x962") == 0) {
6038 +- strcpy(alg_name, pkey->pkey_algo);
6039 +- return 0;
6040 ++ return -EINVAL;
6041 ++ if (strcmp(hash_algo, "sha1") != 0 &&
6042 ++ strcmp(hash_algo, "sha224") != 0 &&
6043 ++ strcmp(hash_algo, "sha256") != 0 &&
6044 ++ strcmp(hash_algo, "sha384") != 0 &&
6045 ++ strcmp(hash_algo, "sha512") != 0)
6046 ++ return -EINVAL;
6047 ++ } else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
6048 ++ if (strcmp(encoding, "raw") != 0)
6049 ++ return -EINVAL;
6050 ++ if (!hash_algo)
6051 ++ return -EINVAL;
6052 ++ if (strcmp(hash_algo, "sm3") != 0)
6053 ++ return -EINVAL;
6054 ++ } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) {
6055 ++ if (strcmp(encoding, "raw") != 0)
6056 ++ return -EINVAL;
6057 ++ if (!hash_algo)
6058 ++ return -EINVAL;
6059 ++ if (strcmp(hash_algo, "streebog256") != 0 &&
6060 ++ strcmp(hash_algo, "streebog512") != 0)
6061 ++ return -EINVAL;
6062 ++ } else {
6063 ++ /* Unknown public key algorithm */
6064 ++ return -ENOPKG;
6065 + }
6066 +-
6067 +- return -ENOPKG;
6068 ++ if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0)
6069 ++ return -EINVAL;
6070 ++ return 0;
6071 + }
6072 +
6073 + static u8 *pkey_pack_u32(u8 *dst, u32 val)
6074 +@@ -113,9 +157,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
6075 + u8 *key, *ptr;
6076 + int ret, len;
6077 +
6078 +- ret = software_key_determine_akcipher(params->encoding,
6079 +- params->hash_algo,
6080 +- pkey, alg_name);
6081 ++ ret = software_key_determine_akcipher(pkey, params->encoding,
6082 ++ params->hash_algo, alg_name);
6083 + if (ret < 0)
6084 + return ret;
6085 +
6086 +@@ -179,9 +222,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
6087 +
6088 + pr_devel("==>%s()\n", __func__);
6089 +
6090 +- ret = software_key_determine_akcipher(params->encoding,
6091 +- params->hash_algo,
6092 +- pkey, alg_name);
6093 ++ ret = software_key_determine_akcipher(pkey, params->encoding,
6094 ++ params->hash_algo, alg_name);
6095 + if (ret < 0)
6096 + return ret;
6097 +
6098 +@@ -325,9 +367,23 @@ int public_key_verify_signature(const struct public_key *pkey,
6099 + BUG_ON(!sig);
6100 + BUG_ON(!sig->s);
6101 +
6102 +- ret = software_key_determine_akcipher(sig->encoding,
6103 +- sig->hash_algo,
6104 +- pkey, alg_name);
6105 ++ /*
6106 ++ * If the signature specifies a public key algorithm, it *must* match
6107 ++ * the key's actual public key algorithm.
6108 ++ *
6109 ++ * Small exception: ECDSA signatures don't specify the curve, but ECDSA
6110 ++ * keys do. So the strings can mismatch slightly in that case:
6111 ++ * "ecdsa-nist-*" for the key, but "ecdsa" for the signature.
6112 ++ */
6113 ++ if (sig->pkey_algo) {
6114 ++ if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 &&
6115 ++ (strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 ||
6116 ++ strcmp(sig->pkey_algo, "ecdsa") != 0))
6117 ++ return -EKEYREJECTED;
6118 ++ }
6119 ++
6120 ++ ret = software_key_determine_akcipher(pkey, sig->encoding,
6121 ++ sig->hash_algo, alg_name);
6122 + if (ret < 0)
6123 + return ret;
6124 +
6125 +diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
6126 +index fe14cae115b51..71cc1738fbfd2 100644
6127 +--- a/crypto/asymmetric_keys/x509_public_key.c
6128 ++++ b/crypto/asymmetric_keys/x509_public_key.c
6129 +@@ -128,12 +128,6 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
6130 + goto out;
6131 + }
6132 +
6133 +- ret = -EKEYREJECTED;
6134 +- if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 &&
6135 +- (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 ||
6136 +- strcmp(cert->sig->pkey_algo, "ecdsa") != 0))
6137 +- goto out;
6138 +-
6139 + ret = public_key_verify_signature(cert->pub, cert->sig);
6140 + if (ret < 0) {
6141 + if (ret == -ENOPKG) {
6142 +diff --git a/crypto/authenc.c b/crypto/authenc.c
6143 +index 670bf1a01d00e..17f674a7cdff5 100644
6144 +--- a/crypto/authenc.c
6145 ++++ b/crypto/authenc.c
6146 +@@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
6147 + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
6148 +
6149 + skcipher_request_set_tfm(skreq, ctx->enc);
6150 +- skcipher_request_set_callback(skreq, aead_request_flags(req),
6151 ++ skcipher_request_set_callback(skreq, flags,
6152 + req->base.complete, req->base.data);
6153 + skcipher_request_set_crypt(skreq, src, dst,
6154 + req->cryptlen - authsize, req->iv);
6155 +diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
6156 +index 8ac3e73e8ea65..9d804831c8b3f 100644
6157 +--- a/crypto/rsa-pkcs1pad.c
6158 ++++ b/crypto/rsa-pkcs1pad.c
6159 +@@ -476,6 +476,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
6160 + pos++;
6161 +
6162 + if (digest_info) {
6163 ++ if (digest_info->size > dst_len - pos)
6164 ++ goto done;
6165 + if (crypto_memneq(out_buf + pos, digest_info->data,
6166 + digest_info->size))
6167 + goto done;
6168 +@@ -495,7 +497,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
6169 + sg_nents_for_len(req->src,
6170 + req->src_len + req->dst_len),
6171 + req_ctx->out_buf + ctx->key_size,
6172 +- req->dst_len, ctx->key_size);
6173 ++ req->dst_len, req->src_len);
6174 + /* Do the actual verification step. */
6175 + if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
6176 + req->dst_len) != 0)
6177 +@@ -538,7 +540,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
6178 +
6179 + if (WARN_ON(req->dst) ||
6180 + WARN_ON(!req->dst_len) ||
6181 +- !ctx->key_size || req->src_len < ctx->key_size)
6182 ++ !ctx->key_size || req->src_len != ctx->key_size)
6183 + return -EINVAL;
6184 +
6185 + req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
6186 +@@ -621,6 +623,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
6187 +
6188 + rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
6189 +
6190 ++ if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
6191 ++ err = -EINVAL;
6192 ++ goto err_free_inst;
6193 ++ }
6194 ++
6195 + err = -ENAMETOOLONG;
6196 + hash_name = crypto_attr_alg_name(tb[2]);
6197 + if (IS_ERR(hash_name)) {
6198 +diff --git a/crypto/xts.c b/crypto/xts.c
6199 +index 6c12f30dbdd6d..63c85b9e64e08 100644
6200 +--- a/crypto/xts.c
6201 ++++ b/crypto/xts.c
6202 +@@ -466,3 +466,4 @@ MODULE_LICENSE("GPL");
6203 + MODULE_DESCRIPTION("XTS block cipher mode");
6204 + MODULE_ALIAS_CRYPTO("xts");
6205 + MODULE_IMPORT_NS(CRYPTO_INTERNAL);
6206 ++MODULE_SOFTDEP("pre: ecb");
6207 +diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
6208 +index 915c2433463d7..e7c30ce06e189 100644
6209 +--- a/drivers/acpi/acpica/nswalk.c
6210 ++++ b/drivers/acpi/acpica/nswalk.c
6211 +@@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type,
6212 +
6213 + if (start_node == ACPI_ROOT_OBJECT) {
6214 + start_node = acpi_gbl_root_node;
6215 ++ if (!start_node) {
6216 ++ return_ACPI_STATUS(AE_NO_NAMESPACE);
6217 ++ }
6218 + }
6219 +
6220 + /* Null child means "get first node" */
6221 +diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
6222 +index 19e50fcbf4d6f..598fd19b65fa4 100644
6223 +--- a/drivers/acpi/apei/bert.c
6224 ++++ b/drivers/acpi/apei/bert.c
6225 +@@ -29,6 +29,7 @@
6226 +
6227 + #undef pr_fmt
6228 + #define pr_fmt(fmt) "BERT: " fmt
6229 ++#define ACPI_BERT_PRINT_MAX_LEN 1024
6230 +
6231 + static int bert_disable;
6232 +
6233 +@@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region,
6234 + }
6235 +
6236 + pr_info_once("Error records from previous boot:\n");
6237 +-
6238 +- cper_estatus_print(KERN_INFO HW_ERR, estatus);
6239 ++ if (region_len < ACPI_BERT_PRINT_MAX_LEN)
6240 ++ cper_estatus_print(KERN_INFO HW_ERR, estatus);
6241 ++ else
6242 ++ pr_info_once("Max print length exceeded, table data is available at:\n"
6243 ++ "/sys/firmware/acpi/tables/data/BERT");
6244 +
6245 + /*
6246 + * Because the boot error source is "one-time polled" type,
6247 +@@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str)
6248 + {
6249 + bert_disable = 1;
6250 +
6251 +- return 0;
6252 ++ return 1;
6253 + }
6254 + __setup("bert_disable", setup_bert_disable);
6255 +
6256 +diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
6257 +index 242f3c2d55330..698d67cee0527 100644
6258 +--- a/drivers/acpi/apei/erst.c
6259 ++++ b/drivers/acpi/apei/erst.c
6260 +@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear);
6261 + static int __init setup_erst_disable(char *str)
6262 + {
6263 + erst_disable = 1;
6264 +- return 0;
6265 ++ return 1;
6266 + }
6267 +
6268 + __setup("erst_disable", setup_erst_disable);
6269 +diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
6270 +index 0edc1ed476737..6aef1ee5e1bdb 100644
6271 +--- a/drivers/acpi/apei/hest.c
6272 ++++ b/drivers/acpi/apei/hest.c
6273 +@@ -224,7 +224,7 @@ err:
6274 + static int __init setup_hest_disable(char *str)
6275 + {
6276 + hest_disable = HEST_DISABLED;
6277 +- return 0;
6278 ++ return 1;
6279 + }
6280 +
6281 + __setup("hest_disable", setup_hest_disable);
6282 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
6283 +index 07f604832fd6b..079b952ab59f2 100644
6284 +--- a/drivers/acpi/bus.c
6285 ++++ b/drivers/acpi/bus.c
6286 +@@ -332,21 +332,32 @@ static void acpi_bus_osc_negotiate_platform_control(void)
6287 + if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
6288 + return;
6289 +
6290 +- kfree(context.ret.pointer);
6291 ++ capbuf_ret = context.ret.pointer;
6292 ++ if (context.ret.length <= OSC_SUPPORT_DWORD) {
6293 ++ kfree(context.ret.pointer);
6294 ++ return;
6295 ++ }
6296 +
6297 +- /* Now run _OSC again with query flag clear */
6298 ++ /*
6299 ++ * Now run _OSC again with query flag clear and with the caps
6300 ++ * supported by both the OS and the platform.
6301 ++ */
6302 + capbuf[OSC_QUERY_DWORD] = 0;
6303 ++ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
6304 ++ kfree(context.ret.pointer);
6305 +
6306 + if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
6307 + return;
6308 +
6309 + capbuf_ret = context.ret.pointer;
6310 +- osc_sb_apei_support_acked =
6311 +- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
6312 +- osc_pc_lpi_support_confirmed =
6313 +- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
6314 +- osc_sb_native_usb4_support_confirmed =
6315 +- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
6316 ++ if (context.ret.length > OSC_SUPPORT_DWORD) {
6317 ++ osc_sb_apei_support_acked =
6318 ++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
6319 ++ osc_pc_lpi_support_confirmed =
6320 ++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
6321 ++ osc_sb_native_usb4_support_confirmed =
6322 ++ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
6323 ++ }
6324 +
6325 + kfree(context.ret.pointer);
6326 + }
6327 +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
6328 +index 866560cbb082c..123e98a765de7 100644
6329 +--- a/drivers/acpi/cppc_acpi.c
6330 ++++ b/drivers/acpi/cppc_acpi.c
6331 +@@ -676,6 +676,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
6332 + cpc_obj = &out_obj->package.elements[0];
6333 + if (cpc_obj->type == ACPI_TYPE_INTEGER) {
6334 + num_ent = cpc_obj->integer.value;
6335 ++ if (num_ent <= 1) {
6336 ++ pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
6337 ++ num_ent, pr->id);
6338 ++ goto out_free;
6339 ++ }
6340 + } else {
6341 + pr_debug("Unexpected entry type(%d) for NumEntries\n",
6342 + cpc_obj->type);
6343 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
6344 +index d0986bda29640..3fceb4681ec9f 100644
6345 +--- a/drivers/acpi/property.c
6346 ++++ b/drivers/acpi/property.c
6347 +@@ -685,7 +685,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
6348 + */
6349 + if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
6350 + if (index)
6351 +- return -EINVAL;
6352 ++ return -ENOENT;
6353 +
6354 + device = acpi_fetch_acpi_dev(obj->reference.handle);
6355 + if (!device)
6356 +diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
6357 +index ffdeed5334d6f..664070fc83498 100644
6358 +--- a/drivers/acpi/x86/utils.c
6359 ++++ b/drivers/acpi/x86/utils.c
6360 +@@ -284,6 +284,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
6361 + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
6362 + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
6363 + },
6364 ++ {
6365 ++ /* Lenovo Yoga Tablet 1050F/L */
6366 ++ .matches = {
6367 ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
6368 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
6369 ++ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
6370 ++ /* Partial match on beginning of BIOS version */
6371 ++ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
6372 ++ },
6373 ++ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
6374 ++ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
6375 ++ },
6376 ++ {
6377 ++ /* Nextbook Ares 8 */
6378 ++ .matches = {
6379 ++ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
6380 ++ DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
6381 ++ },
6382 ++ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
6383 ++ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
6384 ++ },
6385 + {
6386 + /* Whitelabel (sold as various brands) TM800A550L */
6387 + .matches = {
6388 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
6389 +index f47cab21430f9..752a11d16e262 100644
6390 +--- a/drivers/base/dd.c
6391 ++++ b/drivers/base/dd.c
6392 +@@ -810,7 +810,7 @@ static int __init save_async_options(char *buf)
6393 + pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
6394 +
6395 + strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
6396 +- return 0;
6397 ++ return 1;
6398 + }
6399 + __setup("driver_async_probe=", save_async_options);
6400 +
6401 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
6402 +index 365cd4a7f2397..60c38f9cf1a75 100644
6403 +--- a/drivers/base/memory.c
6404 ++++ b/drivers/base/memory.c
6405 +@@ -663,14 +663,16 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
6406 + mem->nr_vmemmap_pages = nr_vmemmap_pages;
6407 + INIT_LIST_HEAD(&mem->group_next);
6408 +
6409 ++ ret = register_memory(mem);
6410 ++ if (ret)
6411 ++ return ret;
6412 ++
6413 + if (group) {
6414 + mem->group = group;
6415 + list_add(&mem->group_next, &group->memory_blocks);
6416 + }
6417 +
6418 +- ret = register_memory(mem);
6419 +-
6420 +- return ret;
6421 ++ return 0;
6422 + }
6423 +
6424 + static int add_memory_block(unsigned long base_section_nr)
6425 +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
6426 +index 5db704f02e712..7e8039d1884cc 100644
6427 +--- a/drivers/base/power/domain.c
6428 ++++ b/drivers/base/power/domain.c
6429 +@@ -2058,9 +2058,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
6430 + kfree(link);
6431 + }
6432 +
6433 +- genpd_debug_remove(genpd);
6434 + list_del(&genpd->gpd_list_node);
6435 + genpd_unlock(genpd);
6436 ++ genpd_debug_remove(genpd);
6437 + cancel_work_sync(&genpd->power_off_work);
6438 + if (genpd_is_cpu_domain(genpd))
6439 + free_cpumask_var(genpd->cpus);
6440 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
6441 +index 04ea92cbd9cfd..08c8a69d7b810 100644
6442 +--- a/drivers/base/power/main.c
6443 ++++ b/drivers/base/power/main.c
6444 +@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
6445 +
6446 + void device_pm_check_callbacks(struct device *dev)
6447 + {
6448 +- spin_lock_irq(&dev->power.lock);
6449 ++ unsigned long flags;
6450 ++
6451 ++ spin_lock_irqsave(&dev->power.lock, flags);
6452 + dev->power.no_pm_callbacks =
6453 + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
6454 + !dev->bus->suspend && !dev->bus->resume)) &&
6455 +@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
6456 + (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
6457 + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
6458 + !dev->driver->suspend && !dev->driver->resume));
6459 +- spin_unlock_irq(&dev->power.lock);
6460 ++ spin_unlock_irqrestore(&dev->power.lock, flags);
6461 + }
6462 +
6463 + bool dev_pm_skip_suspend(struct device *dev)
6464 +diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
6465 +index 3235532ae0778..8b26f631ebc15 100644
6466 +--- a/drivers/block/drbd/drbd_req.c
6467 ++++ b/drivers/block/drbd/drbd_req.c
6468 +@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
6469 + void complete_master_bio(struct drbd_device *device,
6470 + struct bio_and_error *m)
6471 + {
6472 +- m->bio->bi_status = errno_to_blk_status(m->error);
6473 ++ if (unlikely(m->error))
6474 ++ m->bio->bi_status = errno_to_blk_status(m->error);
6475 + bio_endio(m->bio);
6476 + dec_ap_bio(device);
6477 + }
6478 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
6479 +index 19fe19eaa50e9..d46a3d5d0c2ec 100644
6480 +--- a/drivers/block/loop.c
6481 ++++ b/drivers/block/loop.c
6482 +@@ -681,33 +681,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
6483 +
6484 + static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
6485 + {
6486 +- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
6487 ++ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
6488 + }
6489 +
6490 + static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
6491 + {
6492 +- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
6493 ++ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
6494 + }
6495 +
6496 + static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
6497 + {
6498 + int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
6499 +
6500 +- return sprintf(buf, "%s\n", autoclear ? "1" : "0");
6501 ++ return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
6502 + }
6503 +
6504 + static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
6505 + {
6506 + int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
6507 +
6508 +- return sprintf(buf, "%s\n", partscan ? "1" : "0");
6509 ++ return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
6510 + }
6511 +
6512 + static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
6513 + {
6514 + int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
6515 +
6516 +- return sprintf(buf, "%s\n", dio ? "1" : "0");
6517 ++ return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
6518 + }
6519 +
6520 + LOOP_ATTR_RO(backing_file);
6521 +@@ -1592,6 +1592,7 @@ struct compat_loop_info {
6522 + compat_ulong_t lo_inode; /* ioctl r/o */
6523 + compat_dev_t lo_rdevice; /* ioctl r/o */
6524 + compat_int_t lo_offset;
6525 ++ compat_int_t lo_encrypt_type; /* obsolete, ignored */
6526 + compat_int_t lo_encrypt_key_size; /* ioctl w/o */
6527 + compat_int_t lo_flags; /* ioctl r/o */
6528 + char lo_name[LO_NAME_SIZE];
6529 +diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
6530 +index 4db9a8c244af5..e094d2b8b5a92 100644
6531 +--- a/drivers/block/n64cart.c
6532 ++++ b/drivers/block/n64cart.c
6533 +@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
6534 + {
6535 + struct bio_vec bvec;
6536 + struct bvec_iter iter;
6537 +- struct device *dev = bio->bi_disk->private_data;
6538 ++ struct device *dev = bio->bi_bdev->bd_disk->private_data;
6539 + u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
6540 +
6541 + bio_for_each_segment(bvec, bio, iter) {
6542 +diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
6543 +index 1a4f8b227eac0..06514ed660229 100644
6544 +--- a/drivers/bluetooth/btintel.c
6545 ++++ b/drivers/bluetooth/btintel.c
6546 +@@ -2428,10 +2428,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
6547 +
6548 + /* Apply the device specific HCI quirks
6549 + *
6550 +- * WBS for SdP - SdP and Stp have a same hw_varaint but
6551 +- * different fw_variant
6552 ++ * WBS for SdP - For the Legacy ROM products, only SdP
6553 ++ * supports the WBS. But the version information is not
6554 ++ * enough to use here because the StP2 and SdP have same
6555 ++ * hw_variant and fw_variant. So, this flag is set by
6556 ++ * the transport driver (btusb) based on the HW info
6557 ++ * (idProduct)
6558 + */
6559 +- if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
6560 ++ if (!btintel_test_flag(hdev,
6561 ++ INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
6562 + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
6563 + &hdev->quirks);
6564 +
6565 +diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
6566 +index c9b24e9299e2a..e0060e58573c3 100644
6567 +--- a/drivers/bluetooth/btintel.h
6568 ++++ b/drivers/bluetooth/btintel.h
6569 +@@ -152,6 +152,7 @@ enum {
6570 + INTEL_BROKEN_INITIAL_NCMD,
6571 + INTEL_BROKEN_SHUTDOWN_LED,
6572 + INTEL_ROM_LEGACY,
6573 ++ INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
6574 +
6575 + __INTEL_NUM_FLAGS,
6576 + };
6577 +diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
6578 +index b5ea8d3bffaa7..9b868f187316d 100644
6579 +--- a/drivers/bluetooth/btmtksdio.c
6580 ++++ b/drivers/bluetooth/btmtksdio.c
6581 +@@ -38,21 +38,25 @@ static bool enable_autosuspend;
6582 + struct btmtksdio_data {
6583 + const char *fwname;
6584 + u16 chipid;
6585 ++ bool lp_mbox_supported;
6586 + };
6587 +
6588 + static const struct btmtksdio_data mt7663_data = {
6589 + .fwname = FIRMWARE_MT7663,
6590 + .chipid = 0x7663,
6591 ++ .lp_mbox_supported = false,
6592 + };
6593 +
6594 + static const struct btmtksdio_data mt7668_data = {
6595 + .fwname = FIRMWARE_MT7668,
6596 + .chipid = 0x7668,
6597 ++ .lp_mbox_supported = false,
6598 + };
6599 +
6600 + static const struct btmtksdio_data mt7921_data = {
6601 + .fwname = FIRMWARE_MT7961,
6602 + .chipid = 0x7921,
6603 ++ .lp_mbox_supported = true,
6604 + };
6605 +
6606 + static const struct sdio_device_id btmtksdio_table[] = {
6607 +@@ -87,8 +91,17 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
6608 + #define RX_DONE_INT BIT(1)
6609 + #define TX_EMPTY BIT(2)
6610 + #define TX_FIFO_OVERFLOW BIT(8)
6611 ++#define FW_MAILBOX_INT BIT(15)
6612 ++#define INT_MASK GENMASK(15, 0)
6613 + #define RX_PKT_LEN GENMASK(31, 16)
6614 +
6615 ++#define MTK_REG_CSICR 0xc0
6616 ++#define CSICR_CLR_MBOX_ACK BIT(0)
6617 ++#define MTK_REG_PH2DSM0R 0xc4
6618 ++#define PH2DSM0R_DRIVER_OWN BIT(0)
6619 ++#define MTK_REG_PD2HRM0R 0xdc
6620 ++#define PD2HRM0R_DRV_OWN BIT(0)
6621 ++
6622 + #define MTK_REG_CTDR 0x18
6623 +
6624 + #define MTK_REG_CRDR 0x1c
6625 +@@ -100,6 +113,7 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
6626 + #define BTMTKSDIO_TX_WAIT_VND_EVT 1
6627 + #define BTMTKSDIO_HW_TX_READY 2
6628 + #define BTMTKSDIO_FUNC_ENABLED 3
6629 ++#define BTMTKSDIO_PATCH_ENABLED 4
6630 +
6631 + struct mtkbtsdio_hdr {
6632 + __le16 len;
6633 +@@ -278,6 +292,78 @@ static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
6634 + return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
6635 + }
6636 +
6637 ++static u32 btmtksdio_drv_own_query_79xx(struct btmtksdio_dev *bdev)
6638 ++{
6639 ++ return sdio_readl(bdev->func, MTK_REG_PD2HRM0R, NULL);
6640 ++}
6641 ++
6642 ++static int btmtksdio_fw_pmctrl(struct btmtksdio_dev *bdev)
6643 ++{
6644 ++ u32 status;
6645 ++ int err;
6646 ++
6647 ++ sdio_claim_host(bdev->func);
6648 ++
6649 ++ if (bdev->data->lp_mbox_supported &&
6650 ++ test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) {
6651 ++ sdio_writel(bdev->func, CSICR_CLR_MBOX_ACK, MTK_REG_CSICR,
6652 ++ &err);
6653 ++ err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev,
6654 ++ status, !(status & PD2HRM0R_DRV_OWN),
6655 ++ 2000, 1000000);
6656 ++ if (err < 0) {
6657 ++ bt_dev_err(bdev->hdev, "mailbox ACK not cleared");
6658 ++ goto out;
6659 ++ }
6660 ++ }
6661 ++
6662 ++ /* Return ownership to the device */
6663 ++ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
6664 ++ if (err < 0)
6665 ++ goto out;
6666 ++
6667 ++ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6668 ++ !(status & C_COM_DRV_OWN), 2000, 1000000);
6669 ++
6670 ++out:
6671 ++ sdio_release_host(bdev->func);
6672 ++
6673 ++ if (err < 0)
6674 ++ bt_dev_err(bdev->hdev, "Cannot return ownership to device");
6675 ++
6676 ++ return err;
6677 ++}
6678 ++
6679 ++static int btmtksdio_drv_pmctrl(struct btmtksdio_dev *bdev)
6680 ++{
6681 ++ u32 status;
6682 ++ int err;
6683 ++
6684 ++ sdio_claim_host(bdev->func);
6685 ++
6686 ++ /* Get ownership from the device */
6687 ++ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
6688 ++ if (err < 0)
6689 ++ goto out;
6690 ++
6691 ++ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6692 ++ status & C_COM_DRV_OWN, 2000, 1000000);
6693 ++
6694 ++ if (!err && bdev->data->lp_mbox_supported &&
6695 ++ test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state))
6696 ++ err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev,
6697 ++ status, status & PD2HRM0R_DRV_OWN,
6698 ++ 2000, 1000000);
6699 ++
6700 ++out:
6701 ++ sdio_release_host(bdev->func);
6702 ++
6703 ++ if (err < 0)
6704 ++ bt_dev_err(bdev->hdev, "Cannot get ownership from device");
6705 ++
6706 ++ return err;
6707 ++}
6708 ++
6709 + static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
6710 + {
6711 + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
6712 +@@ -480,6 +566,13 @@ static void btmtksdio_txrx_work(struct work_struct *work)
6713 + * FIFO.
6714 + */
6715 + sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);
6716 ++ int_status &= INT_MASK;
6717 ++
6718 ++ if ((int_status & FW_MAILBOX_INT) &&
6719 ++ bdev->data->chipid == 0x7921) {
6720 ++ sdio_writel(bdev->func, PH2DSM0R_DRIVER_OWN,
6721 ++ MTK_REG_PH2DSM0R, 0);
6722 ++ }
6723 +
6724 + if (int_status & FW_OWN_BACK_INT)
6725 + bt_dev_dbg(bdev->hdev, "Get fw own back");
6726 +@@ -531,7 +624,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
6727 + static int btmtksdio_open(struct hci_dev *hdev)
6728 + {
6729 + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
6730 +- u32 status, val;
6731 ++ u32 val;
6732 + int err;
6733 +
6734 + sdio_claim_host(bdev->func);
6735 +@@ -542,18 +635,10 @@ static int btmtksdio_open(struct hci_dev *hdev)
6736 +
6737 + set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
6738 +
6739 +- /* Get ownership from the device */
6740 +- sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
6741 ++ err = btmtksdio_drv_pmctrl(bdev);
6742 + if (err < 0)
6743 + goto err_disable_func;
6744 +
6745 +- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6746 +- status & C_COM_DRV_OWN, 2000, 1000000);
6747 +- if (err < 0) {
6748 +- bt_dev_err(bdev->hdev, "Cannot get ownership from device");
6749 +- goto err_disable_func;
6750 +- }
6751 +-
6752 + /* Disable interrupt & mask out all interrupt sources */
6753 + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
6754 + if (err < 0)
6755 +@@ -623,8 +708,6 @@ err_release_host:
6756 + static int btmtksdio_close(struct hci_dev *hdev)
6757 + {
6758 + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
6759 +- u32 status;
6760 +- int err;
6761 +
6762 + sdio_claim_host(bdev->func);
6763 +
6764 +@@ -635,13 +718,7 @@ static int btmtksdio_close(struct hci_dev *hdev)
6765 +
6766 + cancel_work_sync(&bdev->txrx_work);
6767 +
6768 +- /* Return ownership to the device */
6769 +- sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
6770 +-
6771 +- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6772 +- !(status & C_COM_DRV_OWN), 2000, 1000000);
6773 +- if (err < 0)
6774 +- bt_dev_err(bdev->hdev, "Cannot return ownership to device");
6775 ++ btmtksdio_fw_pmctrl(bdev);
6776 +
6777 + clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
6778 + sdio_disable_func(bdev->func);
6779 +@@ -686,6 +763,7 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
6780 +
6781 + static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
6782 + {
6783 ++ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
6784 + struct btmtk_hci_wmt_params wmt_params;
6785 + struct btmtk_tci_sleep tci_sleep;
6786 + struct sk_buff *skb;
6787 +@@ -746,6 +824,8 @@ ignore_setup_fw:
6788 + return err;
6789 + }
6790 +
6791 ++ set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
6792 ++
6793 + ignore_func_on:
6794 + /* Apply the low power environment setup */
6795 + tci_sleep.mode = 0x5;
6796 +@@ -768,6 +848,7 @@ ignore_func_on:
6797 +
6798 + static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
6799 + {
6800 ++ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
6801 + struct btmtk_hci_wmt_params wmt_params;
6802 + u8 param = 0x1;
6803 + int err;
6804 +@@ -793,6 +874,7 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
6805 +
6806 + hci_set_msft_opcode(hdev, 0xFD30);
6807 + hci_set_aosp_capable(hdev);
6808 ++ set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
6809 +
6810 + return err;
6811 + }
6812 +@@ -862,6 +944,15 @@ static int btmtksdio_setup(struct hci_dev *hdev)
6813 + err = mt79xx_setup(hdev, fwname);
6814 + if (err < 0)
6815 + return err;
6816 ++
6817 ++ err = btmtksdio_fw_pmctrl(bdev);
6818 ++ if (err < 0)
6819 ++ return err;
6820 ++
6821 ++ err = btmtksdio_drv_pmctrl(bdev);
6822 ++ if (err < 0)
6823 ++ return err;
6824 ++
6825 + break;
6826 + case 0x7663:
6827 + case 0x7668:
6828 +@@ -1004,6 +1095,8 @@ static int btmtksdio_probe(struct sdio_func *func,
6829 + hdev->manufacturer = 70;
6830 + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
6831 +
6832 ++ sdio_set_drvdata(func, bdev);
6833 ++
6834 + err = hci_register_dev(hdev);
6835 + if (err < 0) {
6836 + dev_err(&func->dev, "Can't register HCI device\n");
6837 +@@ -1011,8 +1104,6 @@ static int btmtksdio_probe(struct sdio_func *func,
6838 + return err;
6839 + }
6840 +
6841 +- sdio_set_drvdata(func, bdev);
6842 +-
6843 + /* pm_runtime_enable would be done after the firmware is being
6844 + * downloaded because the core layer probably already enables
6845 + * runtime PM for this func such as the case host->caps &
6846 +@@ -1058,7 +1149,6 @@ static int btmtksdio_runtime_suspend(struct device *dev)
6847 + {
6848 + struct sdio_func *func = dev_to_sdio_func(dev);
6849 + struct btmtksdio_dev *bdev;
6850 +- u32 status;
6851 + int err;
6852 +
6853 + bdev = sdio_get_drvdata(func);
6854 +@@ -1070,19 +1160,10 @@ static int btmtksdio_runtime_suspend(struct device *dev)
6855 +
6856 + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
6857 +
6858 +- sdio_claim_host(bdev->func);
6859 +-
6860 +- sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
6861 +- if (err < 0)
6862 +- goto out;
6863 ++ err = btmtksdio_fw_pmctrl(bdev);
6864 +
6865 +- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6866 +- !(status & C_COM_DRV_OWN), 2000, 1000000);
6867 +-out:
6868 + bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
6869 +
6870 +- sdio_release_host(bdev->func);
6871 +-
6872 + return err;
6873 + }
6874 +
6875 +@@ -1090,7 +1171,6 @@ static int btmtksdio_runtime_resume(struct device *dev)
6876 + {
6877 + struct sdio_func *func = dev_to_sdio_func(dev);
6878 + struct btmtksdio_dev *bdev;
6879 +- u32 status;
6880 + int err;
6881 +
6882 + bdev = sdio_get_drvdata(func);
6883 +@@ -1100,19 +1180,10 @@ static int btmtksdio_runtime_resume(struct device *dev)
6884 + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
6885 + return 0;
6886 +
6887 +- sdio_claim_host(bdev->func);
6888 ++ err = btmtksdio_drv_pmctrl(bdev);
6889 +
6890 +- sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
6891 +- if (err < 0)
6892 +- goto out;
6893 +-
6894 +- err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
6895 +- status & C_COM_DRV_OWN, 2000, 1000000);
6896 +-out:
6897 + bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
6898 +
6899 +- sdio_release_host(bdev->func);
6900 +-
6901 + return err;
6902 + }
6903 +
6904 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
6905 +index 19d5686f8a2a1..2afbd87d77c9b 100644
6906 +--- a/drivers/bluetooth/btusb.c
6907 ++++ b/drivers/bluetooth/btusb.c
6908 +@@ -62,6 +62,7 @@ static struct usb_driver btusb_driver;
6909 + #define BTUSB_QCA_WCN6855 0x1000000
6910 + #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000
6911 + #define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000
6912 ++#define BTUSB_INTEL_NO_WBS_SUPPORT 0x8000000
6913 +
6914 + static const struct usb_device_id btusb_table[] = {
6915 + /* Generic Bluetooth USB device */
6916 +@@ -385,9 +386,11 @@ static const struct usb_device_id blacklist_table[] = {
6917 + { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
6918 + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
6919 + { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
6920 ++ BTUSB_INTEL_NO_WBS_SUPPORT |
6921 + BTUSB_INTEL_BROKEN_INITIAL_NCMD |
6922 + BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
6923 + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED |
6924 ++ BTUSB_INTEL_NO_WBS_SUPPORT |
6925 + BTUSB_INTEL_BROKEN_SHUTDOWN_LED },
6926 + { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED },
6927 + { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED |
6928 +@@ -3743,6 +3746,9 @@ static int btusb_probe(struct usb_interface *intf,
6929 + hdev->send = btusb_send_frame_intel;
6930 + hdev->cmd_timeout = btusb_intel_cmd_timeout;
6931 +
6932 ++ if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
6933 ++ btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
6934 ++
6935 + if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD)
6936 + btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD);
6937 +
6938 +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
6939 +index 34286ffe0568f..7ac6908a4dfb4 100644
6940 +--- a/drivers/bluetooth/hci_h5.c
6941 ++++ b/drivers/bluetooth/hci_h5.c
6942 +@@ -629,9 +629,11 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
6943 + break;
6944 + }
6945 +
6946 +- pm_runtime_get_sync(&hu->serdev->dev);
6947 +- pm_runtime_mark_last_busy(&hu->serdev->dev);
6948 +- pm_runtime_put_autosuspend(&hu->serdev->dev);
6949 ++ if (hu->serdev) {
6950 ++ pm_runtime_get_sync(&hu->serdev->dev);
6951 ++ pm_runtime_mark_last_busy(&hu->serdev->dev);
6952 ++ pm_runtime_put_autosuspend(&hu->serdev->dev);
6953 ++ }
6954 +
6955 + return 0;
6956 + }
6957 +diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
6958 +index 3b00d82d36cf7..4cda890ce6470 100644
6959 +--- a/drivers/bluetooth/hci_serdev.c
6960 ++++ b/drivers/bluetooth/hci_serdev.c
6961 +@@ -305,6 +305,8 @@ int hci_uart_register_device(struct hci_uart *hu,
6962 + if (err)
6963 + return err;
6964 +
6965 ++ percpu_init_rwsem(&hu->proto_lock);
6966 ++
6967 + err = p->open(hu);
6968 + if (err)
6969 + goto err_open;
6970 +@@ -327,7 +329,6 @@ int hci_uart_register_device(struct hci_uart *hu,
6971 +
6972 + INIT_WORK(&hu->init_ready, hci_uart_init_work);
6973 + INIT_WORK(&hu->write_work, hci_uart_write_work);
6974 +- percpu_init_rwsem(&hu->proto_lock);
6975 +
6976 + /* Only when vendor specific setup callback is provided, consider
6977 + * the manufacturer information valid. This avoids filling in the
6978 +diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c
6979 +index 858d7516410bb..d818586c229d2 100644
6980 +--- a/drivers/bus/mhi/core/debugfs.c
6981 ++++ b/drivers/bus/mhi/core/debugfs.c
6982 +@@ -60,16 +60,16 @@ static int mhi_debugfs_events_show(struct seq_file *m, void *d)
6983 + }
6984 +
6985 + seq_printf(m, "Index: %d intmod count: %lu time: %lu",
6986 +- i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >>
6987 ++ i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >>
6988 + EV_CTX_INTMODC_SHIFT,
6989 +- (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >>
6990 ++ (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >>
6991 + EV_CTX_INTMODT_SHIFT);
6992 +
6993 +- seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase,
6994 +- er_ctxt->rlen);
6995 ++ seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase),
6996 ++ le64_to_cpu(er_ctxt->rlen));
6997 +
6998 +- seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp,
6999 +- er_ctxt->wp);
7000 ++ seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp),
7001 ++ le64_to_cpu(er_ctxt->wp));
7002 +
7003 + seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
7004 + &mhi_event->db_cfg.db_val);
7005 +@@ -106,18 +106,18 @@ static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
7006 +
7007 + seq_printf(m,
7008 + "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
7009 +- mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg &
7010 ++ mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) &
7011 + CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT,
7012 +- (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >>
7013 +- CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg &
7014 ++ (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >>
7015 ++ CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) &
7016 + CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT);
7017 +
7018 +- seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype,
7019 +- chan_ctxt->erindex);
7020 ++ seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype),
7021 ++ le32_to_cpu(chan_ctxt->erindex));
7022 +
7023 + seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
7024 +- chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp,
7025 +- chan_ctxt->wp);
7026 ++ le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen),
7027 ++ le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp));
7028 +
7029 + seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
7030 + ring->rp, ring->wp,
7031 +diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
7032 +index 046f407dc5d6e..d8787aaa176ba 100644
7033 +--- a/drivers/bus/mhi/core/init.c
7034 ++++ b/drivers/bus/mhi/core/init.c
7035 +@@ -77,12 +77,14 @@ static const char * const mhi_pm_state_str[] = {
7036 + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
7037 + };
7038 +
7039 +-const char *to_mhi_pm_state_str(enum mhi_pm_state state)
7040 ++const char *to_mhi_pm_state_str(u32 state)
7041 + {
7042 +- unsigned long pm_state = state;
7043 +- int index = find_last_bit(&pm_state, 32);
7044 ++ int index;
7045 +
7046 +- if (index >= ARRAY_SIZE(mhi_pm_state_str))
7047 ++ if (state)
7048 ++ index = __fls(state);
7049 ++
7050 ++ if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
7051 + return "Invalid State";
7052 +
7053 + return mhi_pm_state_str[index];
7054 +@@ -291,17 +293,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
7055 + if (mhi_chan->offload_ch)
7056 + continue;
7057 +
7058 +- tmp = chan_ctxt->chcfg;
7059 ++ tmp = le32_to_cpu(chan_ctxt->chcfg);
7060 + tmp &= ~CHAN_CTX_CHSTATE_MASK;
7061 + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
7062 + tmp &= ~CHAN_CTX_BRSTMODE_MASK;
7063 + tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
7064 + tmp &= ~CHAN_CTX_POLLCFG_MASK;
7065 + tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
7066 +- chan_ctxt->chcfg = tmp;
7067 ++ chan_ctxt->chcfg = cpu_to_le32(tmp);
7068 +
7069 +- chan_ctxt->chtype = mhi_chan->type;
7070 +- chan_ctxt->erindex = mhi_chan->er_index;
7071 ++ chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
7072 ++ chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
7073 +
7074 + mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
7075 + mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
7076 +@@ -326,14 +328,14 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
7077 + if (mhi_event->offload_ev)
7078 + continue;
7079 +
7080 +- tmp = er_ctxt->intmod;
7081 ++ tmp = le32_to_cpu(er_ctxt->intmod);
7082 + tmp &= ~EV_CTX_INTMODC_MASK;
7083 + tmp &= ~EV_CTX_INTMODT_MASK;
7084 + tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
7085 +- er_ctxt->intmod = tmp;
7086 ++ er_ctxt->intmod = cpu_to_le32(tmp);
7087 +
7088 +- er_ctxt->ertype = MHI_ER_TYPE_VALID;
7089 +- er_ctxt->msivec = mhi_event->irq;
7090 ++ er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
7091 ++ er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
7092 + mhi_event->db_cfg.db_mode = true;
7093 +
7094 + ring->el_size = sizeof(struct mhi_tre);
7095 +@@ -347,9 +349,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
7096 + * ring is empty
7097 + */
7098 + ring->rp = ring->wp = ring->base;
7099 +- er_ctxt->rbase = ring->iommu_base;
7100 ++ er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
7101 + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
7102 +- er_ctxt->rlen = ring->len;
7103 ++ er_ctxt->rlen = cpu_to_le64(ring->len);
7104 + ring->ctxt_wp = &er_ctxt->wp;
7105 + }
7106 +
7107 +@@ -376,9 +378,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
7108 + goto error_alloc_cmd;
7109 +
7110 + ring->rp = ring->wp = ring->base;
7111 +- cmd_ctxt->rbase = ring->iommu_base;
7112 ++ cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
7113 + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
7114 +- cmd_ctxt->rlen = ring->len;
7115 ++ cmd_ctxt->rlen = cpu_to_le64(ring->len);
7116 + ring->ctxt_wp = &cmd_ctxt->wp;
7117 + }
7118 +
7119 +@@ -579,10 +581,10 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
7120 + chan_ctxt->rp = 0;
7121 + chan_ctxt->wp = 0;
7122 +
7123 +- tmp = chan_ctxt->chcfg;
7124 ++ tmp = le32_to_cpu(chan_ctxt->chcfg);
7125 + tmp &= ~CHAN_CTX_CHSTATE_MASK;
7126 + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
7127 +- chan_ctxt->chcfg = tmp;
7128 ++ chan_ctxt->chcfg = cpu_to_le32(tmp);
7129 +
7130 + /* Update to all cores */
7131 + smp_wmb();
7132 +@@ -616,14 +618,14 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
7133 + return -ENOMEM;
7134 + }
7135 +
7136 +- tmp = chan_ctxt->chcfg;
7137 ++ tmp = le32_to_cpu(chan_ctxt->chcfg);
7138 + tmp &= ~CHAN_CTX_CHSTATE_MASK;
7139 + tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
7140 +- chan_ctxt->chcfg = tmp;
7141 ++ chan_ctxt->chcfg = cpu_to_le32(tmp);
7142 +
7143 +- chan_ctxt->rbase = tre_ring->iommu_base;
7144 ++ chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
7145 + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
7146 +- chan_ctxt->rlen = tre_ring->len;
7147 ++ chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
7148 + tre_ring->ctxt_wp = &chan_ctxt->wp;
7149 +
7150 + tre_ring->rp = tre_ring->wp = tre_ring->base;
7151 +diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
7152 +index e2e10474a9d92..37c39bf1c7a98 100644
7153 +--- a/drivers/bus/mhi/core/internal.h
7154 ++++ b/drivers/bus/mhi/core/internal.h
7155 +@@ -209,14 +209,14 @@ extern struct bus_type mhi_bus_type;
7156 + #define EV_CTX_INTMODT_MASK GENMASK(31, 16)
7157 + #define EV_CTX_INTMODT_SHIFT 16
7158 + struct mhi_event_ctxt {
7159 +- __u32 intmod;
7160 +- __u32 ertype;
7161 +- __u32 msivec;
7162 +-
7163 +- __u64 rbase __packed __aligned(4);
7164 +- __u64 rlen __packed __aligned(4);
7165 +- __u64 rp __packed __aligned(4);
7166 +- __u64 wp __packed __aligned(4);
7167 ++ __le32 intmod;
7168 ++ __le32 ertype;
7169 ++ __le32 msivec;
7170 ++
7171 ++ __le64 rbase __packed __aligned(4);
7172 ++ __le64 rlen __packed __aligned(4);
7173 ++ __le64 rp __packed __aligned(4);
7174 ++ __le64 wp __packed __aligned(4);
7175 + };
7176 +
7177 + #define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
7178 +@@ -227,25 +227,25 @@ struct mhi_event_ctxt {
7179 + #define CHAN_CTX_POLLCFG_SHIFT 10
7180 + #define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
7181 + struct mhi_chan_ctxt {
7182 +- __u32 chcfg;
7183 +- __u32 chtype;
7184 +- __u32 erindex;
7185 +-
7186 +- __u64 rbase __packed __aligned(4);
7187 +- __u64 rlen __packed __aligned(4);
7188 +- __u64 rp __packed __aligned(4);
7189 +- __u64 wp __packed __aligned(4);
7190 ++ __le32 chcfg;
7191 ++ __le32 chtype;
7192 ++ __le32 erindex;
7193 ++
7194 ++ __le64 rbase __packed __aligned(4);
7195 ++ __le64 rlen __packed __aligned(4);
7196 ++ __le64 rp __packed __aligned(4);
7197 ++ __le64 wp __packed __aligned(4);
7198 + };
7199 +
7200 + struct mhi_cmd_ctxt {
7201 +- __u32 reserved0;
7202 +- __u32 reserved1;
7203 +- __u32 reserved2;
7204 +-
7205 +- __u64 rbase __packed __aligned(4);
7206 +- __u64 rlen __packed __aligned(4);
7207 +- __u64 rp __packed __aligned(4);
7208 +- __u64 wp __packed __aligned(4);
7209 ++ __le32 reserved0;
7210 ++ __le32 reserved1;
7211 ++ __le32 reserved2;
7212 ++
7213 ++ __le64 rbase __packed __aligned(4);
7214 ++ __le64 rlen __packed __aligned(4);
7215 ++ __le64 rp __packed __aligned(4);
7216 ++ __le64 wp __packed __aligned(4);
7217 + };
7218 +
7219 + struct mhi_ctxt {
7220 +@@ -258,8 +258,8 @@ struct mhi_ctxt {
7221 + };
7222 +
7223 + struct mhi_tre {
7224 +- u64 ptr;
7225 +- u32 dword[2];
7226 ++ __le64 ptr;
7227 ++ __le32 dword[2];
7228 + };
7229 +
7230 + struct bhi_vec_entry {
7231 +@@ -277,57 +277,58 @@ enum mhi_cmd_type {
7232 + /* No operation command */
7233 + #define MHI_TRE_CMD_NOOP_PTR (0)
7234 + #define MHI_TRE_CMD_NOOP_DWORD0 (0)
7235 +-#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
7236 ++#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16))
7237 +
7238 + /* Channel reset command */
7239 + #define MHI_TRE_CMD_RESET_PTR (0)
7240 + #define MHI_TRE_CMD_RESET_DWORD0 (0)
7241 +-#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
7242 +- (MHI_CMD_RESET_CHAN << 16))
7243 ++#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \
7244 ++ (MHI_CMD_RESET_CHAN << 16)))
7245 +
7246 + /* Channel stop command */
7247 + #define MHI_TRE_CMD_STOP_PTR (0)
7248 + #define MHI_TRE_CMD_STOP_DWORD0 (0)
7249 +-#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
7250 +- (MHI_CMD_STOP_CHAN << 16))
7251 ++#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \
7252 ++ (MHI_CMD_STOP_CHAN << 16)))
7253 +
7254 + /* Channel start command */
7255 + #define MHI_TRE_CMD_START_PTR (0)
7256 + #define MHI_TRE_CMD_START_DWORD0 (0)
7257 +-#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
7258 +- (MHI_CMD_START_CHAN << 16))
7259 ++#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \
7260 ++ (MHI_CMD_START_CHAN << 16)))
7261 +
7262 +-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
7263 +-#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
7264 ++#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)]))
7265 ++#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF)
7266 ++#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF)
7267 +
7268 + /* Event descriptor macros */
7269 +-#define MHI_TRE_EV_PTR(ptr) (ptr)
7270 +-#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
7271 +-#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
7272 +-#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
7273 +-#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
7274 +-#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
7275 +-#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
7276 +-#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
7277 +-#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
7278 +-#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
7279 +-#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
7280 +-#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
7281 +-#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
7282 +-#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
7283 +-#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
7284 +-#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
7285 ++#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr))
7286 ++#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len))
7287 ++#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16)))
7288 ++#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr))
7289 ++#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF)
7290 ++#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF)
7291 ++#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF)
7292 ++#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF)
7293 ++#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF)
7294 ++#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF)
7295 ++#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0)
7296 ++#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre))
7297 ++#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre))
7298 ++#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF)
7299 ++#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF)
7300 ++#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF)
7301 +
7302 + /* Transfer descriptor macros */
7303 +-#define MHI_TRE_DATA_PTR(ptr) (ptr)
7304 +-#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
7305 +-#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
7306 +- | (ieot << 9) | (ieob << 8) | chain)
7307 ++#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr))
7308 ++#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU))
7309 ++#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \
7310 ++ | (ieot << 9) | (ieob << 8) | chain))
7311 +
7312 + /* RSC transfer descriptor macros */
7313 +-#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
7314 +-#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
7315 +-#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
7316 ++#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr))
7317 ++#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie))
7318 ++#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16))
7319 +
7320 + enum mhi_pkt_type {
7321 + MHI_PKT_TYPE_INVALID = 0x0,
7322 +@@ -500,7 +501,7 @@ struct state_transition {
7323 + struct mhi_ring {
7324 + dma_addr_t dma_handle;
7325 + dma_addr_t iommu_base;
7326 +- u64 *ctxt_wp; /* point to ctxt wp */
7327 ++ __le64 *ctxt_wp; /* point to ctxt wp */
7328 + void *pre_aligned;
7329 + void *base;
7330 + void *rp;
7331 +@@ -622,7 +623,7 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
7332 + enum mhi_pm_state __must_check mhi_tryset_pm_state(
7333 + struct mhi_controller *mhi_cntrl,
7334 + enum mhi_pm_state state);
7335 +-const char *to_mhi_pm_state_str(enum mhi_pm_state state);
7336 ++const char *to_mhi_pm_state_str(u32 state);
7337 + int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
7338 + enum dev_st_transition state);
7339 + void mhi_pm_st_worker(struct work_struct *work);
7340 +diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
7341 +index ffde617f93a3b..85f4f7c8d7c60 100644
7342 +--- a/drivers/bus/mhi/core/main.c
7343 ++++ b/drivers/bus/mhi/core/main.c
7344 +@@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event)
7345 + struct mhi_ring *ring = &mhi_event->ring;
7346 +
7347 + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
7348 +- ring->db_addr, *ring->ctxt_wp);
7349 ++ ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
7350 + }
7351 +
7352 + void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
7353 +@@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
7354 + struct mhi_ring *ring = &mhi_cmd->ring;
7355 +
7356 + db = ring->iommu_base + (ring->wp - ring->base);
7357 +- *ring->ctxt_wp = db;
7358 ++ *ring->ctxt_wp = cpu_to_le64(db);
7359 + mhi_write_db(mhi_cntrl, ring->db_addr, db);
7360 + }
7361 +
7362 +@@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
7363 + * before letting h/w know there is new element to fetch.
7364 + */
7365 + dma_wmb();
7366 +- *ring->ctxt_wp = db;
7367 ++ *ring->ctxt_wp = cpu_to_le64(db);
7368 +
7369 + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
7370 + ring->db_addr, db);
7371 +@@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
7372 + struct mhi_event_ctxt *er_ctxt =
7373 + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
7374 + struct mhi_ring *ev_ring = &mhi_event->ring;
7375 +- dma_addr_t ptr = er_ctxt->rp;
7376 ++ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
7377 + void *dev_rp;
7378 +
7379 + if (!is_valid_ring_ptr(ev_ring, ptr)) {
7380 +@@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
7381 +
7382 + /* Update the WP */
7383 + ring->wp += ring->el_size;
7384 +- ctxt_wp = *ring->ctxt_wp + ring->el_size;
7385 ++ ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
7386 +
7387 + if (ring->wp >= (ring->base + ring->len)) {
7388 + ring->wp = ring->base;
7389 + ctxt_wp = ring->iommu_base;
7390 + }
7391 +
7392 +- *ring->ctxt_wp = ctxt_wp;
7393 ++ *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
7394 +
7395 + /* Update the RP */
7396 + ring->rp += ring->el_size;
7397 +@@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
7398 + struct device *dev = &mhi_cntrl->mhi_dev->dev;
7399 + u32 chan;
7400 + int count = 0;
7401 +- dma_addr_t ptr = er_ctxt->rp;
7402 ++ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
7403 +
7404 + /*
7405 + * This is a quick check to avoid unnecessary event processing
7406 +@@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
7407 + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
7408 + local_rp = ev_ring->rp;
7409 +
7410 +- ptr = er_ctxt->rp;
7411 ++ ptr = le64_to_cpu(er_ctxt->rp);
7412 + if (!is_valid_ring_ptr(ev_ring, ptr)) {
7413 + dev_err(&mhi_cntrl->mhi_dev->dev,
7414 + "Event ring rp points outside of the event ring\n");
7415 +@@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
7416 + int count = 0;
7417 + u32 chan;
7418 + struct mhi_chan *mhi_chan;
7419 +- dma_addr_t ptr = er_ctxt->rp;
7420 ++ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
7421 +
7422 + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
7423 + return -EIO;
7424 +@@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
7425 + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
7426 + local_rp = ev_ring->rp;
7427 +
7428 +- ptr = er_ctxt->rp;
7429 ++ ptr = le64_to_cpu(er_ctxt->rp);
7430 + if (!is_valid_ring_ptr(ev_ring, ptr)) {
7431 + dev_err(&mhi_cntrl->mhi_dev->dev,
7432 + "Event ring rp points outside of the event ring\n");
7433 +@@ -1533,7 +1533,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
7434 + /* mark all stale events related to channel as STALE event */
7435 + spin_lock_irqsave(&mhi_event->lock, flags);
7436 +
7437 +- ptr = er_ctxt->rp;
7438 ++ ptr = le64_to_cpu(er_ctxt->rp);
7439 + if (!is_valid_ring_ptr(ev_ring, ptr)) {
7440 + dev_err(&mhi_cntrl->mhi_dev->dev,
7441 + "Event ring rp points outside of the event ring\n");
7442 +diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
7443 +index 4aae0baea0084..c35c5ddc72207 100644
7444 +--- a/drivers/bus/mhi/core/pm.c
7445 ++++ b/drivers/bus/mhi/core/pm.c
7446 +@@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
7447 + continue;
7448 +
7449 + ring->wp = ring->base + ring->len - ring->el_size;
7450 +- *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
7451 ++ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
7452 + /* Update all cores */
7453 + smp_wmb();
7454 +
7455 +@@ -420,7 +420,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
7456 + continue;
7457 +
7458 + ring->wp = ring->base + ring->len - ring->el_size;
7459 +- *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
7460 ++ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
7461 + /* Update to all cores */
7462 + smp_wmb();
7463 +
7464 +diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
7465 +index b79895810c52f..9527b7d638401 100644
7466 +--- a/drivers/bus/mhi/pci_generic.c
7467 ++++ b/drivers/bus/mhi/pci_generic.c
7468 +@@ -327,6 +327,7 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
7469 + .config = &modem_quectel_em1xx_config,
7470 + .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
7471 + .dma_data_width = 32,
7472 ++ .mru_default = 32768,
7473 + .sideband_wake = true,
7474 + };
7475 +
7476 +diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
7477 +index 626dedd110cbc..fca0d0669aa97 100644
7478 +--- a/drivers/bus/mips_cdmm.c
7479 ++++ b/drivers/bus/mips_cdmm.c
7480 +@@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void)
7481 + np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
7482 + if (np) {
7483 + err = of_address_to_resource(np, 0, &res);
7484 ++ of_node_put(np);
7485 + if (!err)
7486 + return res.start;
7487 + }
7488 +diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
7489 +index 9704963f9d500..a087156a58186 100644
7490 +--- a/drivers/char/hw_random/Kconfig
7491 ++++ b/drivers/char/hw_random/Kconfig
7492 +@@ -401,7 +401,7 @@ config HW_RANDOM_MESON
7493 +
7494 + config HW_RANDOM_CAVIUM
7495 + tristate "Cavium ThunderX Random Number Generator support"
7496 +- depends on HW_RANDOM && PCI && ARM64
7497 ++ depends on HW_RANDOM && PCI && ARCH_THUNDER
7498 + default HW_RANDOM
7499 + help
7500 + This driver provides kernel-side support for the Random Number
7501 +diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
7502 +index ecb71c4317a50..8cf0ef501341e 100644
7503 +--- a/drivers/char/hw_random/atmel-rng.c
7504 ++++ b/drivers/char/hw_random/atmel-rng.c
7505 +@@ -114,6 +114,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
7506 +
7507 + err_register:
7508 + clk_disable_unprepare(trng->clk);
7509 ++ atmel_trng_disable(trng);
7510 + return ret;
7511 + }
7512 +
7513 +diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
7514 +index 6f66919652bf5..7c55f4cf4a8ba 100644
7515 +--- a/drivers/char/hw_random/cavium-rng-vf.c
7516 ++++ b/drivers/char/hw_random/cavium-rng-vf.c
7517 +@@ -179,7 +179,7 @@ static int cavium_map_pf_regs(struct cavium_rng *rng)
7518 + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
7519 + PCI_DEVID_CAVIUM_RNG_PF, NULL);
7520 + if (!pdev) {
7521 +- dev_err(&pdev->dev, "Cannot find RNG PF device\n");
7522 ++ pr_err("Cannot find RNG PF device\n");
7523 + return -EIO;
7524 + }
7525 +
7526 +diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
7527 +index 67947a19aa225..e8f9621e79541 100644
7528 +--- a/drivers/char/hw_random/nomadik-rng.c
7529 ++++ b/drivers/char/hw_random/nomadik-rng.c
7530 +@@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
7531 + out_release:
7532 + amba_release_regions(dev);
7533 + out_clk:
7534 +- clk_disable(rng_clk);
7535 ++ clk_disable_unprepare(rng_clk);
7536 + return ret;
7537 + }
7538 +
7539 + static void nmk_rng_remove(struct amba_device *dev)
7540 + {
7541 + amba_release_regions(dev);
7542 +- clk_disable(rng_clk);
7543 ++ clk_disable_unprepare(rng_clk);
7544 + }
7545 +
7546 + static const struct amba_id nmk_rng_ids[] = {
7547 +diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
7548 +index a2f34d13fb543..6ea7da1d6d755 100644
7549 +--- a/drivers/clk/actions/owl-s700.c
7550 ++++ b/drivers/clk/actions/owl-s700.c
7551 +@@ -162,6 +162,7 @@ static struct clk_div_table hdmia_div_table[] = {
7552 +
7553 + static struct clk_div_table rmii_div_table[] = {
7554 + {0, 4}, {1, 10},
7555 ++ {0, 0}
7556 + };
7557 +
7558 + /* divider clocks */
7559 +diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c
7560 +index 790890978424a..5144ada2c7e1a 100644
7561 +--- a/drivers/clk/actions/owl-s900.c
7562 ++++ b/drivers/clk/actions/owl-s900.c
7563 +@@ -140,7 +140,7 @@ static struct clk_div_table rmii_ref_div_table[] = {
7564 +
7565 + static struct clk_div_table usb3_mac_div_table[] = {
7566 + { 1, 2 }, { 2, 3 }, { 3, 4 },
7567 +- { 0, 8 },
7568 ++ { 0, 0 }
7569 + };
7570 +
7571 + static struct clk_div_table i2s_div_table[] = {
7572 +diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
7573 +index 369dfafabbca2..060e908086a13 100644
7574 +--- a/drivers/clk/at91/sama7g5.c
7575 ++++ b/drivers/clk/at91/sama7g5.c
7576 +@@ -696,16 +696,16 @@ static const struct {
7577 + { .n = "pdmc0_gclk",
7578 + .id = 68,
7579 + .r = { .max = 50000000 },
7580 +- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
7581 +- .pp_mux_table = { 5, 8, },
7582 ++ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
7583 ++ .pp_mux_table = { 5, 9, },
7584 + .pp_count = 2,
7585 + .pp_chg_id = INT_MIN, },
7586 +
7587 + { .n = "pdmc1_gclk",
7588 + .id = 69,
7589 + .r = { .max = 50000000, },
7590 +- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
7591 +- .pp_mux_table = { 5, 8, },
7592 ++ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
7593 ++ .pp_mux_table = { 5, 9, },
7594 + .pp_count = 2,
7595 + .pp_chg_id = INT_MIN, },
7596 +
7597 +diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
7598 +index a2c6486ef1708..f8417ee2961aa 100644
7599 +--- a/drivers/clk/clk-clps711x.c
7600 ++++ b/drivers/clk/clk-clps711x.c
7601 +@@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = {
7602 + { .val = 1, .div = 8, },
7603 + { .val = 2, .div = 2, },
7604 + { .val = 3, .div = 1, },
7605 ++ { /* sentinel */ }
7606 + };
7607 +
7608 + static const struct clk_div_table timer_div_table[] = {
7609 + { .val = 0, .div = 256, },
7610 + { .val = 1, .div = 1, },
7611 ++ { /* sentinel */ }
7612 + };
7613 +
7614 + struct clps711x_clk {
7615 +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
7616 +index 8de6a22498e70..01b64b962e76f 100644
7617 +--- a/drivers/clk/clk.c
7618 ++++ b/drivers/clk/clk.c
7619 +@@ -3456,6 +3456,19 @@ static void clk_core_reparent_orphans_nolock(void)
7620 + __clk_set_parent_after(orphan, parent, NULL);
7621 + __clk_recalc_accuracies(orphan);
7622 + __clk_recalc_rates(orphan, 0);
7623 ++
7624 ++ /*
7625 ++ * __clk_init_parent() will set the initial req_rate to
7626 ++ * 0 if the clock doesn't have clk_ops::recalc_rate and
7627 ++ * is an orphan when it's registered.
7628 ++ *
7629 ++ * 'req_rate' is used by clk_set_rate_range() and
7630 ++ * clk_put() to trigger a clk_set_rate() call whenever
7631 ++ * the boundaries are modified. Let's make sure
7632 ++ * 'req_rate' is set to something non-zero so that
7633 ++ * clk_set_rate_range() doesn't drop the frequency.
7634 ++ */
7635 ++ orphan->req_rate = orphan->rate;
7636 + }
7637 + }
7638 + }
7639 +@@ -3773,8 +3786,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
7640 + struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
7641 + {
7642 + struct device *dev = hw->core->dev;
7643 ++ const char *name = dev ? dev_name(dev) : NULL;
7644 +
7645 +- return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
7646 ++ return clk_hw_create_clk(dev, hw, name, con_id);
7647 + }
7648 + EXPORT_SYMBOL(clk_hw_get_clk);
7649 +
7650 +diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
7651 +index 56012a3d02192..9ea1a80acbe8b 100644
7652 +--- a/drivers/clk/hisilicon/clk-hi3559a.c
7653 ++++ b/drivers/clk/hisilicon/clk-hi3559a.c
7654 +@@ -611,8 +611,8 @@ static struct hisi_mux_clock hi3559av100_shub_mux_clks[] = {
7655 +
7656 +
7657 + /* shub div clk */
7658 +-static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}};
7659 +-static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}};
7660 ++static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}, {/*sentinel*/}};
7661 ++static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}, {/*sentinel*/}};
7662 +
7663 + static struct hisi_divider_clock hi3559av100_shub_div_clks[] = {
7664 + { HI3559AV100_SHUB_SPI_SOURCE_CLK, "clk_spi_clk", "shub_clk", 0, 0x20, 24, 2,
7665 +diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
7666 +index c4e0f1c07192f..3f6fd7ef2a68f 100644
7667 +--- a/drivers/clk/imx/clk-imx7d.c
7668 ++++ b/drivers/clk/imx/clk-imx7d.c
7669 +@@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
7670 + hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
7671 + hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0);
7672 + hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
7673 +- hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
7674 + hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
7675 + hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
7676 + hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
7677 +diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
7678 +index b23758083ce52..5e31a6a24b3a3 100644
7679 +--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
7680 ++++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
7681 +@@ -248,7 +248,7 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
7682 +
7683 + for (i = 0; i < count; i++) {
7684 + idx = bit_offset[i] / 4;
7685 +- if (idx > IMX_LPCG_MAX_CLKS) {
7686 ++ if (idx >= IMX_LPCG_MAX_CLKS) {
7687 + dev_warn(&pdev->dev, "invalid bit offset of clock %d\n",
7688 + i);
7689 + ret = -EINVAL;
7690 +diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
7691 +index 703f87622cf5f..1ebf740380efb 100644
7692 +--- a/drivers/clk/loongson1/clk-loongson1c.c
7693 ++++ b/drivers/clk/loongson1/clk-loongson1c.c
7694 +@@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = {
7695 + [1] = { .val = 1, .div = 4 },
7696 + [2] = { .val = 2, .div = 3 },
7697 + [3] = { .val = 3, .div = 3 },
7698 ++ [4] = { /* sentinel */ }
7699 + };
7700 +
7701 + void __init ls1x_clk_init(void)
7702 +diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
7703 +index e1b1b426fae4b..f675fd969c4de 100644
7704 +--- a/drivers/clk/qcom/clk-rcg2.c
7705 ++++ b/drivers/clk/qcom/clk-rcg2.c
7706 +@@ -264,7 +264,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
7707 +
7708 + static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
7709 + {
7710 +- u32 cfg, mask;
7711 ++ u32 cfg, mask, d_val, not2d_val, n_minus_m;
7712 + struct clk_hw *hw = &rcg->clkr.hw;
7713 + int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
7714 +
7715 +@@ -283,8 +283,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
7716 + if (ret)
7717 + return ret;
7718 +
7719 ++ /* Calculate 2d value */
7720 ++ d_val = f->n;
7721 ++
7722 ++ n_minus_m = f->n - f->m;
7723 ++ n_minus_m *= 2;
7724 ++
7725 ++ d_val = clamp_t(u32, d_val, f->m, n_minus_m);
7726 ++ not2d_val = ~d_val & mask;
7727 ++
7728 + ret = regmap_update_bits(rcg->clkr.regmap,
7729 +- RCG_D_OFFSET(rcg), mask, ~f->n);
7730 ++ RCG_D_OFFSET(rcg), mask, not2d_val);
7731 + if (ret)
7732 + return ret;
7733 + }
7734 +@@ -720,6 +729,7 @@ static const struct frac_entry frac_table_pixel[] = {
7735 + { 2, 9 },
7736 + { 4, 9 },
7737 + { 1, 1 },
7738 ++ { 2, 3 },
7739 + { }
7740 + };
7741 +
7742 +diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
7743 +index 108fe27bee10f..541016db3c4bb 100644
7744 +--- a/drivers/clk/qcom/gcc-ipq8074.c
7745 ++++ b/drivers/clk/qcom/gcc-ipq8074.c
7746 +@@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
7747 + { P_GPLL0_DIV2, 4 },
7748 + };
7749 +
7750 +-static const char * const gcc_xo_gpll0[] = {
7751 +- "xo",
7752 +- "gpll0",
7753 +-};
7754 +-
7755 + static const struct parent_map gcc_xo_gpll0_map[] = {
7756 + { P_XO, 0 },
7757 + { P_GPLL0, 1 },
7758 +@@ -956,6 +951,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
7759 + },
7760 + };
7761 +
7762 ++static const struct clk_parent_data gcc_xo_gpll0[] = {
7763 ++ { .fw_name = "xo" },
7764 ++ { .hw = &gpll0.clkr.hw },
7765 ++};
7766 ++
7767 + static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
7768 + F(19200000, P_XO, 1, 0, 0),
7769 + F(200000000, P_GPLL0, 4, 0, 0),
7770 +@@ -969,7 +969,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
7771 + .parent_map = gcc_xo_gpll0_map,
7772 + .clkr.hw.init = &(struct clk_init_data){
7773 + .name = "pcie0_axi_clk_src",
7774 +- .parent_names = gcc_xo_gpll0,
7775 ++ .parent_data = gcc_xo_gpll0,
7776 + .num_parents = 2,
7777 + .ops = &clk_rcg2_ops,
7778 + },
7779 +@@ -1016,7 +1016,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = {
7780 + .parent_map = gcc_xo_gpll0_map,
7781 + .clkr.hw.init = &(struct clk_init_data){
7782 + .name = "pcie1_axi_clk_src",
7783 +- .parent_names = gcc_xo_gpll0,
7784 ++ .parent_data = gcc_xo_gpll0,
7785 + .num_parents = 2,
7786 + .ops = &clk_rcg2_ops,
7787 + },
7788 +@@ -1074,7 +1074,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
7789 + .name = "sdcc1_apps_clk_src",
7790 + .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
7791 + .num_parents = 4,
7792 +- .ops = &clk_rcg2_ops,
7793 ++ .ops = &clk_rcg2_floor_ops,
7794 + },
7795 + };
7796 +
7797 +@@ -1330,7 +1330,7 @@ static struct clk_rcg2 nss_ce_clk_src = {
7798 + .parent_map = gcc_xo_gpll0_map,
7799 + .clkr.hw.init = &(struct clk_init_data){
7800 + .name = "nss_ce_clk_src",
7801 +- .parent_names = gcc_xo_gpll0,
7802 ++ .parent_data = gcc_xo_gpll0,
7803 + .num_parents = 2,
7804 + .ops = &clk_rcg2_ops,
7805 + },
7806 +@@ -4329,8 +4329,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = {
7807 + .parent_map = gcc_xo_gpll0_map,
7808 + .clkr.hw.init = &(struct clk_init_data){
7809 + .name = "pcie0_rchng_clk_src",
7810 +- .parent_hws = (const struct clk_hw *[]) {
7811 +- &gpll0.clkr.hw },
7812 ++ .parent_data = gcc_xo_gpll0,
7813 + .num_parents = 2,
7814 + .ops = &clk_rcg2_ops,
7815 + },
7816 +diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
7817 +index f09499999eb3a..6b702cdacbf2e 100644
7818 +--- a/drivers/clk/qcom/gcc-msm8994.c
7819 ++++ b/drivers/clk/qcom/gcc-msm8994.c
7820 +@@ -77,6 +77,7 @@ static struct clk_alpha_pll gpll4_early = {
7821 +
7822 + static struct clk_alpha_pll_postdiv gpll4 = {
7823 + .offset = 0x1dc0,
7824 ++ .width = 4,
7825 + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
7826 + .clkr.hw.init = &(struct clk_init_data){
7827 + .name = "gpll4",
7828 +diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
7829 +index e6ec02c2c2a8b..344957d533d81 100644
7830 +--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
7831 ++++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
7832 +@@ -103,7 +103,7 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
7833 + DEF_FIXED("s0d12_hsc", R8A779F0_CLK_S0D12_HSC, CLK_S0, 12, 1),
7834 + DEF_FIXED("cl16m_hsc", R8A779F0_CLK_CL16M_HSC, CLK_S0, 48, 1),
7835 + DEF_FIXED("s0d2_cc", R8A779F0_CLK_S0D2_CC, CLK_S0, 2, 1),
7836 +- DEF_FIXED("rsw2", R8A779F0_CLK_RSW2, CLK_PLL5, 2, 1),
7837 ++ DEF_FIXED("rsw2", R8A779F0_CLK_RSW2, CLK_PLL5_DIV2, 5, 1),
7838 + DEF_FIXED("cbfusa", R8A779F0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
7839 + DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
7840 +
7841 +diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
7842 +index 79042bf46fe85..46359afef0d43 100644
7843 +--- a/drivers/clk/renesas/r9a07g044-cpg.c
7844 ++++ b/drivers/clk/renesas/r9a07g044-cpg.c
7845 +@@ -88,8 +88,8 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
7846 + DEF_FIXED(".osc", R9A07G044_OSCCLK, CLK_EXTAL, 1, 1),
7847 + DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
7848 + DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
7849 +- DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2),
7850 +- DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2),
7851 ++ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
7852 ++ DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
7853 + DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
7854 + DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
7855 +
7856 +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
7857 +index b7be7e11b0dfe..bb8a844309bf5 100644
7858 +--- a/drivers/clk/rockchip/clk.c
7859 ++++ b/drivers/clk/rockchip/clk.c
7860 +@@ -180,6 +180,7 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
7861 + unsigned long rate, unsigned long *parent_rate,
7862 + unsigned long *m, unsigned long *n)
7863 + {
7864 ++ struct clk_fractional_divider *fd = to_clk_fd(hw);
7865 + unsigned long p_rate, p_parent_rate;
7866 + struct clk_hw *p_parent;
7867 +
7868 +@@ -190,6 +191,8 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
7869 + *parent_rate = p_parent_rate;
7870 + }
7871 +
7872 ++ fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS;
7873 ++
7874 + clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
7875 + }
7876 +
7877 +diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c
7878 +index 25d31afa0f871..4b59338b5d7d4 100644
7879 +--- a/drivers/clk/starfive/clk-starfive-jh7100.c
7880 ++++ b/drivers/clk/starfive/clk-starfive-jh7100.c
7881 +@@ -32,6 +32,13 @@
7882 + #define JH7100_CLK_MUX_MASK GENMASK(27, 24)
7883 + #define JH7100_CLK_MUX_SHIFT 24
7884 + #define JH7100_CLK_DIV_MASK GENMASK(23, 0)
7885 ++#define JH7100_CLK_FRAC_MASK GENMASK(15, 8)
7886 ++#define JH7100_CLK_FRAC_SHIFT 8
7887 ++#define JH7100_CLK_INT_MASK GENMASK(7, 0)
7888 ++
7889 ++/* fractional divider min/max */
7890 ++#define JH7100_CLK_FRAC_MIN 100UL
7891 ++#define JH7100_CLK_FRAC_MAX 25599UL
7892 +
7893 + /* clock data */
7894 + #define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = { \
7895 +@@ -55,6 +62,13 @@
7896 + .parents = { [0] = _parent }, \
7897 + }
7898 +
7899 ++#define JH7100_FDIV(_idx, _name, _parent) [_idx] = { \
7900 ++ .name = _name, \
7901 ++ .flags = 0, \
7902 ++ .max = JH7100_CLK_FRAC_MAX, \
7903 ++ .parents = { [0] = _parent }, \
7904 ++}
7905 ++
7906 + #define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = { \
7907 + .name = _name, \
7908 + .flags = 0, \
7909 +@@ -225,7 +239,7 @@ static const struct {
7910 + JH7100__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
7911 + JH7100_CLK_OSC_SYS,
7912 + JH7100_CLK_USBPHY_PLLDIV25M),
7913 +- JH7100__DIV(JH7100_CLK_AUDIO_DIV, "audio_div", 131072, JH7100_CLK_AUDIO_ROOT),
7914 ++ JH7100_FDIV(JH7100_CLK_AUDIO_DIV, "audio_div", JH7100_CLK_AUDIO_ROOT),
7915 + JH7100_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV),
7916 + JH7100_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD),
7917 + JH7100_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT),
7918 +@@ -399,22 +413,13 @@ static unsigned long jh7100_clk_recalc_rate(struct clk_hw *hw,
7919 + return div ? parent_rate / div : 0;
7920 + }
7921 +
7922 +-static unsigned long jh7100_clk_bestdiv(struct jh7100_clk *clk,
7923 +- unsigned long rate, unsigned long parent)
7924 +-{
7925 +- unsigned long max = clk->max_div;
7926 +- unsigned long div = DIV_ROUND_UP(parent, rate);
7927 +-
7928 +- return min(div, max);
7929 +-}
7930 +-
7931 + static int jh7100_clk_determine_rate(struct clk_hw *hw,
7932 + struct clk_rate_request *req)
7933 + {
7934 + struct jh7100_clk *clk = jh7100_clk_from(hw);
7935 + unsigned long parent = req->best_parent_rate;
7936 + unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
7937 +- unsigned long div = jh7100_clk_bestdiv(clk, rate, parent);
7938 ++ unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
7939 + unsigned long result = parent / div;
7940 +
7941 + /*
7942 +@@ -442,12 +447,56 @@ static int jh7100_clk_set_rate(struct clk_hw *hw,
7943 + unsigned long parent_rate)
7944 + {
7945 + struct jh7100_clk *clk = jh7100_clk_from(hw);
7946 +- unsigned long div = jh7100_clk_bestdiv(clk, rate, parent_rate);
7947 ++ unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
7948 ++ 1UL, (unsigned long)clk->max_div);
7949 +
7950 + jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, div);
7951 + return 0;
7952 + }
7953 +
7954 ++static unsigned long jh7100_clk_frac_recalc_rate(struct clk_hw *hw,
7955 ++ unsigned long parent_rate)
7956 ++{
7957 ++ struct jh7100_clk *clk = jh7100_clk_from(hw);
7958 ++ u32 reg = jh7100_clk_reg_get(clk);
7959 ++ unsigned long div100 = 100 * (reg & JH7100_CLK_INT_MASK) +
7960 ++ ((reg & JH7100_CLK_FRAC_MASK) >> JH7100_CLK_FRAC_SHIFT);
7961 ++
7962 ++ return (div100 >= JH7100_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
7963 ++}
7964 ++
7965 ++static int jh7100_clk_frac_determine_rate(struct clk_hw *hw,
7966 ++ struct clk_rate_request *req)
7967 ++{
7968 ++ unsigned long parent100 = 100 * req->best_parent_rate;
7969 ++ unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
7970 ++ unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
7971 ++ JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
7972 ++ unsigned long result = parent100 / div100;
7973 ++
7974 ++ /* clamp the result as in jh7100_clk_determine_rate() above */
7975 ++ if (result > req->max_rate && div100 < JH7100_CLK_FRAC_MAX)
7976 ++ result = parent100 / (div100 + 1);
7977 ++ if (result < req->min_rate && div100 > JH7100_CLK_FRAC_MIN)
7978 ++ result = parent100 / (div100 - 1);
7979 ++
7980 ++ req->rate = result;
7981 ++ return 0;
7982 ++}
7983 ++
7984 ++static int jh7100_clk_frac_set_rate(struct clk_hw *hw,
7985 ++ unsigned long rate,
7986 ++ unsigned long parent_rate)
7987 ++{
7988 ++ struct jh7100_clk *clk = jh7100_clk_from(hw);
7989 ++ unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
7990 ++ JH7100_CLK_FRAC_MIN, JH7100_CLK_FRAC_MAX);
7991 ++ u32 value = ((div100 % 100) << JH7100_CLK_FRAC_SHIFT) | (div100 / 100);
7992 ++
7993 ++ jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, value);
7994 ++ return 0;
7995 ++}
7996 ++
7997 + static u8 jh7100_clk_get_parent(struct clk_hw *hw)
7998 + {
7999 + struct jh7100_clk *clk = jh7100_clk_from(hw);
8000 +@@ -534,6 +583,13 @@ static const struct clk_ops jh7100_clk_div_ops = {
8001 + .debug_init = jh7100_clk_debug_init,
8002 + };
8003 +
8004 ++static const struct clk_ops jh7100_clk_fdiv_ops = {
8005 ++ .recalc_rate = jh7100_clk_frac_recalc_rate,
8006 ++ .determine_rate = jh7100_clk_frac_determine_rate,
8007 ++ .set_rate = jh7100_clk_frac_set_rate,
8008 ++ .debug_init = jh7100_clk_debug_init,
8009 ++};
8010 ++
8011 + static const struct clk_ops jh7100_clk_gdiv_ops = {
8012 + .enable = jh7100_clk_enable,
8013 + .disable = jh7100_clk_disable,
8014 +@@ -572,6 +628,8 @@ static const struct clk_ops *__init jh7100_clk_ops(u32 max)
8015 + if (max & JH7100_CLK_DIV_MASK) {
8016 + if (max & JH7100_CLK_ENABLE)
8017 + return &jh7100_clk_gdiv_ops;
8018 ++ if (max == JH7100_CLK_FRAC_MAX)
8019 ++ return &jh7100_clk_fdiv_ops;
8020 + return &jh7100_clk_div_ops;
8021 + }
8022 +
8023 +diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
8024 +index 74c1d894cca86..219c80653dbdb 100644
8025 +--- a/drivers/clk/tegra/clk-tegra124-emc.c
8026 ++++ b/drivers/clk/tegra/clk-tegra124-emc.c
8027 +@@ -198,6 +198,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
8028 +
8029 + tegra->emc = platform_get_drvdata(pdev);
8030 + if (!tegra->emc) {
8031 ++ put_device(&pdev->dev);
8032 + pr_err("%s: cannot find EMC driver\n", __func__);
8033 + return NULL;
8034 + }
8035 +diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
8036 +index 5319cd3804801..3bc55ab75314b 100644
8037 +--- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
8038 ++++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
8039 +@@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev,
8040 +
8041 + init.name = name;
8042 + init.ops = &clk_fixed_rate_ops;
8043 ++ init.flags = 0;
8044 + init.parent_names = NULL;
8045 + init.num_parents = 0;
8046 +
8047 +diff --git a/drivers/clk/visconti/clkc-tmpv770x.c b/drivers/clk/visconti/clkc-tmpv770x.c
8048 +index c2b2f41a85a45..6c753b2cb558f 100644
8049 +--- a/drivers/clk/visconti/clkc-tmpv770x.c
8050 ++++ b/drivers/clk/visconti/clkc-tmpv770x.c
8051 +@@ -176,7 +176,7 @@ static const struct visconti_clk_gate_table clk_gate_tables[] = {
8052 + { TMPV770X_CLK_WRCK, "wrck",
8053 + clks_parent_data, ARRAY_SIZE(clks_parent_data),
8054 + 0, 0x68, 0x168, 9, 32,
8055 +- -1, }, /* No reset */
8056 ++ NO_RESET, },
8057 + { TMPV770X_CLK_PICKMON, "pickmon",
8058 + clks_parent_data, ARRAY_SIZE(clks_parent_data),
8059 + 0, 0x10, 0x110, 8, 4,
8060 +diff --git a/drivers/clk/visconti/clkc.c b/drivers/clk/visconti/clkc.c
8061 +index 56a8a4ffebca8..d0b193b5d0b35 100644
8062 +--- a/drivers/clk/visconti/clkc.c
8063 ++++ b/drivers/clk/visconti/clkc.c
8064 +@@ -147,7 +147,7 @@ int visconti_clk_register_gates(struct visconti_clk_provider *ctx,
8065 + if (!dev_name)
8066 + return -ENOMEM;
8067 +
8068 +- if (clks[i].rs_id >= 0) {
8069 ++ if (clks[i].rs_id != NO_RESET) {
8070 + rson_offset = reset[clks[i].rs_id].rson_offset;
8071 + rsoff_offset = reset[clks[i].rs_id].rsoff_offset;
8072 + rs_idx = reset[clks[i].rs_id].rs_idx;
8073 +diff --git a/drivers/clk/visconti/clkc.h b/drivers/clk/visconti/clkc.h
8074 +index 09ed82ff64e45..8756a1ec42efc 100644
8075 +--- a/drivers/clk/visconti/clkc.h
8076 ++++ b/drivers/clk/visconti/clkc.h
8077 +@@ -73,4 +73,7 @@ int visconti_clk_register_gates(struct visconti_clk_provider *data,
8078 + int num_gate,
8079 + const struct visconti_reset_data *reset,
8080 + spinlock_t *lock);
8081 ++
8082 ++#define NO_RESET 0xFF
8083 ++
8084 + #endif /* _VISCONTI_CLKC_H_ */
8085 +diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
8086 +index eb596ff9e7bb3..279ddff81ab49 100644
8087 +--- a/drivers/clocksource/acpi_pm.c
8088 ++++ b/drivers/clocksource/acpi_pm.c
8089 +@@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg)
8090 + int ret;
8091 +
8092 + ret = kstrtouint(arg, 16, &base);
8093 +- if (ret)
8094 +- return ret;
8095 ++ if (ret) {
8096 ++ pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg);
8097 ++ return 1;
8098 ++ }
8099 +
8100 + pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
8101 + base);
8102 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
8103 +index 6db3d5511b0ff..03782b399ea1a 100644
8104 +--- a/drivers/clocksource/exynos_mct.c
8105 ++++ b/drivers/clocksource/exynos_mct.c
8106 +@@ -541,6 +541,11 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
8107 + * irqs are specified.
8108 + */
8109 + nr_irqs = of_irq_count(np);
8110 ++ if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
8111 ++ pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
8112 ++ nr_irqs);
8113 ++ nr_irqs = ARRAY_SIZE(mct_irqs);
8114 ++ }
8115 + for (i = MCT_L0_IRQ; i < nr_irqs; i++)
8116 + mct_irqs[i] = irq_of_parse_and_map(np, i);
8117 +
8118 +@@ -553,11 +558,14 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
8119 + mct_irqs[MCT_L0_IRQ], err);
8120 + } else {
8121 + for_each_possible_cpu(cpu) {
8122 +- int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
8123 ++ int mct_irq;
8124 + struct mct_clock_event_device *pcpu_mevt =
8125 + per_cpu_ptr(&percpu_mct_tick, cpu);
8126 +
8127 + pcpu_mevt->evt.irq = -1;
8128 ++ if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
8129 ++ break;
8130 ++ mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
8131 +
8132 + irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
8133 + if (request_irq(mct_irq,
8134 +diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
8135 +index cfa4ec7ef3968..790d2c9b42a70 100644
8136 +--- a/drivers/clocksource/timer-microchip-pit64b.c
8137 ++++ b/drivers/clocksource/timer-microchip-pit64b.c
8138 +@@ -165,7 +165,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
8139 + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
8140 + }
8141 +
8142 +-static u64 mchp_pit64b_sched_read_clk(void)
8143 ++static u64 notrace mchp_pit64b_sched_read_clk(void)
8144 + {
8145 + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
8146 + }
8147 +diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
8148 +index 529cc6a51cdb3..c3f54d9912be7 100644
8149 +--- a/drivers/clocksource/timer-of.c
8150 ++++ b/drivers/clocksource/timer-of.c
8151 +@@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np,
8152 + of_base->base = of_base->name ?
8153 + of_io_request_and_map(np, of_base->index, of_base->name) :
8154 + of_iomap(np, of_base->index);
8155 +- if (IS_ERR(of_base->base)) {
8156 +- pr_err("Failed to iomap (%s)\n", of_base->name);
8157 +- return PTR_ERR(of_base->base);
8158 ++ if (IS_ERR_OR_NULL(of_base->base)) {
8159 ++ pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name);
8160 ++ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
8161 + }
8162 +
8163 + return 0;
8164 +diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
8165 +index 1fccb457fcc54..2737407ff0698 100644
8166 +--- a/drivers/clocksource/timer-ti-dm-systimer.c
8167 ++++ b/drivers/clocksource/timer-ti-dm-systimer.c
8168 +@@ -694,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
8169 + return 0;
8170 + }
8171 +
8172 +- if (pa == 0x48034000) /* dra7 dmtimer3 */
8173 ++ if (pa == 0x4882c000) /* dra7 dmtimer15 */
8174 + return dmtimer_percpu_timer_init(np, 0);
8175 +- else if (pa == 0x48036000) /* dra7 dmtimer4 */
8176 ++ else if (pa == 0x4882e000) /* dra7 dmtimer16 */
8177 + return dmtimer_percpu_timer_init(np, 1);
8178 +
8179 + return 0;
8180 +diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
8181 +index d1744b5d96190..6dfa86971a757 100644
8182 +--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
8183 ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
8184 +@@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev,
8185 + }
8186 +
8187 + /* Check PVS_BLOW_STATUS */
8188 +- pte_efuse = *(((u32 *)buf) + 4);
8189 ++ pte_efuse = *(((u32 *)buf) + 1);
8190 + pte_efuse &= BIT(21);
8191 + if (pte_efuse) {
8192 + dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
8193 +diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
8194 +index 01e77913a4144..5f27dcc6c110f 100644
8195 +--- a/drivers/cpuidle/cpuidle-qcom-spm.c
8196 ++++ b/drivers/cpuidle/cpuidle-qcom-spm.c
8197 +@@ -155,6 +155,22 @@ static struct platform_driver spm_cpuidle_driver = {
8198 + },
8199 + };
8200 +
8201 ++static bool __init qcom_spm_find_any_cpu(void)
8202 ++{
8203 ++ struct device_node *cpu_node, *saw_node;
8204 ++
8205 ++ for_each_of_cpu_node(cpu_node) {
8206 ++ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
8207 ++ if (of_device_is_available(saw_node)) {
8208 ++ of_node_put(saw_node);
8209 ++ of_node_put(cpu_node);
8210 ++ return true;
8211 ++ }
8212 ++ of_node_put(saw_node);
8213 ++ }
8214 ++ return false;
8215 ++}
8216 ++
8217 + static int __init qcom_spm_cpuidle_init(void)
8218 + {
8219 + struct platform_device *pdev;
8220 +@@ -164,6 +180,10 @@ static int __init qcom_spm_cpuidle_init(void)
8221 + if (ret)
8222 + return ret;
8223 +
8224 ++ /* Make sure there is actually any CPU managed by the SPM */
8225 ++ if (!qcom_spm_find_any_cpu())
8226 ++ return 0;
8227 ++
8228 + pdev = platform_device_register_simple("qcom-spm-cpuidle",
8229 + -1, NULL, 0);
8230 + if (IS_ERR(pdev)) {
8231 +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
8232 +index 54ae8d16e4931..35e3cadccac2b 100644
8233 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
8234 ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
8235 +@@ -11,6 +11,7 @@
8236 + * You could find a link for the datasheet in Documentation/arm/sunxi.rst
8237 + */
8238 +
8239 ++#include <linux/bottom_half.h>
8240 + #include <linux/crypto.h>
8241 + #include <linux/dma-mapping.h>
8242 + #include <linux/io.h>
8243 +@@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
8244 +
8245 + flow = rctx->flow;
8246 + err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
8247 ++ local_bh_disable();
8248 + crypto_finalize_skcipher_request(engine, breq, err);
8249 ++ local_bh_enable();
8250 + return 0;
8251 + }
8252 +
8253 +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
8254 +index 88194718a806c..859b7522faaac 100644
8255 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
8256 ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
8257 +@@ -9,6 +9,7 @@
8258 + *
8259 + * You could find the datasheet in Documentation/arm/sunxi.rst
8260 + */
8261 ++#include <linux/bottom_half.h>
8262 + #include <linux/dma-mapping.h>
8263 + #include <linux/pm_runtime.h>
8264 + #include <linux/scatterlist.h>
8265 +@@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
8266 + theend:
8267 + kfree(buf);
8268 + kfree(result);
8269 ++ local_bh_disable();
8270 + crypto_finalize_hash_request(engine, breq, err);
8271 ++ local_bh_enable();
8272 + return 0;
8273 + }
8274 +diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
8275 +index 9ef1c85c4aaa5..554e400d41cad 100644
8276 +--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
8277 ++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
8278 +@@ -11,6 +11,7 @@
8279 + * You could find a link for the datasheet in Documentation/arm/sunxi.rst
8280 + */
8281 +
8282 ++#include <linux/bottom_half.h>
8283 + #include <linux/crypto.h>
8284 + #include <linux/dma-mapping.h>
8285 + #include <linux/io.h>
8286 +@@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar
8287 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
8288 +
8289 + err = sun8i_ss_cipher(breq);
8290 ++ local_bh_disable();
8291 + crypto_finalize_skcipher_request(engine, breq, err);
8292 ++ local_bh_enable();
8293 +
8294 + return 0;
8295 + }
8296 +diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
8297 +index 80e89066dbd1a..319fe3279a716 100644
8298 +--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
8299 ++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
8300 +@@ -30,6 +30,8 @@
8301 + static const struct ss_variant ss_a80_variant = {
8302 + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
8303 + },
8304 ++ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
8305 ++ },
8306 + .op_mode = { SS_OP_ECB, SS_OP_CBC,
8307 + },
8308 + .ss_clks = {
8309 +diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
8310 +index 3c073eb3db038..1a71ed49d2333 100644
8311 +--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
8312 ++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
8313 +@@ -9,6 +9,7 @@
8314 + *
8315 + * You could find the datasheet in Documentation/arm/sunxi.rst
8316 + */
8317 ++#include <linux/bottom_half.h>
8318 + #include <linux/dma-mapping.h>
8319 + #include <linux/pm_runtime.h>
8320 + #include <linux/scatterlist.h>
8321 +@@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
8322 + theend:
8323 + kfree(pad);
8324 + kfree(result);
8325 ++ local_bh_disable();
8326 + crypto_finalize_hash_request(engine, breq, err);
8327 ++ local_bh_enable();
8328 + return 0;
8329 + }
8330 +diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
8331 +index c6865cbd334b2..e79514fce731f 100644
8332 +--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
8333 ++++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
8334 +@@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine,
8335 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
8336 +
8337 + err = meson_cipher(breq);
8338 ++ local_bh_disable();
8339 + crypto_finalize_skcipher_request(engine, breq, err);
8340 ++ local_bh_enable();
8341 +
8342 + return 0;
8343 + }
8344 +diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
8345 +index d718db224be42..7d4b4ad1db1f3 100644
8346 +--- a/drivers/crypto/ccp/ccp-dmaengine.c
8347 ++++ b/drivers/crypto/ccp/ccp-dmaengine.c
8348 +@@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
8349 + return 0;
8350 + }
8351 +
8352 ++static void ccp_dma_release(struct ccp_device *ccp)
8353 ++{
8354 ++ struct ccp_dma_chan *chan;
8355 ++ struct dma_chan *dma_chan;
8356 ++ unsigned int i;
8357 ++
8358 ++ for (i = 0; i < ccp->cmd_q_count; i++) {
8359 ++ chan = ccp->ccp_dma_chan + i;
8360 ++ dma_chan = &chan->dma_chan;
8361 ++ tasklet_kill(&chan->cleanup_tasklet);
8362 ++ list_del_rcu(&dma_chan->device_node);
8363 ++ }
8364 ++}
8365 ++
8366 + int ccp_dmaengine_register(struct ccp_device *ccp)
8367 + {
8368 + struct ccp_dma_chan *chan;
8369 +@@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
8370 + return 0;
8371 +
8372 + err_reg:
8373 ++ ccp_dma_release(ccp);
8374 + kmem_cache_destroy(ccp->dma_desc_cache);
8375 +
8376 + err_cache:
8377 +@@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
8378 + return;
8379 +
8380 + dma_async_device_unregister(dma_dev);
8381 ++ ccp_dma_release(ccp);
8382 +
8383 + kmem_cache_destroy(ccp->dma_desc_cache);
8384 + kmem_cache_destroy(ccp->dma_cmd_cache);
8385 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
8386 +index 8fd774a10edc3..6ab93dfd478a9 100644
8387 +--- a/drivers/crypto/ccp/sev-dev.c
8388 ++++ b/drivers/crypto/ccp/sev-dev.c
8389 +@@ -413,7 +413,7 @@ static int __sev_platform_init_locked(int *error)
8390 + {
8391 + struct psp_device *psp = psp_master;
8392 + struct sev_device *sev;
8393 +- int rc, psp_ret;
8394 ++ int rc, psp_ret = -1;
8395 + int (*init_function)(int *error);
8396 +
8397 + if (!psp || !psp->sev_data)
8398 +diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
8399 +index a5e041d9d2cf1..11e0278c8631d 100644
8400 +--- a/drivers/crypto/ccree/cc_buffer_mgr.c
8401 ++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
8402 +@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
8403 + {
8404 + int ret = 0;
8405 +
8406 ++ if (!nbytes) {
8407 ++ *mapped_nents = 0;
8408 ++ *lbytes = 0;
8409 ++ *nents = 0;
8410 ++ return 0;
8411 ++ }
8412 ++
8413 + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
8414 + if (*nents > max_sg_nents) {
8415 + *nents = 0;
8416 +diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
8417 +index 78833491f534d..309da6334a0a0 100644
8418 +--- a/drivers/crypto/ccree/cc_cipher.c
8419 ++++ b/drivers/crypto/ccree/cc_cipher.c
8420 +@@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
8421 + &ctx_p->user.key_dma_addr);
8422 +
8423 + /* Free key buffer in context */
8424 +- kfree_sensitive(ctx_p->user.key);
8425 + dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
8426 ++ kfree_sensitive(ctx_p->user.key);
8427 + }
8428 +
8429 + struct tdes_keys {
8430 +diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
8431 +index c1c2b1d866639..f2be0a7d7f7ac 100644
8432 +--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
8433 ++++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
8434 +@@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a
8435 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
8436 +
8437 + err = sl3516_ce_cipher(breq);
8438 ++ local_bh_disable();
8439 + crypto_finalize_skcipher_request(engine, breq, err);
8440 ++ local_bh_enable();
8441 +
8442 + return 0;
8443 + }
8444 +diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
8445 +index c5b84a5ea3501..3b29c8993b8c7 100644
8446 +--- a/drivers/crypto/hisilicon/qm.c
8447 ++++ b/drivers/crypto/hisilicon/qm.c
8448 +@@ -4295,7 +4295,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
8449 + static int qm_vf_read_qos(struct hisi_qm *qm)
8450 + {
8451 + int cnt = 0;
8452 +- int ret;
8453 ++ int ret = -EINVAL;
8454 +
8455 + /* reset mailbox qos val */
8456 + qm->mb_qos = 0;
8457 +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
8458 +index 6a45bd23b3635..090920ed50c8f 100644
8459 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
8460 ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
8461 +@@ -2284,9 +2284,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
8462 + struct aead_request *aead_req,
8463 + bool encrypt)
8464 + {
8465 +- struct aead_request *subreq = aead_request_ctx(aead_req);
8466 + struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
8467 + struct device *dev = ctx->dev;
8468 ++ struct aead_request *subreq;
8469 ++ int ret;
8470 +
8471 + /* Kunpeng920 aead mode not support input 0 size */
8472 + if (!a_ctx->fallback_aead_tfm) {
8473 +@@ -2294,6 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
8474 + return -EINVAL;
8475 + }
8476 +
8477 ++ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
8478 ++ if (!subreq)
8479 ++ return -ENOMEM;
8480 ++
8481 + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
8482 + aead_request_set_callback(subreq, aead_req->base.flags,
8483 + aead_req->base.complete, aead_req->base.data);
8484 +@@ -2301,8 +2306,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
8485 + aead_req->cryptlen, aead_req->iv);
8486 + aead_request_set_ad(subreq, aead_req->assoclen);
8487 +
8488 +- return encrypt ? crypto_aead_encrypt(subreq) :
8489 +- crypto_aead_decrypt(subreq);
8490 ++ if (encrypt)
8491 ++ ret = crypto_aead_encrypt(subreq);
8492 ++ else
8493 ++ ret = crypto_aead_decrypt(subreq);
8494 ++ aead_request_free(subreq);
8495 ++
8496 ++ return ret;
8497 + }
8498 +
8499 + static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
8500 +diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
8501 +index 26d3ab1d308ba..89d4cc767d361 100644
8502 +--- a/drivers/crypto/hisilicon/sec2/sec_main.c
8503 ++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
8504 +@@ -443,9 +443,11 @@ static int sec_engine_init(struct hisi_qm *qm)
8505 +
8506 + writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
8507 +
8508 +- /* Enable sm4 extra mode, as ctr/ecb */
8509 +- writel_relaxed(SEC_BD_ERR_CHK_EN0,
8510 +- qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
8511 ++ /* HW V2 enable sm4 extra mode, as ctr/ecb */
8512 ++ if (qm->ver < QM_HW_V3)
8513 ++ writel_relaxed(SEC_BD_ERR_CHK_EN0,
8514 ++ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
8515 ++
8516 + /* Enable sm4 xts mode multiple iv */
8517 + writel_relaxed(SEC_BD_ERR_CHK_EN1,
8518 + qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
8519 +diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
8520 +index 9125199f1702b..a48591af12d02 100644
8521 +--- a/drivers/crypto/marvell/Kconfig
8522 ++++ b/drivers/crypto/marvell/Kconfig
8523 +@@ -47,6 +47,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
8524 + select CRYPTO_SKCIPHER
8525 + select CRYPTO_HASH
8526 + select CRYPTO_AEAD
8527 ++ select NET_DEVLINK
8528 + help
8529 + This driver allows you to utilize the Marvell Cryptographic
8530 + Accelerator Unit(CPT) found in OcteonTX2 series of processors.
8531 +diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
8532 +index 1b4d425bbf0e4..7fd4503d9cfc8 100644
8533 +--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
8534 ++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
8535 +@@ -1076,6 +1076,39 @@ static void delete_engine_grps(struct pci_dev *pdev,
8536 + delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
8537 + }
8538 +
8539 ++#define PCI_DEVID_CN10K_RNM 0xA098
8540 ++#define RNM_ENTROPY_STATUS 0x8
8541 ++
8542 ++static void rnm_to_cpt_errata_fixup(struct device *dev)
8543 ++{
8544 ++ struct pci_dev *pdev;
8545 ++ void __iomem *base;
8546 ++ int timeout = 5000;
8547 ++
8548 ++ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
8549 ++ if (!pdev)
8550 ++ return;
8551 ++
8552 ++ base = pci_ioremap_bar(pdev, 0);
8553 ++ if (!base)
8554 ++ goto put_pdev;
8555 ++
8556 ++ while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
8557 ++ cpu_relax();
8558 ++ udelay(1);
8559 ++ timeout--;
8560 ++ if (!timeout) {
8561 ++ dev_warn(dev, "RNM is not producing entropy\n");
8562 ++ break;
8563 ++ }
8564 ++ }
8565 ++
8566 ++ iounmap(base);
8567 ++
8568 ++put_pdev:
8569 ++ pci_dev_put(pdev);
8570 ++}
8571 ++
8572 + int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
8573 + {
8574 +
8575 +@@ -1189,9 +1222,17 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
8576 +
8577 + if (is_dev_otx2(pdev))
8578 + goto unlock;
8579 ++
8580 ++ /*
8581 ++ * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
8582 ++ * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
8583 ++ */
8584 ++ rnm_to_cpt_errata_fixup(&pdev->dev);
8585 ++
8586 + /*
8587 + * Configure engine group mask to allow context prefetching
8588 +- * for the groups.
8589 ++ * for the groups and enable random number request, to enable
8590 ++ * CPT to request random numbers from RNM.
8591 + */
8592 + otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
8593 + OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
8594 +diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
8595 +index 2748a3327e391..f8f8542ce3e47 100644
8596 +--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
8597 ++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
8598 +@@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void)
8599 + {
8600 + int i, err = 0;
8601 +
8602 +- if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
8603 +- for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
8604 +- otx2_cpt_skciphers[i].base.cra_flags &=
8605 +- ~CRYPTO_ALG_DEAD;
8606 +-
8607 +- err = crypto_register_skciphers(otx2_cpt_skciphers,
8608 +- ARRAY_SIZE(otx2_cpt_skciphers));
8609 +- if (err)
8610 +- return err;
8611 +- }
8612 ++ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
8613 ++ otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
8614 ++
8615 ++ err = crypto_register_skciphers(otx2_cpt_skciphers,
8616 ++ ARRAY_SIZE(otx2_cpt_skciphers));
8617 ++ if (err)
8618 ++ return err;
8619 +
8620 + for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
8621 + otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
8622 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
8623 +index d19e5ffb5104b..d6f9e2fe863d7 100644
8624 +--- a/drivers/crypto/mxs-dcp.c
8625 ++++ b/drivers/crypto/mxs-dcp.c
8626 +@@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
8627 + memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
8628 + }
8629 +
8630 +- for_each_sg(req->src, src, sg_nents(src), i) {
8631 ++ for_each_sg(req->src, src, sg_nents(req->src), i) {
8632 + src_buf = sg_virt(src);
8633 + len = sg_dma_len(src);
8634 + tlen += len;
8635 +diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
8636 +index 6d10edc40aca0..68d39c833332e 100644
8637 +--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
8638 ++++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
8639 +@@ -52,7 +52,7 @@ static const char *const dev_cfg_services[] = {
8640 + static int get_service_enabled(struct adf_accel_dev *accel_dev)
8641 + {
8642 + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
8643 +- u32 ret;
8644 ++ int ret;
8645 +
8646 + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
8647 + ADF_SERVICES_ENABLED, services);
8648 +diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
8649 +index 8efbedf63bc80..3b3ea849c5e53 100644
8650 +--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
8651 ++++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
8652 +@@ -9,15 +9,12 @@
8653 + #include "adf_pfvf_pf_proto.h"
8654 + #include "adf_pfvf_utils.h"
8655 +
8656 +-#define ADF_4XXX_MAX_NUM_VFS 16
8657 +-
8658 + #define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
8659 + #define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
8660 +
8661 + /* VF2PF interrupt source registers */
8662 +-#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
8663 +-#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
8664 +-#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
8665 ++#define ADF_4XXX_VM2PF_SOU 0x41A180
8666 ++#define ADF_4XXX_VM2PF_MSK 0x41A1C0
8667 +
8668 + #define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
8669 + #define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
8670 +@@ -41,51 +38,30 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
8671 +
8672 + static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
8673 + {
8674 +- int i;
8675 + u32 sou, mask;
8676 +- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
8677 +- u32 vf_mask = 0;
8678 +
8679 +- for (i = 0; i < num_csrs; i++) {
8680 +- sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
8681 +- mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
8682 +- sou &= ~mask;
8683 +- vf_mask |= sou << i;
8684 +- }
8685 ++ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
8686 ++ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
8687 +
8688 +- return vf_mask;
8689 ++ return sou &= ~mask;
8690 + }
8691 +
8692 + static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
8693 + u32 vf_mask)
8694 + {
8695 +- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
8696 +- unsigned long mask = vf_mask;
8697 + unsigned int val;
8698 +- int i;
8699 +-
8700 +- for_each_set_bit(i, &mask, num_csrs) {
8701 +- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
8702 +
8703 +- val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
8704 +- ADF_CSR_WR(pmisc_addr, offset, val);
8705 +- }
8706 ++ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
8707 ++ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
8708 + }
8709 +
8710 + static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
8711 + u32 vf_mask)
8712 + {
8713 +- int num_csrs = ADF_4XXX_MAX_NUM_VFS;
8714 +- unsigned long mask = vf_mask;
8715 + unsigned int val;
8716 +- int i;
8717 +-
8718 +- for_each_set_bit(i, &mask, num_csrs) {
8719 +- unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
8720 +
8721 +- val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
8722 +- ADF_CSR_WR(pmisc_addr, offset, val);
8723 +- }
8724 ++ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask;
8725 ++ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
8726 + }
8727 +
8728 + static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
8729 +diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
8730 +index 14b222691c9c2..1141258db4b65 100644
8731 +--- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
8732 ++++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
8733 +@@ -96,7 +96,7 @@ int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
8734 + int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
8735 + {
8736 + struct adf_hw_device_data *hw_data = accel_dev->hw_device;
8737 +- struct capabilities_v3 cap_msg = { { 0 }, };
8738 ++ struct capabilities_v3 cap_msg = { 0 };
8739 + unsigned int len = sizeof(cap_msg);
8740 +
8741 + if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
8742 +@@ -141,7 +141,7 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
8743 +
8744 + int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
8745 + {
8746 +- struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
8747 ++ struct ring_to_svc_map_v1 rts_map_msg = { 0 };
8748 + unsigned int len = sizeof(rts_map_msg);
8749 +
8750 + if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
8751 +diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
8752 +index 1cece1a7d3f00..5bbf0d2722e11 100644
8753 +--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
8754 ++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
8755 +@@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
8756 + .exit = rk_ablk_exit_tfm,
8757 + .min_keysize = DES3_EDE_KEY_SIZE,
8758 + .max_keysize = DES3_EDE_KEY_SIZE,
8759 +- .ivsize = DES_BLOCK_SIZE,
8760 + .setkey = rk_tdes_setkey,
8761 + .encrypt = rk_des3_ede_ecb_encrypt,
8762 + .decrypt = rk_des3_ede_ecb_decrypt,
8763 +diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
8764 +index c85fab7ef0bdd..b2c28b87f14b3 100644
8765 +--- a/drivers/crypto/vmx/Kconfig
8766 ++++ b/drivers/crypto/vmx/Kconfig
8767 +@@ -2,7 +2,11 @@
8768 + config CRYPTO_DEV_VMX_ENCRYPT
8769 + tristate "Encryption acceleration support on P8 CPU"
8770 + depends on CRYPTO_DEV_VMX
8771 ++ select CRYPTO_AES
8772 ++ select CRYPTO_CBC
8773 ++ select CRYPTO_CTR
8774 + select CRYPTO_GHASH
8775 ++ select CRYPTO_XTS
8776 + default m
8777 + help
8778 + Support for VMX cryptographic acceleration instructions on Power8 CPU.
8779 +diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
8780 +index 40ab50318dafa..a90202ac88d2f 100644
8781 +--- a/drivers/cxl/core/Makefile
8782 ++++ b/drivers/cxl/core/Makefile
8783 +@@ -2,7 +2,7 @@
8784 + obj-$(CONFIG_CXL_BUS) += cxl_core.o
8785 +
8786 + ccflags-y += -I$(srctree)/drivers/cxl
8787 +-cxl_core-y := bus.o
8788 ++cxl_core-y := port.o
8789 + cxl_core-y += pmem.o
8790 + cxl_core-y += regs.o
8791 + cxl_core-y += memdev.o
8792 +diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
8793 +deleted file mode 100644
8794 +index 3f9b98ecd18b7..0000000000000
8795 +--- a/drivers/cxl/core/bus.c
8796 ++++ /dev/null
8797 +@@ -1,675 +0,0 @@
8798 +-// SPDX-License-Identifier: GPL-2.0-only
8799 +-/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
8800 +-#include <linux/io-64-nonatomic-lo-hi.h>
8801 +-#include <linux/device.h>
8802 +-#include <linux/module.h>
8803 +-#include <linux/pci.h>
8804 +-#include <linux/slab.h>
8805 +-#include <linux/idr.h>
8806 +-#include <cxlmem.h>
8807 +-#include <cxl.h>
8808 +-#include "core.h"
8809 +-
8810 +-/**
8811 +- * DOC: cxl core
8812 +- *
8813 +- * The CXL core provides a set of interfaces that can be consumed by CXL aware
8814 +- * drivers. The interfaces allow for creation, modification, and destruction of
8815 +- * regions, memory devices, ports, and decoders. CXL aware drivers must register
8816 +- * with the CXL core via these interfaces in order to be able to participate in
8817 +- * cross-device interleave coordination. The CXL core also establishes and
8818 +- * maintains the bridge to the nvdimm subsystem.
8819 +- *
8820 +- * CXL core introduces sysfs hierarchy to control the devices that are
8821 +- * instantiated by the core.
8822 +- */
8823 +-
8824 +-static DEFINE_IDA(cxl_port_ida);
8825 +-
8826 +-static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
8827 +- char *buf)
8828 +-{
8829 +- return sysfs_emit(buf, "%s\n", dev->type->name);
8830 +-}
8831 +-static DEVICE_ATTR_RO(devtype);
8832 +-
8833 +-static struct attribute *cxl_base_attributes[] = {
8834 +- &dev_attr_devtype.attr,
8835 +- NULL,
8836 +-};
8837 +-
8838 +-struct attribute_group cxl_base_attribute_group = {
8839 +- .attrs = cxl_base_attributes,
8840 +-};
8841 +-
8842 +-static ssize_t start_show(struct device *dev, struct device_attribute *attr,
8843 +- char *buf)
8844 +-{
8845 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
8846 +-
8847 +- return sysfs_emit(buf, "%#llx\n", cxld->range.start);
8848 +-}
8849 +-static DEVICE_ATTR_RO(start);
8850 +-
8851 +-static ssize_t size_show(struct device *dev, struct device_attribute *attr,
8852 +- char *buf)
8853 +-{
8854 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
8855 +-
8856 +- return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
8857 +-}
8858 +-static DEVICE_ATTR_RO(size);
8859 +-
8860 +-#define CXL_DECODER_FLAG_ATTR(name, flag) \
8861 +-static ssize_t name##_show(struct device *dev, \
8862 +- struct device_attribute *attr, char *buf) \
8863 +-{ \
8864 +- struct cxl_decoder *cxld = to_cxl_decoder(dev); \
8865 +- \
8866 +- return sysfs_emit(buf, "%s\n", \
8867 +- (cxld->flags & (flag)) ? "1" : "0"); \
8868 +-} \
8869 +-static DEVICE_ATTR_RO(name)
8870 +-
8871 +-CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
8872 +-CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
8873 +-CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
8874 +-CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
8875 +-CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
8876 +-
8877 +-static ssize_t target_type_show(struct device *dev,
8878 +- struct device_attribute *attr, char *buf)
8879 +-{
8880 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
8881 +-
8882 +- switch (cxld->target_type) {
8883 +- case CXL_DECODER_ACCELERATOR:
8884 +- return sysfs_emit(buf, "accelerator\n");
8885 +- case CXL_DECODER_EXPANDER:
8886 +- return sysfs_emit(buf, "expander\n");
8887 +- }
8888 +- return -ENXIO;
8889 +-}
8890 +-static DEVICE_ATTR_RO(target_type);
8891 +-
8892 +-static ssize_t target_list_show(struct device *dev,
8893 +- struct device_attribute *attr, char *buf)
8894 +-{
8895 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
8896 +- ssize_t offset = 0;
8897 +- int i, rc = 0;
8898 +-
8899 +- device_lock(dev);
8900 +- for (i = 0; i < cxld->interleave_ways; i++) {
8901 +- struct cxl_dport *dport = cxld->target[i];
8902 +- struct cxl_dport *next = NULL;
8903 +-
8904 +- if (!dport)
8905 +- break;
8906 +-
8907 +- if (i + 1 < cxld->interleave_ways)
8908 +- next = cxld->target[i + 1];
8909 +- rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
8910 +- next ? "," : "");
8911 +- if (rc < 0)
8912 +- break;
8913 +- offset += rc;
8914 +- }
8915 +- device_unlock(dev);
8916 +-
8917 +- if (rc < 0)
8918 +- return rc;
8919 +-
8920 +- rc = sysfs_emit_at(buf, offset, "\n");
8921 +- if (rc < 0)
8922 +- return rc;
8923 +-
8924 +- return offset + rc;
8925 +-}
8926 +-static DEVICE_ATTR_RO(target_list);
8927 +-
8928 +-static struct attribute *cxl_decoder_base_attrs[] = {
8929 +- &dev_attr_start.attr,
8930 +- &dev_attr_size.attr,
8931 +- &dev_attr_locked.attr,
8932 +- &dev_attr_target_list.attr,
8933 +- NULL,
8934 +-};
8935 +-
8936 +-static struct attribute_group cxl_decoder_base_attribute_group = {
8937 +- .attrs = cxl_decoder_base_attrs,
8938 +-};
8939 +-
8940 +-static struct attribute *cxl_decoder_root_attrs[] = {
8941 +- &dev_attr_cap_pmem.attr,
8942 +- &dev_attr_cap_ram.attr,
8943 +- &dev_attr_cap_type2.attr,
8944 +- &dev_attr_cap_type3.attr,
8945 +- NULL,
8946 +-};
8947 +-
8948 +-static struct attribute_group cxl_decoder_root_attribute_group = {
8949 +- .attrs = cxl_decoder_root_attrs,
8950 +-};
8951 +-
8952 +-static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
8953 +- &cxl_decoder_root_attribute_group,
8954 +- &cxl_decoder_base_attribute_group,
8955 +- &cxl_base_attribute_group,
8956 +- NULL,
8957 +-};
8958 +-
8959 +-static struct attribute *cxl_decoder_switch_attrs[] = {
8960 +- &dev_attr_target_type.attr,
8961 +- NULL,
8962 +-};
8963 +-
8964 +-static struct attribute_group cxl_decoder_switch_attribute_group = {
8965 +- .attrs = cxl_decoder_switch_attrs,
8966 +-};
8967 +-
8968 +-static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
8969 +- &cxl_decoder_switch_attribute_group,
8970 +- &cxl_decoder_base_attribute_group,
8971 +- &cxl_base_attribute_group,
8972 +- NULL,
8973 +-};
8974 +-
8975 +-static void cxl_decoder_release(struct device *dev)
8976 +-{
8977 +- struct cxl_decoder *cxld = to_cxl_decoder(dev);
8978 +- struct cxl_port *port = to_cxl_port(dev->parent);
8979 +-
8980 +- ida_free(&port->decoder_ida, cxld->id);
8981 +- kfree(cxld);
8982 +-}
8983 +-
8984 +-static const struct device_type cxl_decoder_switch_type = {
8985 +- .name = "cxl_decoder_switch",
8986 +- .release = cxl_decoder_release,
8987 +- .groups = cxl_decoder_switch_attribute_groups,
8988 +-};
8989 +-
8990 +-static const struct device_type cxl_decoder_root_type = {
8991 +- .name = "cxl_decoder_root",
8992 +- .release = cxl_decoder_release,
8993 +- .groups = cxl_decoder_root_attribute_groups,
8994 +-};
8995 +-
8996 +-bool is_root_decoder(struct device *dev)
8997 +-{
8998 +- return dev->type == &cxl_decoder_root_type;
8999 +-}
9000 +-EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
9001 +-
9002 +-struct cxl_decoder *to_cxl_decoder(struct device *dev)
9003 +-{
9004 +- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
9005 +- "not a cxl_decoder device\n"))
9006 +- return NULL;
9007 +- return container_of(dev, struct cxl_decoder, dev);
9008 +-}
9009 +-EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
9010 +-
9011 +-static void cxl_dport_release(struct cxl_dport *dport)
9012 +-{
9013 +- list_del(&dport->list);
9014 +- put_device(dport->dport);
9015 +- kfree(dport);
9016 +-}
9017 +-
9018 +-static void cxl_port_release(struct device *dev)
9019 +-{
9020 +- struct cxl_port *port = to_cxl_port(dev);
9021 +- struct cxl_dport *dport, *_d;
9022 +-
9023 +- device_lock(dev);
9024 +- list_for_each_entry_safe(dport, _d, &port->dports, list)
9025 +- cxl_dport_release(dport);
9026 +- device_unlock(dev);
9027 +- ida_free(&cxl_port_ida, port->id);
9028 +- kfree(port);
9029 +-}
9030 +-
9031 +-static const struct attribute_group *cxl_port_attribute_groups[] = {
9032 +- &cxl_base_attribute_group,
9033 +- NULL,
9034 +-};
9035 +-
9036 +-static const struct device_type cxl_port_type = {
9037 +- .name = "cxl_port",
9038 +- .release = cxl_port_release,
9039 +- .groups = cxl_port_attribute_groups,
9040 +-};
9041 +-
9042 +-struct cxl_port *to_cxl_port(struct device *dev)
9043 +-{
9044 +- if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
9045 +- "not a cxl_port device\n"))
9046 +- return NULL;
9047 +- return container_of(dev, struct cxl_port, dev);
9048 +-}
9049 +-
9050 +-static void unregister_port(void *_port)
9051 +-{
9052 +- struct cxl_port *port = _port;
9053 +- struct cxl_dport *dport;
9054 +-
9055 +- device_lock(&port->dev);
9056 +- list_for_each_entry(dport, &port->dports, list) {
9057 +- char link_name[CXL_TARGET_STRLEN];
9058 +-
9059 +- if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
9060 +- dport->port_id) >= CXL_TARGET_STRLEN)
9061 +- continue;
9062 +- sysfs_remove_link(&port->dev.kobj, link_name);
9063 +- }
9064 +- device_unlock(&port->dev);
9065 +- device_unregister(&port->dev);
9066 +-}
9067 +-
9068 +-static void cxl_unlink_uport(void *_port)
9069 +-{
9070 +- struct cxl_port *port = _port;
9071 +-
9072 +- sysfs_remove_link(&port->dev.kobj, "uport");
9073 +-}
9074 +-
9075 +-static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
9076 +-{
9077 +- int rc;
9078 +-
9079 +- rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
9080 +- if (rc)
9081 +- return rc;
9082 +- return devm_add_action_or_reset(host, cxl_unlink_uport, port);
9083 +-}
9084 +-
9085 +-static struct cxl_port *cxl_port_alloc(struct device *uport,
9086 +- resource_size_t component_reg_phys,
9087 +- struct cxl_port *parent_port)
9088 +-{
9089 +- struct cxl_port *port;
9090 +- struct device *dev;
9091 +- int rc;
9092 +-
9093 +- port = kzalloc(sizeof(*port), GFP_KERNEL);
9094 +- if (!port)
9095 +- return ERR_PTR(-ENOMEM);
9096 +-
9097 +- rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
9098 +- if (rc < 0)
9099 +- goto err;
9100 +- port->id = rc;
9101 +-
9102 +- /*
9103 +- * The top-level cxl_port "cxl_root" does not have a cxl_port as
9104 +- * its parent and it does not have any corresponding component
9105 +- * registers as its decode is described by a fixed platform
9106 +- * description.
9107 +- */
9108 +- dev = &port->dev;
9109 +- if (parent_port)
9110 +- dev->parent = &parent_port->dev;
9111 +- else
9112 +- dev->parent = uport;
9113 +-
9114 +- port->uport = uport;
9115 +- port->component_reg_phys = component_reg_phys;
9116 +- ida_init(&port->decoder_ida);
9117 +- INIT_LIST_HEAD(&port->dports);
9118 +-
9119 +- device_initialize(dev);
9120 +- device_set_pm_not_required(dev);
9121 +- dev->bus = &cxl_bus_type;
9122 +- dev->type = &cxl_port_type;
9123 +-
9124 +- return port;
9125 +-
9126 +-err:
9127 +- kfree(port);
9128 +- return ERR_PTR(rc);
9129 +-}
9130 +-
9131 +-/**
9132 +- * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
9133 +- * @host: host device for devm operations
9134 +- * @uport: "physical" device implementing this upstream port
9135 +- * @component_reg_phys: (optional) for configurable cxl_port instances
9136 +- * @parent_port: next hop up in the CXL memory decode hierarchy
9137 +- */
9138 +-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
9139 +- resource_size_t component_reg_phys,
9140 +- struct cxl_port *parent_port)
9141 +-{
9142 +- struct cxl_port *port;
9143 +- struct device *dev;
9144 +- int rc;
9145 +-
9146 +- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
9147 +- if (IS_ERR(port))
9148 +- return port;
9149 +-
9150 +- dev = &port->dev;
9151 +- if (parent_port)
9152 +- rc = dev_set_name(dev, "port%d", port->id);
9153 +- else
9154 +- rc = dev_set_name(dev, "root%d", port->id);
9155 +- if (rc)
9156 +- goto err;
9157 +-
9158 +- rc = device_add(dev);
9159 +- if (rc)
9160 +- goto err;
9161 +-
9162 +- rc = devm_add_action_or_reset(host, unregister_port, port);
9163 +- if (rc)
9164 +- return ERR_PTR(rc);
9165 +-
9166 +- rc = devm_cxl_link_uport(host, port);
9167 +- if (rc)
9168 +- return ERR_PTR(rc);
9169 +-
9170 +- return port;
9171 +-
9172 +-err:
9173 +- put_device(dev);
9174 +- return ERR_PTR(rc);
9175 +-}
9176 +-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
9177 +-
9178 +-static struct cxl_dport *find_dport(struct cxl_port *port, int id)
9179 +-{
9180 +- struct cxl_dport *dport;
9181 +-
9182 +- device_lock_assert(&port->dev);
9183 +- list_for_each_entry (dport, &port->dports, list)
9184 +- if (dport->port_id == id)
9185 +- return dport;
9186 +- return NULL;
9187 +-}
9188 +-
9189 +-static int add_dport(struct cxl_port *port, struct cxl_dport *new)
9190 +-{
9191 +- struct cxl_dport *dup;
9192 +-
9193 +- device_lock(&port->dev);
9194 +- dup = find_dport(port, new->port_id);
9195 +- if (dup)
9196 +- dev_err(&port->dev,
9197 +- "unable to add dport%d-%s non-unique port id (%s)\n",
9198 +- new->port_id, dev_name(new->dport),
9199 +- dev_name(dup->dport));
9200 +- else
9201 +- list_add_tail(&new->list, &port->dports);
9202 +- device_unlock(&port->dev);
9203 +-
9204 +- return dup ? -EEXIST : 0;
9205 +-}
9206 +-
9207 +-/**
9208 +- * cxl_add_dport - append downstream port data to a cxl_port
9209 +- * @port: the cxl_port that references this dport
9210 +- * @dport_dev: firmware or PCI device representing the dport
9211 +- * @port_id: identifier for this dport in a decoder's target list
9212 +- * @component_reg_phys: optional location of CXL component registers
9213 +- *
9214 +- * Note that all allocations and links are undone by cxl_port deletion
9215 +- * and release.
9216 +- */
9217 +-int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
9218 +- resource_size_t component_reg_phys)
9219 +-{
9220 +- char link_name[CXL_TARGET_STRLEN];
9221 +- struct cxl_dport *dport;
9222 +- int rc;
9223 +-
9224 +- if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
9225 +- CXL_TARGET_STRLEN)
9226 +- return -EINVAL;
9227 +-
9228 +- dport = kzalloc(sizeof(*dport), GFP_KERNEL);
9229 +- if (!dport)
9230 +- return -ENOMEM;
9231 +-
9232 +- INIT_LIST_HEAD(&dport->list);
9233 +- dport->dport = get_device(dport_dev);
9234 +- dport->port_id = port_id;
9235 +- dport->component_reg_phys = component_reg_phys;
9236 +- dport->port = port;
9237 +-
9238 +- rc = add_dport(port, dport);
9239 +- if (rc)
9240 +- goto err;
9241 +-
9242 +- rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
9243 +- if (rc)
9244 +- goto err;
9245 +-
9246 +- return 0;
9247 +-err:
9248 +- cxl_dport_release(dport);
9249 +- return rc;
9250 +-}
9251 +-EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL);
9252 +-
9253 +-static int decoder_populate_targets(struct cxl_decoder *cxld,
9254 +- struct cxl_port *port, int *target_map)
9255 +-{
9256 +- int rc = 0, i;
9257 +-
9258 +- if (!target_map)
9259 +- return 0;
9260 +-
9261 +- device_lock(&port->dev);
9262 +- if (list_empty(&port->dports)) {
9263 +- rc = -EINVAL;
9264 +- goto out_unlock;
9265 +- }
9266 +-
9267 +- for (i = 0; i < cxld->nr_targets; i++) {
9268 +- struct cxl_dport *dport = find_dport(port, target_map[i]);
9269 +-
9270 +- if (!dport) {
9271 +- rc = -ENXIO;
9272 +- goto out_unlock;
9273 +- }
9274 +- cxld->target[i] = dport;
9275 +- }
9276 +-
9277 +-out_unlock:
9278 +- device_unlock(&port->dev);
9279 +-
9280 +- return rc;
9281 +-}
9282 +-
9283 +-struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
9284 +-{
9285 +- struct cxl_decoder *cxld;
9286 +- struct device *dev;
9287 +- int rc = 0;
9288 +-
9289 +- if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
9290 +- return ERR_PTR(-EINVAL);
9291 +-
9292 +- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
9293 +- if (!cxld)
9294 +- return ERR_PTR(-ENOMEM);
9295 +-
9296 +- rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
9297 +- if (rc < 0)
9298 +- goto err;
9299 +-
9300 +- cxld->id = rc;
9301 +- cxld->nr_targets = nr_targets;
9302 +- dev = &cxld->dev;
9303 +- device_initialize(dev);
9304 +- device_set_pm_not_required(dev);
9305 +- dev->parent = &port->dev;
9306 +- dev->bus = &cxl_bus_type;
9307 +-
9308 +- /* root ports do not have a cxl_port_type parent */
9309 +- if (port->dev.parent->type == &cxl_port_type)
9310 +- dev->type = &cxl_decoder_switch_type;
9311 +- else
9312 +- dev->type = &cxl_decoder_root_type;
9313 +-
9314 +- return cxld;
9315 +-err:
9316 +- kfree(cxld);
9317 +- return ERR_PTR(rc);
9318 +-}
9319 +-EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL);
9320 +-
9321 +-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
9322 +-{
9323 +- struct cxl_port *port;
9324 +- struct device *dev;
9325 +- int rc;
9326 +-
9327 +- if (WARN_ON_ONCE(!cxld))
9328 +- return -EINVAL;
9329 +-
9330 +- if (WARN_ON_ONCE(IS_ERR(cxld)))
9331 +- return PTR_ERR(cxld);
9332 +-
9333 +- if (cxld->interleave_ways < 1)
9334 +- return -EINVAL;
9335 +-
9336 +- port = to_cxl_port(cxld->dev.parent);
9337 +- rc = decoder_populate_targets(cxld, port, target_map);
9338 +- if (rc)
9339 +- return rc;
9340 +-
9341 +- dev = &cxld->dev;
9342 +- rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
9343 +- if (rc)
9344 +- return rc;
9345 +-
9346 +- return device_add(dev);
9347 +-}
9348 +-EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
9349 +-
9350 +-static void cxld_unregister(void *dev)
9351 +-{
9352 +- device_unregister(dev);
9353 +-}
9354 +-
9355 +-int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
9356 +-{
9357 +- return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
9358 +-}
9359 +-EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
9360 +-
9361 +-/**
9362 +- * __cxl_driver_register - register a driver for the cxl bus
9363 +- * @cxl_drv: cxl driver structure to attach
9364 +- * @owner: owning module/driver
9365 +- * @modname: KBUILD_MODNAME for parent driver
9366 +- */
9367 +-int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
9368 +- const char *modname)
9369 +-{
9370 +- if (!cxl_drv->probe) {
9371 +- pr_debug("%s ->probe() must be specified\n", modname);
9372 +- return -EINVAL;
9373 +- }
9374 +-
9375 +- if (!cxl_drv->name) {
9376 +- pr_debug("%s ->name must be specified\n", modname);
9377 +- return -EINVAL;
9378 +- }
9379 +-
9380 +- if (!cxl_drv->id) {
9381 +- pr_debug("%s ->id must be specified\n", modname);
9382 +- return -EINVAL;
9383 +- }
9384 +-
9385 +- cxl_drv->drv.bus = &cxl_bus_type;
9386 +- cxl_drv->drv.owner = owner;
9387 +- cxl_drv->drv.mod_name = modname;
9388 +- cxl_drv->drv.name = cxl_drv->name;
9389 +-
9390 +- return driver_register(&cxl_drv->drv);
9391 +-}
9392 +-EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
9393 +-
9394 +-void cxl_driver_unregister(struct cxl_driver *cxl_drv)
9395 +-{
9396 +- driver_unregister(&cxl_drv->drv);
9397 +-}
9398 +-EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
9399 +-
9400 +-static int cxl_device_id(struct device *dev)
9401 +-{
9402 +- if (dev->type == &cxl_nvdimm_bridge_type)
9403 +- return CXL_DEVICE_NVDIMM_BRIDGE;
9404 +- if (dev->type == &cxl_nvdimm_type)
9405 +- return CXL_DEVICE_NVDIMM;
9406 +- return 0;
9407 +-}
9408 +-
9409 +-static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
9410 +-{
9411 +- return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
9412 +- cxl_device_id(dev));
9413 +-}
9414 +-
9415 +-static int cxl_bus_match(struct device *dev, struct device_driver *drv)
9416 +-{
9417 +- return cxl_device_id(dev) == to_cxl_drv(drv)->id;
9418 +-}
9419 +-
9420 +-static int cxl_bus_probe(struct device *dev)
9421 +-{
9422 +- return to_cxl_drv(dev->driver)->probe(dev);
9423 +-}
9424 +-
9425 +-static void cxl_bus_remove(struct device *dev)
9426 +-{
9427 +- struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
9428 +-
9429 +- if (cxl_drv->remove)
9430 +- cxl_drv->remove(dev);
9431 +-}
9432 +-
9433 +-struct bus_type cxl_bus_type = {
9434 +- .name = "cxl",
9435 +- .uevent = cxl_bus_uevent,
9436 +- .match = cxl_bus_match,
9437 +- .probe = cxl_bus_probe,
9438 +- .remove = cxl_bus_remove,
9439 +-};
9440 +-EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
9441 +-
9442 +-static __init int cxl_core_init(void)
9443 +-{
9444 +- int rc;
9445 +-
9446 +- cxl_mbox_init();
9447 +-
9448 +- rc = cxl_memdev_init();
9449 +- if (rc)
9450 +- return rc;
9451 +-
9452 +- rc = bus_register(&cxl_bus_type);
9453 +- if (rc)
9454 +- goto err;
9455 +- return 0;
9456 +-
9457 +-err:
9458 +- cxl_memdev_exit();
9459 +- cxl_mbox_exit();
9460 +- return rc;
9461 +-}
9462 +-
9463 +-static void cxl_core_exit(void)
9464 +-{
9465 +- bus_unregister(&cxl_bus_type);
9466 +- cxl_memdev_exit();
9467 +- cxl_mbox_exit();
9468 +-}
9469 +-
9470 +-module_init(cxl_core_init);
9471 +-module_exit(cxl_core_exit);
9472 +-MODULE_LICENSE("GPL v2");
9473 +diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
9474 +new file mode 100644
9475 +index 0000000000000..aa5239ac67c67
9476 +--- /dev/null
9477 ++++ b/drivers/cxl/core/port.c
9478 +@@ -0,0 +1,679 @@
9479 ++// SPDX-License-Identifier: GPL-2.0-only
9480 ++/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
9481 ++#include <linux/io-64-nonatomic-lo-hi.h>
9482 ++#include <linux/device.h>
9483 ++#include <linux/module.h>
9484 ++#include <linux/pci.h>
9485 ++#include <linux/slab.h>
9486 ++#include <linux/idr.h>
9487 ++#include <cxlmem.h>
9488 ++#include <cxl.h>
9489 ++#include "core.h"
9490 ++
9491 ++/**
9492 ++ * DOC: cxl core
9493 ++ *
9494 ++ * The CXL core provides a set of interfaces that can be consumed by CXL aware
9495 ++ * drivers. The interfaces allow for creation, modification, and destruction of
9496 ++ * regions, memory devices, ports, and decoders. CXL aware drivers must register
9497 ++ * with the CXL core via these interfaces in order to be able to participate in
9498 ++ * cross-device interleave coordination. The CXL core also establishes and
9499 ++ * maintains the bridge to the nvdimm subsystem.
9500 ++ *
9501 ++ * CXL core introduces sysfs hierarchy to control the devices that are
9502 ++ * instantiated by the core.
9503 ++ */
9504 ++
9505 ++static DEFINE_IDA(cxl_port_ida);
9506 ++
9507 ++static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
9508 ++ char *buf)
9509 ++{
9510 ++ return sysfs_emit(buf, "%s\n", dev->type->name);
9511 ++}
9512 ++static DEVICE_ATTR_RO(devtype);
9513 ++
9514 ++static struct attribute *cxl_base_attributes[] = {
9515 ++ &dev_attr_devtype.attr,
9516 ++ NULL,
9517 ++};
9518 ++
9519 ++struct attribute_group cxl_base_attribute_group = {
9520 ++ .attrs = cxl_base_attributes,
9521 ++};
9522 ++
9523 ++static ssize_t start_show(struct device *dev, struct device_attribute *attr,
9524 ++ char *buf)
9525 ++{
9526 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
9527 ++
9528 ++ return sysfs_emit(buf, "%#llx\n", cxld->range.start);
9529 ++}
9530 ++static DEVICE_ATTR_RO(start);
9531 ++
9532 ++static ssize_t size_show(struct device *dev, struct device_attribute *attr,
9533 ++ char *buf)
9534 ++{
9535 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
9536 ++
9537 ++ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
9538 ++}
9539 ++static DEVICE_ATTR_RO(size);
9540 ++
9541 ++#define CXL_DECODER_FLAG_ATTR(name, flag) \
9542 ++static ssize_t name##_show(struct device *dev, \
9543 ++ struct device_attribute *attr, char *buf) \
9544 ++{ \
9545 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev); \
9546 ++ \
9547 ++ return sysfs_emit(buf, "%s\n", \
9548 ++ (cxld->flags & (flag)) ? "1" : "0"); \
9549 ++} \
9550 ++static DEVICE_ATTR_RO(name)
9551 ++
9552 ++CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
9553 ++CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
9554 ++CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
9555 ++CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
9556 ++CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
9557 ++
9558 ++static ssize_t target_type_show(struct device *dev,
9559 ++ struct device_attribute *attr, char *buf)
9560 ++{
9561 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
9562 ++
9563 ++ switch (cxld->target_type) {
9564 ++ case CXL_DECODER_ACCELERATOR:
9565 ++ return sysfs_emit(buf, "accelerator\n");
9566 ++ case CXL_DECODER_EXPANDER:
9567 ++ return sysfs_emit(buf, "expander\n");
9568 ++ }
9569 ++ return -ENXIO;
9570 ++}
9571 ++static DEVICE_ATTR_RO(target_type);
9572 ++
9573 ++static ssize_t target_list_show(struct device *dev,
9574 ++ struct device_attribute *attr, char *buf)
9575 ++{
9576 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
9577 ++ ssize_t offset = 0;
9578 ++ int i, rc = 0;
9579 ++
9580 ++ device_lock(dev);
9581 ++ for (i = 0; i < cxld->interleave_ways; i++) {
9582 ++ struct cxl_dport *dport = cxld->target[i];
9583 ++ struct cxl_dport *next = NULL;
9584 ++
9585 ++ if (!dport)
9586 ++ break;
9587 ++
9588 ++ if (i + 1 < cxld->interleave_ways)
9589 ++ next = cxld->target[i + 1];
9590 ++ rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
9591 ++ next ? "," : "");
9592 ++ if (rc < 0)
9593 ++ break;
9594 ++ offset += rc;
9595 ++ }
9596 ++ device_unlock(dev);
9597 ++
9598 ++ if (rc < 0)
9599 ++ return rc;
9600 ++
9601 ++ rc = sysfs_emit_at(buf, offset, "\n");
9602 ++ if (rc < 0)
9603 ++ return rc;
9604 ++
9605 ++ return offset + rc;
9606 ++}
9607 ++static DEVICE_ATTR_RO(target_list);
9608 ++
9609 ++static struct attribute *cxl_decoder_base_attrs[] = {
9610 ++ &dev_attr_start.attr,
9611 ++ &dev_attr_size.attr,
9612 ++ &dev_attr_locked.attr,
9613 ++ &dev_attr_target_list.attr,
9614 ++ NULL,
9615 ++};
9616 ++
9617 ++static struct attribute_group cxl_decoder_base_attribute_group = {
9618 ++ .attrs = cxl_decoder_base_attrs,
9619 ++};
9620 ++
9621 ++static struct attribute *cxl_decoder_root_attrs[] = {
9622 ++ &dev_attr_cap_pmem.attr,
9623 ++ &dev_attr_cap_ram.attr,
9624 ++ &dev_attr_cap_type2.attr,
9625 ++ &dev_attr_cap_type3.attr,
9626 ++ NULL,
9627 ++};
9628 ++
9629 ++static struct attribute_group cxl_decoder_root_attribute_group = {
9630 ++ .attrs = cxl_decoder_root_attrs,
9631 ++};
9632 ++
9633 ++static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
9634 ++ &cxl_decoder_root_attribute_group,
9635 ++ &cxl_decoder_base_attribute_group,
9636 ++ &cxl_base_attribute_group,
9637 ++ NULL,
9638 ++};
9639 ++
9640 ++static struct attribute *cxl_decoder_switch_attrs[] = {
9641 ++ &dev_attr_target_type.attr,
9642 ++ NULL,
9643 ++};
9644 ++
9645 ++static struct attribute_group cxl_decoder_switch_attribute_group = {
9646 ++ .attrs = cxl_decoder_switch_attrs,
9647 ++};
9648 ++
9649 ++static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
9650 ++ &cxl_decoder_switch_attribute_group,
9651 ++ &cxl_decoder_base_attribute_group,
9652 ++ &cxl_base_attribute_group,
9653 ++ NULL,
9654 ++};
9655 ++
9656 ++static void cxl_decoder_release(struct device *dev)
9657 ++{
9658 ++ struct cxl_decoder *cxld = to_cxl_decoder(dev);
9659 ++ struct cxl_port *port = to_cxl_port(dev->parent);
9660 ++
9661 ++ ida_free(&port->decoder_ida, cxld->id);
9662 ++ kfree(cxld);
9663 ++ put_device(&port->dev);
9664 ++}
9665 ++
9666 ++static const struct device_type cxl_decoder_switch_type = {
9667 ++ .name = "cxl_decoder_switch",
9668 ++ .release = cxl_decoder_release,
9669 ++ .groups = cxl_decoder_switch_attribute_groups,
9670 ++};
9671 ++
9672 ++static const struct device_type cxl_decoder_root_type = {
9673 ++ .name = "cxl_decoder_root",
9674 ++ .release = cxl_decoder_release,
9675 ++ .groups = cxl_decoder_root_attribute_groups,
9676 ++};
9677 ++
9678 ++bool is_root_decoder(struct device *dev)
9679 ++{
9680 ++ return dev->type == &cxl_decoder_root_type;
9681 ++}
9682 ++EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
9683 ++
9684 ++struct cxl_decoder *to_cxl_decoder(struct device *dev)
9685 ++{
9686 ++ if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
9687 ++ "not a cxl_decoder device\n"))
9688 ++ return NULL;
9689 ++ return container_of(dev, struct cxl_decoder, dev);
9690 ++}
9691 ++EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
9692 ++
9693 ++static void cxl_dport_release(struct cxl_dport *dport)
9694 ++{
9695 ++ list_del(&dport->list);
9696 ++ put_device(dport->dport);
9697 ++ kfree(dport);
9698 ++}
9699 ++
9700 ++static void cxl_port_release(struct device *dev)
9701 ++{
9702 ++ struct cxl_port *port = to_cxl_port(dev);
9703 ++ struct cxl_dport *dport, *_d;
9704 ++
9705 ++ device_lock(dev);
9706 ++ list_for_each_entry_safe(dport, _d, &port->dports, list)
9707 ++ cxl_dport_release(dport);
9708 ++ device_unlock(dev);
9709 ++ ida_free(&cxl_port_ida, port->id);
9710 ++ kfree(port);
9711 ++}
9712 ++
9713 ++static const struct attribute_group *cxl_port_attribute_groups[] = {
9714 ++ &cxl_base_attribute_group,
9715 ++ NULL,
9716 ++};
9717 ++
9718 ++static const struct device_type cxl_port_type = {
9719 ++ .name = "cxl_port",
9720 ++ .release = cxl_port_release,
9721 ++ .groups = cxl_port_attribute_groups,
9722 ++};
9723 ++
9724 ++struct cxl_port *to_cxl_port(struct device *dev)
9725 ++{
9726 ++ if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
9727 ++ "not a cxl_port device\n"))
9728 ++ return NULL;
9729 ++ return container_of(dev, struct cxl_port, dev);
9730 ++}
9731 ++
9732 ++static void unregister_port(void *_port)
9733 ++{
9734 ++ struct cxl_port *port = _port;
9735 ++ struct cxl_dport *dport;
9736 ++
9737 ++ device_lock(&port->dev);
9738 ++ list_for_each_entry(dport, &port->dports, list) {
9739 ++ char link_name[CXL_TARGET_STRLEN];
9740 ++
9741 ++ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
9742 ++ dport->port_id) >= CXL_TARGET_STRLEN)
9743 ++ continue;
9744 ++ sysfs_remove_link(&port->dev.kobj, link_name);
9745 ++ }
9746 ++ device_unlock(&port->dev);
9747 ++ device_unregister(&port->dev);
9748 ++}
9749 ++
9750 ++static void cxl_unlink_uport(void *_port)
9751 ++{
9752 ++ struct cxl_port *port = _port;
9753 ++
9754 ++ sysfs_remove_link(&port->dev.kobj, "uport");
9755 ++}
9756 ++
9757 ++static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
9758 ++{
9759 ++ int rc;
9760 ++
9761 ++ rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
9762 ++ if (rc)
9763 ++ return rc;
9764 ++ return devm_add_action_or_reset(host, cxl_unlink_uport, port);
9765 ++}
9766 ++
9767 ++static struct cxl_port *cxl_port_alloc(struct device *uport,
9768 ++ resource_size_t component_reg_phys,
9769 ++ struct cxl_port *parent_port)
9770 ++{
9771 ++ struct cxl_port *port;
9772 ++ struct device *dev;
9773 ++ int rc;
9774 ++
9775 ++ port = kzalloc(sizeof(*port), GFP_KERNEL);
9776 ++ if (!port)
9777 ++ return ERR_PTR(-ENOMEM);
9778 ++
9779 ++ rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
9780 ++ if (rc < 0)
9781 ++ goto err;
9782 ++ port->id = rc;
9783 ++
9784 ++ /*
9785 ++ * The top-level cxl_port "cxl_root" does not have a cxl_port as
9786 ++ * its parent and it does not have any corresponding component
9787 ++ * registers as its decode is described by a fixed platform
9788 ++ * description.
9789 ++ */
9790 ++ dev = &port->dev;
9791 ++ if (parent_port)
9792 ++ dev->parent = &parent_port->dev;
9793 ++ else
9794 ++ dev->parent = uport;
9795 ++
9796 ++ port->uport = uport;
9797 ++ port->component_reg_phys = component_reg_phys;
9798 ++ ida_init(&port->decoder_ida);
9799 ++ INIT_LIST_HEAD(&port->dports);
9800 ++
9801 ++ device_initialize(dev);
9802 ++ device_set_pm_not_required(dev);
9803 ++ dev->bus = &cxl_bus_type;
9804 ++ dev->type = &cxl_port_type;
9805 ++
9806 ++ return port;
9807 ++
9808 ++err:
9809 ++ kfree(port);
9810 ++ return ERR_PTR(rc);
9811 ++}
9812 ++
9813 ++/**
9814 ++ * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
9815 ++ * @host: host device for devm operations
9816 ++ * @uport: "physical" device implementing this upstream port
9817 ++ * @component_reg_phys: (optional) for configurable cxl_port instances
9818 ++ * @parent_port: next hop up in the CXL memory decode hierarchy
9819 ++ */
9820 ++struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
9821 ++ resource_size_t component_reg_phys,
9822 ++ struct cxl_port *parent_port)
9823 ++{
9824 ++ struct cxl_port *port;
9825 ++ struct device *dev;
9826 ++ int rc;
9827 ++
9828 ++ port = cxl_port_alloc(uport, component_reg_phys, parent_port);
9829 ++ if (IS_ERR(port))
9830 ++ return port;
9831 ++
9832 ++ dev = &port->dev;
9833 ++ if (parent_port)
9834 ++ rc = dev_set_name(dev, "port%d", port->id);
9835 ++ else
9836 ++ rc = dev_set_name(dev, "root%d", port->id);
9837 ++ if (rc)
9838 ++ goto err;
9839 ++
9840 ++ rc = device_add(dev);
9841 ++ if (rc)
9842 ++ goto err;
9843 ++
9844 ++ rc = devm_add_action_or_reset(host, unregister_port, port);
9845 ++ if (rc)
9846 ++ return ERR_PTR(rc);
9847 ++
9848 ++ rc = devm_cxl_link_uport(host, port);
9849 ++ if (rc)
9850 ++ return ERR_PTR(rc);
9851 ++
9852 ++ return port;
9853 ++
9854 ++err:
9855 ++ put_device(dev);
9856 ++ return ERR_PTR(rc);
9857 ++}
9858 ++EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
9859 ++
9860 ++static struct cxl_dport *find_dport(struct cxl_port *port, int id)
9861 ++{
9862 ++ struct cxl_dport *dport;
9863 ++
9864 ++ device_lock_assert(&port->dev);
9865 ++ list_for_each_entry (dport, &port->dports, list)
9866 ++ if (dport->port_id == id)
9867 ++ return dport;
9868 ++ return NULL;
9869 ++}
9870 ++
9871 ++static int add_dport(struct cxl_port *port, struct cxl_dport *new)
9872 ++{
9873 ++ struct cxl_dport *dup;
9874 ++
9875 ++ device_lock(&port->dev);
9876 ++ dup = find_dport(port, new->port_id);
9877 ++ if (dup)
9878 ++ dev_err(&port->dev,
9879 ++ "unable to add dport%d-%s non-unique port id (%s)\n",
9880 ++ new->port_id, dev_name(new->dport),
9881 ++ dev_name(dup->dport));
9882 ++ else
9883 ++ list_add_tail(&new->list, &port->dports);
9884 ++ device_unlock(&port->dev);
9885 ++
9886 ++ return dup ? -EEXIST : 0;
9887 ++}
9888 ++
9889 ++/**
9890 ++ * cxl_add_dport - append downstream port data to a cxl_port
9891 ++ * @port: the cxl_port that references this dport
9892 ++ * @dport_dev: firmware or PCI device representing the dport
9893 ++ * @port_id: identifier for this dport in a decoder's target list
9894 ++ * @component_reg_phys: optional location of CXL component registers
9895 ++ *
9896 ++ * Note that all allocations and links are undone by cxl_port deletion
9897 ++ * and release.
9898 ++ */
9899 ++int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
9900 ++ resource_size_t component_reg_phys)
9901 ++{
9902 ++ char link_name[CXL_TARGET_STRLEN];
9903 ++ struct cxl_dport *dport;
9904 ++ int rc;
9905 ++
9906 ++ if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
9907 ++ CXL_TARGET_STRLEN)
9908 ++ return -EINVAL;
9909 ++
9910 ++ dport = kzalloc(sizeof(*dport), GFP_KERNEL);
9911 ++ if (!dport)
9912 ++ return -ENOMEM;
9913 ++
9914 ++ INIT_LIST_HEAD(&dport->list);
9915 ++ dport->dport = get_device(dport_dev);
9916 ++ dport->port_id = port_id;
9917 ++ dport->component_reg_phys = component_reg_phys;
9918 ++ dport->port = port;
9919 ++
9920 ++ rc = add_dport(port, dport);
9921 ++ if (rc)
9922 ++ goto err;
9923 ++
9924 ++ rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
9925 ++ if (rc)
9926 ++ goto err;
9927 ++
9928 ++ return 0;
9929 ++err:
9930 ++ cxl_dport_release(dport);
9931 ++ return rc;
9932 ++}
9933 ++EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL);
9934 ++
9935 ++static int decoder_populate_targets(struct cxl_decoder *cxld,
9936 ++ struct cxl_port *port, int *target_map)
9937 ++{
9938 ++ int rc = 0, i;
9939 ++
9940 ++ if (!target_map)
9941 ++ return 0;
9942 ++
9943 ++ device_lock(&port->dev);
9944 ++ if (list_empty(&port->dports)) {
9945 ++ rc = -EINVAL;
9946 ++ goto out_unlock;
9947 ++ }
9948 ++
9949 ++ for (i = 0; i < cxld->nr_targets; i++) {
9950 ++ struct cxl_dport *dport = find_dport(port, target_map[i]);
9951 ++
9952 ++ if (!dport) {
9953 ++ rc = -ENXIO;
9954 ++ goto out_unlock;
9955 ++ }
9956 ++ cxld->target[i] = dport;
9957 ++ }
9958 ++
9959 ++out_unlock:
9960 ++ device_unlock(&port->dev);
9961 ++
9962 ++ return rc;
9963 ++}
9964 ++
9965 ++struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
9966 ++{
9967 ++ struct cxl_decoder *cxld;
9968 ++ struct device *dev;
9969 ++ int rc = 0;
9970 ++
9971 ++ if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
9972 ++ return ERR_PTR(-EINVAL);
9973 ++
9974 ++ cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
9975 ++ if (!cxld)
9976 ++ return ERR_PTR(-ENOMEM);
9977 ++
9978 ++ rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
9979 ++ if (rc < 0)
9980 ++ goto err;
9981 ++
9982 ++ /* need parent to stick around to release the id */
9983 ++ get_device(&port->dev);
9984 ++ cxld->id = rc;
9985 ++
9986 ++ cxld->nr_targets = nr_targets;
9987 ++ dev = &cxld->dev;
9988 ++ device_initialize(dev);
9989 ++ device_set_pm_not_required(dev);
9990 ++ dev->parent = &port->dev;
9991 ++ dev->bus = &cxl_bus_type;
9992 ++
9993 ++ /* root ports do not have a cxl_port_type parent */
9994 ++ if (port->dev.parent->type == &cxl_port_type)
9995 ++ dev->type = &cxl_decoder_switch_type;
9996 ++ else
9997 ++ dev->type = &cxl_decoder_root_type;
9998 ++
9999 ++ return cxld;
10000 ++err:
10001 ++ kfree(cxld);
10002 ++ return ERR_PTR(rc);
10003 ++}
10004 ++EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL);
10005 ++
10006 ++int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
10007 ++{
10008 ++ struct cxl_port *port;
10009 ++ struct device *dev;
10010 ++ int rc;
10011 ++
10012 ++ if (WARN_ON_ONCE(!cxld))
10013 ++ return -EINVAL;
10014 ++
10015 ++ if (WARN_ON_ONCE(IS_ERR(cxld)))
10016 ++ return PTR_ERR(cxld);
10017 ++
10018 ++ if (cxld->interleave_ways < 1)
10019 ++ return -EINVAL;
10020 ++
10021 ++ port = to_cxl_port(cxld->dev.parent);
10022 ++ rc = decoder_populate_targets(cxld, port, target_map);
10023 ++ if (rc)
10024 ++ return rc;
10025 ++
10026 ++ dev = &cxld->dev;
10027 ++ rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
10028 ++ if (rc)
10029 ++ return rc;
10030 ++
10031 ++ return device_add(dev);
10032 ++}
10033 ++EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
10034 ++
10035 ++static void cxld_unregister(void *dev)
10036 ++{
10037 ++ device_unregister(dev);
10038 ++}
10039 ++
10040 ++int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
10041 ++{
10042 ++ return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
10043 ++}
10044 ++EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
10045 ++
10046 ++/**
10047 ++ * __cxl_driver_register - register a driver for the cxl bus
10048 ++ * @cxl_drv: cxl driver structure to attach
10049 ++ * @owner: owning module/driver
10050 ++ * @modname: KBUILD_MODNAME for parent driver
10051 ++ */
10052 ++int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
10053 ++ const char *modname)
10054 ++{
10055 ++ if (!cxl_drv->probe) {
10056 ++ pr_debug("%s ->probe() must be specified\n", modname);
10057 ++ return -EINVAL;
10058 ++ }
10059 ++
10060 ++ if (!cxl_drv->name) {
10061 ++ pr_debug("%s ->name must be specified\n", modname);
10062 ++ return -EINVAL;
10063 ++ }
10064 ++
10065 ++ if (!cxl_drv->id) {
10066 ++ pr_debug("%s ->id must be specified\n", modname);
10067 ++ return -EINVAL;
10068 ++ }
10069 ++
10070 ++ cxl_drv->drv.bus = &cxl_bus_type;
10071 ++ cxl_drv->drv.owner = owner;
10072 ++ cxl_drv->drv.mod_name = modname;
10073 ++ cxl_drv->drv.name = cxl_drv->name;
10074 ++
10075 ++ return driver_register(&cxl_drv->drv);
10076 ++}
10077 ++EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
10078 ++
10079 ++void cxl_driver_unregister(struct cxl_driver *cxl_drv)
10080 ++{
10081 ++ driver_unregister(&cxl_drv->drv);
10082 ++}
10083 ++EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
10084 ++
10085 ++static int cxl_device_id(struct device *dev)
10086 ++{
10087 ++ if (dev->type == &cxl_nvdimm_bridge_type)
10088 ++ return CXL_DEVICE_NVDIMM_BRIDGE;
10089 ++ if (dev->type == &cxl_nvdimm_type)
10090 ++ return CXL_DEVICE_NVDIMM;
10091 ++ return 0;
10092 ++}
10093 ++
10094 ++static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
10095 ++{
10096 ++ return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
10097 ++ cxl_device_id(dev));
10098 ++}
10099 ++
10100 ++static int cxl_bus_match(struct device *dev, struct device_driver *drv)
10101 ++{
10102 ++ return cxl_device_id(dev) == to_cxl_drv(drv)->id;
10103 ++}
10104 ++
10105 ++static int cxl_bus_probe(struct device *dev)
10106 ++{
10107 ++ return to_cxl_drv(dev->driver)->probe(dev);
10108 ++}
10109 ++
10110 ++static void cxl_bus_remove(struct device *dev)
10111 ++{
10112 ++ struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
10113 ++
10114 ++ if (cxl_drv->remove)
10115 ++ cxl_drv->remove(dev);
10116 ++}
10117 ++
10118 ++struct bus_type cxl_bus_type = {
10119 ++ .name = "cxl",
10120 ++ .uevent = cxl_bus_uevent,
10121 ++ .match = cxl_bus_match,
10122 ++ .probe = cxl_bus_probe,
10123 ++ .remove = cxl_bus_remove,
10124 ++};
10125 ++EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
10126 ++
10127 ++static __init int cxl_core_init(void)
10128 ++{
10129 ++ int rc;
10130 ++
10131 ++ cxl_mbox_init();
10132 ++
10133 ++ rc = cxl_memdev_init();
10134 ++ if (rc)
10135 ++ return rc;
10136 ++
10137 ++ rc = bus_register(&cxl_bus_type);
10138 ++ if (rc)
10139 ++ goto err;
10140 ++ return 0;
10141 ++
10142 ++err:
10143 ++ cxl_memdev_exit();
10144 ++ cxl_mbox_exit();
10145 ++ return rc;
10146 ++}
10147 ++
10148 ++static void cxl_core_exit(void)
10149 ++{
10150 ++ bus_unregister(&cxl_bus_type);
10151 ++ cxl_memdev_exit();
10152 ++ cxl_mbox_exit();
10153 ++}
10154 ++
10155 ++module_init(cxl_core_init);
10156 ++module_exit(cxl_core_exit);
10157 ++MODULE_LICENSE("GPL v2");
10158 +diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
10159 +index e37e23bf43553..6a18ff8739e00 100644
10160 +--- a/drivers/cxl/core/regs.c
10161 ++++ b/drivers/cxl/core/regs.c
10162 +@@ -35,7 +35,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
10163 + struct cxl_component_reg_map *map)
10164 + {
10165 + int cap, cap_count;
10166 +- u64 cap_array;
10167 ++ u32 cap_array;
10168 +
10169 + *map = (struct cxl_component_reg_map) { 0 };
10170 +
10171 +@@ -45,11 +45,11 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
10172 + */
10173 + base += CXL_CM_OFFSET;
10174 +
10175 +- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
10176 ++ cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);
10177 +
10178 + if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
10179 + dev_err(dev,
10180 +- "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
10181 ++ "Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
10182 + return;
10183 + }
10184 +
10185 +diff --git a/drivers/dax/super.c b/drivers/dax/super.c
10186 +index e3029389d8097..6bd565fe2e63b 100644
10187 +--- a/drivers/dax/super.c
10188 ++++ b/drivers/dax/super.c
10189 +@@ -476,6 +476,7 @@ static int dax_fs_init(void)
10190 + static void dax_fs_exit(void)
10191 + {
10192 + kern_unmount(dax_mnt);
10193 ++ rcu_barrier();
10194 + kmem_cache_destroy(dax_cache);
10195 + }
10196 +
10197 +diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
10198 +index c57a609db75be..e7330684d3b82 100644
10199 +--- a/drivers/dma-buf/udmabuf.c
10200 ++++ b/drivers/dma-buf/udmabuf.c
10201 +@@ -190,6 +190,10 @@ static long udmabuf_create(struct miscdevice *device,
10202 + if (ubuf->pagecount > pglimit)
10203 + goto err;
10204 + }
10205 ++
10206 ++ if (!ubuf->pagecount)
10207 ++ goto err;
10208 ++
10209 + ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
10210 + GFP_KERNEL);
10211 + if (!ubuf->pages) {
10212 +diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
10213 +index 97c87a7cba879..43817ced3a3e1 100644
10214 +--- a/drivers/dma/hisi_dma.c
10215 ++++ b/drivers/dma/hisi_dma.c
10216 +@@ -30,7 +30,7 @@
10217 + #define HISI_DMA_MODE 0x217c
10218 + #define HISI_DMA_OFFSET 0x100
10219 +
10220 +-#define HISI_DMA_MSI_NUM 30
10221 ++#define HISI_DMA_MSI_NUM 32
10222 + #define HISI_DMA_CHAN_NUM 30
10223 + #define HISI_DMA_Q_DEPTH_VAL 1024
10224 +
10225 +diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
10226 +index 573ad8b86804e..3061fe857d69f 100644
10227 +--- a/drivers/dma/idxd/device.c
10228 ++++ b/drivers/dma/idxd/device.c
10229 +@@ -681,8 +681,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
10230 + group->use_rdbuf_limit = false;
10231 + group->rdbufs_allowed = 0;
10232 + group->rdbufs_reserved = 0;
10233 +- group->tc_a = -1;
10234 +- group->tc_b = -1;
10235 ++ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
10236 ++ group->tc_a = 1;
10237 ++ group->tc_b = 1;
10238 ++ } else {
10239 ++ group->tc_a = -1;
10240 ++ group->tc_b = -1;
10241 ++ }
10242 + }
10243 + }
10244 +
10245 +diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
10246 +index 0ef086e43090b..7e771c56c13c6 100644
10247 +--- a/drivers/firmware/efi/efi-pstore.c
10248 ++++ b/drivers/firmware/efi/efi-pstore.c
10249 +@@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record)
10250 + efi_name[i] = name[i];
10251 +
10252 + ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
10253 +- preemptible(), record->size, record->psi->buf);
10254 ++ false, record->size, record->psi->buf);
10255 +
10256 + if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
10257 + if (!schedule_work(&efivar_work))
10258 +diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
10259 +index 931544c9f63d4..983e07dc022ed 100644
10260 +--- a/drivers/firmware/google/Kconfig
10261 ++++ b/drivers/firmware/google/Kconfig
10262 +@@ -21,7 +21,7 @@ config GOOGLE_SMI
10263 +
10264 + config GOOGLE_COREBOOT_TABLE
10265 + tristate "Coreboot Table Access"
10266 +- depends on ACPI || OF
10267 ++ depends on HAS_IOMEM && (ACPI || OF)
10268 + help
10269 + This option enables the coreboot_table module, which provides other
10270 + firmware modules access to the coreboot table. The coreboot table
10271 +diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
10272 +index 7db8066b19fd5..3f67bf774821d 100644
10273 +--- a/drivers/firmware/qcom_scm.c
10274 ++++ b/drivers/firmware/qcom_scm.c
10275 +@@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
10276 + };
10277 + int ret;
10278 +
10279 +- desc.args[0] = addr;
10280 +- desc.args[1] = size;
10281 +- desc.args[2] = spare;
10282 +- desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
10283 +- QCOM_SCM_VAL);
10284 +-
10285 + ret = qcom_scm_call(__scm->dev, &desc, NULL);
10286 +
10287 + /* the pg table has been initialized already, ignore the error */
10288 +diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
10289 +index 29c0a616b3177..c4bf934e3553e 100644
10290 +--- a/drivers/firmware/stratix10-svc.c
10291 ++++ b/drivers/firmware/stratix10-svc.c
10292 +@@ -477,7 +477,7 @@ static int svc_normal_to_secure_thread(void *data)
10293 + case INTEL_SIP_SMC_RSU_ERROR:
10294 + pr_err("%s: STATUS_ERROR\n", __func__);
10295 + cbdata->status = BIT(SVC_STATUS_ERROR);
10296 +- cbdata->kaddr1 = NULL;
10297 ++ cbdata->kaddr1 = &res.a1;
10298 + cbdata->kaddr2 = NULL;
10299 + cbdata->kaddr3 = NULL;
10300 + pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
10301 +diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
10302 +index 303a491e520d1..757cc8b9f3de9 100644
10303 +--- a/drivers/firmware/sysfb_simplefb.c
10304 ++++ b/drivers/firmware/sysfb_simplefb.c
10305 +@@ -113,16 +113,21 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
10306 + sysfb_apply_efi_quirks(pd);
10307 +
10308 + ret = platform_device_add_resources(pd, &res, 1);
10309 +- if (ret) {
10310 +- platform_device_put(pd);
10311 +- return ret;
10312 +- }
10313 ++ if (ret)
10314 ++ goto err_put_device;
10315 +
10316 + ret = platform_device_add_data(pd, mode, sizeof(*mode));
10317 +- if (ret) {
10318 +- platform_device_put(pd);
10319 +- return ret;
10320 +- }
10321 ++ if (ret)
10322 ++ goto err_put_device;
10323 ++
10324 ++ ret = platform_device_add(pd);
10325 ++ if (ret)
10326 ++ goto err_put_device;
10327 ++
10328 ++ return 0;
10329 ++
10330 ++err_put_device:
10331 ++ platform_device_put(pd);
10332 +
10333 +- return platform_device_add(pd);
10334 ++ return ret;
10335 + }
10336 +diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
10337 +index 8606e55c1721c..0bed2fab80558 100644
10338 +--- a/drivers/fsi/fsi-master-aspeed.c
10339 ++++ b/drivers/fsi/fsi-master-aspeed.c
10340 +@@ -542,25 +542,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
10341 + return rc;
10342 + }
10343 +
10344 +- aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
10345 ++ aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL);
10346 + if (!aspeed)
10347 + return -ENOMEM;
10348 +
10349 + aspeed->dev = &pdev->dev;
10350 +
10351 + aspeed->base = devm_platform_ioremap_resource(pdev, 0);
10352 +- if (IS_ERR(aspeed->base))
10353 +- return PTR_ERR(aspeed->base);
10354 ++ if (IS_ERR(aspeed->base)) {
10355 ++ rc = PTR_ERR(aspeed->base);
10356 ++ goto err_free_aspeed;
10357 ++ }
10358 +
10359 + aspeed->clk = devm_clk_get(aspeed->dev, NULL);
10360 + if (IS_ERR(aspeed->clk)) {
10361 + dev_err(aspeed->dev, "couldn't get clock\n");
10362 +- return PTR_ERR(aspeed->clk);
10363 ++ rc = PTR_ERR(aspeed->clk);
10364 ++ goto err_free_aspeed;
10365 + }
10366 + rc = clk_prepare_enable(aspeed->clk);
10367 + if (rc) {
10368 + dev_err(aspeed->dev, "couldn't enable clock\n");
10369 +- return rc;
10370 ++ goto err_free_aspeed;
10371 + }
10372 +
10373 + rc = setup_cfam_reset(aspeed);
10374 +@@ -595,7 +598,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
10375 + rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
10376 + if (rc) {
10377 + dev_err(&pdev->dev, "failed to read hub version\n");
10378 +- return rc;
10379 ++ goto err_release;
10380 + }
10381 +
10382 + reg = be32_to_cpu(raw);
10383 +@@ -634,6 +637,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
10384 +
10385 + err_release:
10386 + clk_disable_unprepare(aspeed->clk);
10387 ++err_free_aspeed:
10388 ++ kfree(aspeed);
10389 + return rc;
10390 + }
10391 +
10392 +diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
10393 +index da1486bb6a144..bcb756dc98663 100644
10394 +--- a/drivers/fsi/fsi-scom.c
10395 ++++ b/drivers/fsi/fsi-scom.c
10396 +@@ -145,7 +145,7 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
10397 + uint64_t addr, uint32_t *status)
10398 + {
10399 + uint64_t ind_data, ind_addr;
10400 +- int rc, retries, err = 0;
10401 ++ int rc, err;
10402 +
10403 + if (value & ~XSCOM_DATA_IND_DATA)
10404 + return -EINVAL;
10405 +@@ -156,19 +156,14 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
10406 + if (rc || (*status & SCOM_STATUS_ANY_ERR))
10407 + return rc;
10408 +
10409 +- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
10410 +- rc = __get_scom(scom, &ind_data, addr, status);
10411 +- if (rc || (*status & SCOM_STATUS_ANY_ERR))
10412 +- return rc;
10413 ++ rc = __get_scom(scom, &ind_data, addr, status);
10414 ++ if (rc || (*status & SCOM_STATUS_ANY_ERR))
10415 ++ return rc;
10416 +
10417 +- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
10418 +- *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
10419 +- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
10420 +- return 0;
10421 ++ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
10422 ++ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
10423 +
10424 +- msleep(1);
10425 +- }
10426 +- return rc;
10427 ++ return 0;
10428 + }
10429 +
10430 + static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
10431 +@@ -188,7 +183,7 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
10432 + uint64_t addr, uint32_t *status)
10433 + {
10434 + uint64_t ind_data, ind_addr;
10435 +- int rc, retries, err = 0;
10436 ++ int rc, err;
10437 +
10438 + ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
10439 + ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
10440 +@@ -196,21 +191,15 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
10441 + if (rc || (*status & SCOM_STATUS_ANY_ERR))
10442 + return rc;
10443 +
10444 +- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
10445 +- rc = __get_scom(scom, &ind_data, addr, status);
10446 +- if (rc || (*status & SCOM_STATUS_ANY_ERR))
10447 +- return rc;
10448 +-
10449 +- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
10450 +- *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
10451 +- *value = ind_data & XSCOM_DATA_IND_DATA;
10452 ++ rc = __get_scom(scom, &ind_data, addr, status);
10453 ++ if (rc || (*status & SCOM_STATUS_ANY_ERR))
10454 ++ return rc;
10455 +
10456 +- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
10457 +- return 0;
10458 ++ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
10459 ++ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
10460 ++ *value = ind_data & XSCOM_DATA_IND_DATA;
10461 +
10462 +- msleep(1);
10463 +- }
10464 +- return rc;
10465 ++ return 0;
10466 + }
10467 +
10468 + static int raw_put_scom(struct scom_device *scom, uint64_t value,
10469 +@@ -289,7 +278,7 @@ static int put_scom(struct scom_device *scom, uint64_t value,
10470 + int rc;
10471 +
10472 + rc = raw_put_scom(scom, value, addr, &status);
10473 +- if (rc == -ENODEV)
10474 ++ if (rc)
10475 + return rc;
10476 +
10477 + rc = handle_fsi2pib_status(scom, status);
10478 +@@ -308,7 +297,7 @@ static int get_scom(struct scom_device *scom, uint64_t *value,
10479 + int rc;
10480 +
10481 + rc = raw_get_scom(scom, value, addr, &status);
10482 +- if (rc == -ENODEV)
10483 ++ if (rc)
10484 + return rc;
10485 +
10486 + rc = handle_fsi2pib_status(scom, status);
10487 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
10488 +index c16a2704ced65..f3160b951df3a 100644
10489 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
10490 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
10491 +@@ -175,7 +175,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
10492 +
10493 + /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
10494 + if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
10495 +- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
10496 ++ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
10497 + (mode_clock * 5/4 <= max_tmds_clock))
10498 + bpc = 10;
10499 + else
10500 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
10501 +index ed077de426d9b..f18c698137a6b 100644
10502 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
10503 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
10504 +@@ -31,6 +31,7 @@
10505 + #include <linux/console.h>
10506 + #include <linux/slab.h>
10507 + #include <linux/iommu.h>
10508 ++#include <linux/pci.h>
10509 +
10510 + #include <drm/drm_atomic_helper.h>
10511 + #include <drm/drm_probe_helper.h>
10512 +@@ -2073,6 +2074,8 @@ out:
10513 + */
10514 + static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
10515 + {
10516 ++ struct drm_device *dev = adev_to_drm(adev);
10517 ++ struct pci_dev *parent;
10518 + int i, r;
10519 +
10520 + amdgpu_device_enable_virtual_display(adev);
10521 +@@ -2137,6 +2140,18 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
10522 + break;
10523 + }
10524 +
10525 ++ if (amdgpu_has_atpx() &&
10526 ++ (amdgpu_is_atpx_hybrid() ||
10527 ++ amdgpu_has_atpx_dgpu_power_cntl()) &&
10528 ++ ((adev->flags & AMD_IS_APU) == 0) &&
10529 ++ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
10530 ++ adev->flags |= AMD_IS_PX;
10531 ++
10532 ++ if (!(adev->flags & AMD_IS_APU)) {
10533 ++ parent = pci_upstream_bridge(adev->pdev);
10534 ++ adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
10535 ++ }
10536 ++
10537 + amdgpu_amdkfd_device_probe(adev);
10538 +
10539 + adev->pm.pp_feature = amdgpu_pp_feature_mask;
10540 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
10541 +index 2a786e7886277..978c46395ced0 100644
10542 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
10543 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
10544 +@@ -91,17 +91,13 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
10545 +
10546 + int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
10547 + {
10548 +- unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
10549 ++ unsigned char buff[AMDGPU_PRODUCT_NAME_LEN];
10550 + u32 addrptr;
10551 + int size, len;
10552 +- int offset = 2;
10553 +
10554 + if (!is_fru_eeprom_supported(adev))
10555 + return 0;
10556 +
10557 +- if (adev->asic_type == CHIP_ALDEBARAN)
10558 +- offset = 0;
10559 +-
10560 + /* If algo exists, it means that the i2c_adapter's initialized */
10561 + if (!adev->pm.smu_i2c.algo) {
10562 + DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
10563 +@@ -143,8 +139,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
10564 + AMDGPU_PRODUCT_NAME_LEN);
10565 + len = AMDGPU_PRODUCT_NAME_LEN - 1;
10566 + }
10567 +- /* Start at 2 due to buff using fields 0 and 1 for the address */
10568 +- memcpy(adev->product_name, &buff[offset], len);
10569 ++ memcpy(adev->product_name, buff, len);
10570 + adev->product_name[len] = '\0';
10571 +
10572 + addrptr += size + 1;
10573 +@@ -162,7 +157,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
10574 + DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
10575 + len = sizeof(adev->product_number) - 1;
10576 + }
10577 +- memcpy(adev->product_number, &buff[offset], len);
10578 ++ memcpy(adev->product_number, buff, len);
10579 + adev->product_number[len] = '\0';
10580 +
10581 + addrptr += size + 1;
10582 +@@ -189,7 +184,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
10583 + DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
10584 + len = sizeof(adev->serial) - 1;
10585 + }
10586 +- memcpy(adev->serial, &buff[offset], len);
10587 ++ memcpy(adev->serial, buff, len);
10588 + adev->serial[len] = '\0';
10589 +
10590 + return 0;
10591 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
10592 +index 1ebb91db22743..11a385264bbd2 100644
10593 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
10594 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
10595 +@@ -152,21 +152,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev)
10596 + int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
10597 + {
10598 + struct drm_device *dev;
10599 +- struct pci_dev *parent;
10600 + int r, acpi_status;
10601 +
10602 + dev = adev_to_drm(adev);
10603 +
10604 +- if (amdgpu_has_atpx() &&
10605 +- (amdgpu_is_atpx_hybrid() ||
10606 +- amdgpu_has_atpx_dgpu_power_cntl()) &&
10607 +- ((flags & AMD_IS_APU) == 0) &&
10608 +- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
10609 +- flags |= AMD_IS_PX;
10610 +-
10611 +- parent = pci_upstream_bridge(adev->pdev);
10612 +- adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
10613 +-
10614 + /* amdgpu_device_init should report only fatal error
10615 + * like memory allocation failure or iomapping failure,
10616 + * or memory manager initialization failure, it must
10617 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10618 +index 075429bea4275..b28b5c4908601 100644
10619 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10620 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10621 +@@ -2613,10 +2613,13 @@ static int dm_resume(void *handle)
10622 + * before the 0 streams commit.
10623 + *
10624 + * DC expects that link encoder assignments are *not* valid
10625 +- * when committing a state, so as a workaround it needs to be
10626 +- * cleared here.
10627 ++ * when committing a state, so as a workaround we can copy
10628 ++ * off of the current state.
10629 ++ *
10630 ++ * We lose the previous assignments, but we had already
10631 ++ * commit 0 streams anyway.
10632 + */
10633 +- link_enc_cfg_init(dm->dc, dc_state);
10634 ++ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
10635 +
10636 + if (dc_enable_dmub_notifications(adev->dm.dc))
10637 + amdgpu_dm_outbox_init(adev);
10638 +@@ -8144,6 +8147,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
10639 + mode = amdgpu_dm_create_common_mode(encoder,
10640 + common_modes[i].name, common_modes[i].w,
10641 + common_modes[i].h);
10642 ++ if (!mode)
10643 ++ continue;
10644 ++
10645 + drm_mode_probed_add(connector, mode);
10646 + amdgpu_dm_connector->num_modes++;
10647 + }
10648 +@@ -10858,10 +10864,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10649 + static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10650 + {
10651 + struct drm_connector *connector;
10652 +- struct drm_connector_state *conn_state;
10653 ++ struct drm_connector_state *conn_state, *old_conn_state;
10654 + struct amdgpu_dm_connector *aconnector = NULL;
10655 + int i;
10656 +- for_each_new_connector_in_state(state, connector, conn_state, i) {
10657 ++ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10658 ++ if (!conn_state->crtc)
10659 ++ conn_state = old_conn_state;
10660 ++
10661 + if (conn_state->crtc != crtc)
10662 + continue;
10663 +
10664 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
10665 +index a55944da8d53f..72a3fded7142a 100644
10666 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
10667 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
10668 +@@ -122,6 +122,7 @@ static void remove_link_enc_assignment(
10669 + stream->link_enc = NULL;
10670 + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
10671 + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
10672 ++ dc_stream_release(stream);
10673 + break;
10674 + }
10675 + }
10676 +@@ -271,6 +272,13 @@ void link_enc_cfg_init(
10677 + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
10678 + }
10679 +
10680 ++void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx)
10681 ++{
10682 ++ memcpy(&dst_ctx->res_ctx.link_enc_cfg_ctx,
10683 ++ &src_ctx->res_ctx.link_enc_cfg_ctx,
10684 ++ sizeof(dst_ctx->res_ctx.link_enc_cfg_ctx));
10685 ++}
10686 ++
10687 + void link_enc_cfg_link_encs_assign(
10688 + struct dc *dc,
10689 + struct dc_state *state,
10690 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
10691 +index a4e43b4826e0e..59ceb9ed385db 100644
10692 +--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
10693 ++++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
10694 +@@ -39,6 +39,11 @@ void link_enc_cfg_init(
10695 + const struct dc *dc,
10696 + struct dc_state *state);
10697 +
10698 ++/*
10699 ++ * Copies a link encoder assignment from another state.
10700 ++ */
10701 ++void link_enc_cfg_copy(const struct dc_state *src_ctx, struct dc_state *dst_ctx);
10702 ++
10703 + /*
10704 + * Algorithm for assigning available DIG link encoders to streams.
10705 + *
10706 +diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
10707 +index 0f15bcada4e99..717977aec6d06 100644
10708 +--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
10709 ++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
10710 +@@ -265,14 +265,6 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
10711 + .funcs = &pflip_irq_info_funcs\
10712 + }
10713 +
10714 +-#define vupdate_int_entry(reg_num)\
10715 +- [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
10716 +- IRQ_REG_ENTRY(OTG, reg_num,\
10717 +- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
10718 +- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
10719 +- .funcs = &vblank_irq_info_funcs\
10720 +- }
10721 +-
10722 + /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
10723 + * of DCE's DC_IRQ_SOURCE_VUPDATEx.
10724 + */
10725 +@@ -401,12 +393,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
10726 + dc_underflow_int_entry(6),
10727 + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
10728 + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
10729 +- vupdate_int_entry(0),
10730 +- vupdate_int_entry(1),
10731 +- vupdate_int_entry(2),
10732 +- vupdate_int_entry(3),
10733 +- vupdate_int_entry(4),
10734 +- vupdate_int_entry(5),
10735 + vupdate_no_lock_int_entry(0),
10736 + vupdate_no_lock_int_entry(1),
10737 + vupdate_no_lock_int_entry(2),
10738 +diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
10739 +index 48cc009d9bdf3..dc910003f3cab 100644
10740 +--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
10741 ++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
10742 +@@ -2134,8 +2134,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
10743 + }
10744 + }
10745 +
10746 +- /* setting should not be allowed from VF */
10747 +- if (amdgpu_sriov_vf(adev)) {
10748 ++ /* setting should not be allowed from VF if not in one VF mode */
10749 ++ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
10750 + dev_attr->attr.mode &= ~S_IWUGO;
10751 + dev_attr->store = NULL;
10752 + }
10753 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
10754 +index d93d28c1af95b..b51368fa30253 100644
10755 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
10756 ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
10757 +@@ -138,7 +138,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
10758 + uint32_t *min,
10759 + uint32_t *max)
10760 + {
10761 +- int ret = 0;
10762 ++ int ret = -ENOTSUPP;
10763 +
10764 + if (!min && !max)
10765 + return -EINVAL;
10766 +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
10767 +index 592ecfcf00caf..6a882891d91c5 100644
10768 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
10769 ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
10770 +@@ -169,6 +169,7 @@
10771 + #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
10772 + #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
10773 +
10774 ++#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6)
10775 + #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
10776 + #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
10777 + #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
10778 +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
10779 +index f8e5da1485999..77118c3395bf0 100644
10780 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
10781 ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
10782 +@@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
10783 + * from standby or are enabled. When the HPD goes low the adv7511 is
10784 + * reset and the outputs are disabled which might cause the monitor to
10785 + * go to standby again. To avoid this we ignore the HPD pin for the
10786 +- * first few seconds after enabling the output.
10787 ++ * first few seconds after enabling the output. On the other hand
10788 ++ * adv7535 require to enable HPD Override bit for proper HPD.
10789 + */
10790 +- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10791 +- ADV7511_REG_POWER2_HPD_SRC_MASK,
10792 +- ADV7511_REG_POWER2_HPD_SRC_NONE);
10793 ++ if (adv7511->type == ADV7535)
10794 ++ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10795 ++ ADV7535_REG_POWER2_HPD_OVERRIDE,
10796 ++ ADV7535_REG_POWER2_HPD_OVERRIDE);
10797 ++ else
10798 ++ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10799 ++ ADV7511_REG_POWER2_HPD_SRC_MASK,
10800 ++ ADV7511_REG_POWER2_HPD_SRC_NONE);
10801 + }
10802 +
10803 + static void adv7511_power_on(struct adv7511 *adv7511)
10804 +@@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511)
10805 + static void __adv7511_power_off(struct adv7511 *adv7511)
10806 + {
10807 + /* TODO: setup additional power down modes */
10808 ++ if (adv7511->type == ADV7535)
10809 ++ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10810 ++ ADV7535_REG_POWER2_HPD_OVERRIDE, 0);
10811 ++
10812 + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
10813 + ADV7511_POWER_POWER_DOWN,
10814 + ADV7511_POWER_POWER_DOWN);
10815 +@@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
10816 + status = connector_status_disconnected;
10817 + } else {
10818 + /* Renable HPD sensing */
10819 +- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10820 +- ADV7511_REG_POWER2_HPD_SRC_MASK,
10821 +- ADV7511_REG_POWER2_HPD_SRC_BOTH);
10822 ++ if (adv7511->type == ADV7535)
10823 ++ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10824 ++ ADV7535_REG_POWER2_HPD_OVERRIDE,
10825 ++ ADV7535_REG_POWER2_HPD_OVERRIDE);
10826 ++ else
10827 ++ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
10828 ++ ADV7511_REG_POWER2_HPD_SRC_MASK,
10829 ++ ADV7511_REG_POWER2_HPD_SRC_BOTH);
10830 + }
10831 +
10832 + adv7511->status = status;
10833 +diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
10834 +index 2346dbcc505f2..e596cacce9e3e 100644
10835 +--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
10836 ++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
10837 +@@ -846,7 +846,8 @@ static int segments_edid_read(struct anx7625_data *ctx,
10838 + static int sp_tx_edid_read(struct anx7625_data *ctx,
10839 + u8 *pedid_blocks_buf)
10840 + {
10841 +- u8 offset, edid_pos;
10842 ++ u8 offset;
10843 ++ int edid_pos;
10844 + int count, blocks_num;
10845 + u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
10846 + u8 i, j;
10847 +diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
10848 +index d8a15c459b42c..829e1a1446567 100644
10849 +--- a/drivers/gpu/drm/bridge/cdns-dsi.c
10850 ++++ b/drivers/gpu/drm/bridge/cdns-dsi.c
10851 +@@ -1284,6 +1284,7 @@ static const struct of_device_id cdns_dsi_of_match[] = {
10852 + { .compatible = "cdns,dsi" },
10853 + { },
10854 + };
10855 ++MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
10856 +
10857 + static struct platform_driver cdns_dsi_platform_driver = {
10858 + .probe = cdns_dsi_drm_probe,
10859 +diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
10860 +index dafb1b47c15fb..00597eb54661f 100644
10861 +--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
10862 ++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
10863 +@@ -1164,7 +1164,11 @@ static int lt9611_probe(struct i2c_client *client,
10864 +
10865 + lt9611_enable_hpd_interrupts(lt9611);
10866 +
10867 +- return lt9611_audio_init(dev, lt9611);
10868 ++ ret = lt9611_audio_init(dev, lt9611);
10869 ++ if (ret)
10870 ++ goto err_remove_bridge;
10871 ++
10872 ++ return 0;
10873 +
10874 + err_remove_bridge:
10875 + drm_bridge_remove(&lt9611->bridge);
10876 +diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
10877 +index af07eeb47ca02..6e484d836cfe2 100644
10878 +--- a/drivers/gpu/drm/bridge/nwl-dsi.c
10879 ++++ b/drivers/gpu/drm/bridge/nwl-dsi.c
10880 +@@ -1204,6 +1204,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
10881 +
10882 + ret = nwl_dsi_select_input(dsi);
10883 + if (ret < 0) {
10884 ++ pm_runtime_disable(dev);
10885 + mipi_dsi_host_unregister(&dsi->dsi_host);
10886 + return ret;
10887 + }
10888 +diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
10889 +index 843265d7f1b12..ec7745c31da07 100644
10890 +--- a/drivers/gpu/drm/bridge/sil-sii8620.c
10891 ++++ b/drivers/gpu/drm/bridge/sil-sii8620.c
10892 +@@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
10893 + if (ret) {
10894 + dev_err(ctx->dev, "Failed to register RC device\n");
10895 + ctx->error = ret;
10896 +- rc_free_device(ctx->rc_dev);
10897 ++ rc_free_device(rc_dev);
10898 + return;
10899 + }
10900 + ctx->rc_dev = rc_dev;
10901 +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10902 +index 54d8fdad395f5..97cdc61b57f61 100644
10903 +--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10904 ++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
10905 +@@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
10906 + if (!output_fmts)
10907 + return NULL;
10908 +
10909 +- /* If dw-hdmi is the only bridge, avoid negociating with ourselves */
10910 +- if (list_is_singular(&bridge->encoder->bridge_chain)) {
10911 ++ /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */
10912 ++ if (list_is_singular(&bridge->encoder->bridge_chain) ||
10913 ++ list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) {
10914 + *num_output_fmts = 1;
10915 + output_fmts[0] = MEDIA_BUS_FMT_FIXED;
10916 +
10917 +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
10918 +index e44e18a0112af..56c3fd08c6a0b 100644
10919 +--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
10920 ++++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
10921 +@@ -1199,6 +1199,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
10922 + ret = mipi_dsi_host_register(&dsi->dsi_host);
10923 + if (ret) {
10924 + dev_err(dev, "Failed to register MIPI host: %d\n", ret);
10925 ++ pm_runtime_disable(dev);
10926 + dw_mipi_dsi_debugfs_remove(dsi);
10927 + return ERR_PTR(ret);
10928 + }
10929 +diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
10930 +index 945f08de45f1d..314a84ffcea3d 100644
10931 +--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
10932 ++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
10933 +@@ -560,10 +560,14 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
10934 + ctx->host_node = of_graph_get_remote_port_parent(endpoint);
10935 + of_node_put(endpoint);
10936 +
10937 +- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4)
10938 +- return -EINVAL;
10939 +- if (!ctx->host_node)
10940 +- return -ENODEV;
10941 ++ if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
10942 ++ ret = -EINVAL;
10943 ++ goto err_put_node;
10944 ++ }
10945 ++ if (!ctx->host_node) {
10946 ++ ret = -ENODEV;
10947 ++ goto err_put_node;
10948 ++ }
10949 +
10950 + ctx->lvds_dual_link = false;
10951 + ctx->lvds_dual_link_even_odd_swap = false;
10952 +@@ -590,16 +594,22 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
10953 +
10954 + ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
10955 + if (ret < 0)
10956 +- return ret;
10957 ++ goto err_put_node;
10958 + if (panel) {
10959 + panel_bridge = devm_drm_panel_bridge_add(dev, panel);
10960 +- if (IS_ERR(panel_bridge))
10961 +- return PTR_ERR(panel_bridge);
10962 ++ if (IS_ERR(panel_bridge)) {
10963 ++ ret = PTR_ERR(panel_bridge);
10964 ++ goto err_put_node;
10965 ++ }
10966 + }
10967 +
10968 + ctx->panel_bridge = panel_bridge;
10969 +
10970 + return 0;
10971 ++
10972 ++err_put_node:
10973 ++ of_node_put(ctx->host_node);
10974 ++ return ret;
10975 + }
10976 +
10977 + static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
10978 +@@ -673,8 +683,10 @@ static int sn65dsi83_probe(struct i2c_client *client,
10979 + return ret;
10980 +
10981 + ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
10982 +- if (IS_ERR(ctx->regmap))
10983 +- return PTR_ERR(ctx->regmap);
10984 ++ if (IS_ERR(ctx->regmap)) {
10985 ++ ret = PTR_ERR(ctx->regmap);
10986 ++ goto err_put_node;
10987 ++ }
10988 +
10989 + dev_set_drvdata(dev, ctx);
10990 + i2c_set_clientdata(client, ctx);
10991 +@@ -691,6 +703,8 @@ static int sn65dsi83_probe(struct i2c_client *client,
10992 +
10993 + err_remove_bridge:
10994 + drm_bridge_remove(&ctx->bridge);
10995 ++err_put_node:
10996 ++ of_node_put(ctx->host_node);
10997 + return ret;
10998 + }
10999 +
11000 +diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
11001 +index 23f9073bc473a..c9528aa62c9c9 100644
11002 +--- a/drivers/gpu/drm/drm_dp_helper.c
11003 ++++ b/drivers/gpu/drm/drm_dp_helper.c
11004 +@@ -144,16 +144,6 @@ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
11005 + }
11006 + EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
11007 +
11008 +-u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
11009 +- unsigned int lane)
11010 +-{
11011 +- unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
11012 +- u8 value = dp_link_status(link_status, offset);
11013 +-
11014 +- return (value >> (lane << 1)) & 0x3;
11015 +-}
11016 +-EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
11017 +-
11018 + static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
11019 + {
11020 + if (rd_interval > 4)
11021 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
11022 +index f5f5de362ff2c..b8f5419e514ae 100644
11023 +--- a/drivers/gpu/drm/drm_edid.c
11024 ++++ b/drivers/gpu/drm/drm_edid.c
11025 +@@ -4848,7 +4848,8 @@ bool drm_detect_monitor_audio(struct edid *edid)
11026 + if (!edid_ext)
11027 + goto end;
11028 +
11029 +- has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
11030 ++ has_audio = (edid_ext[0] == CEA_EXT &&
11031 ++ (edid_ext[3] & EDID_BASIC_AUDIO) != 0);
11032 +
11033 + if (has_audio) {
11034 + DRM_DEBUG_KMS("Monitor has basic audio support\n");
11035 +@@ -5075,21 +5076,21 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
11036 +
11037 + if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
11038 + dc_bpc = 10;
11039 +- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
11040 ++ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30;
11041 + DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
11042 + connector->name);
11043 + }
11044 +
11045 + if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
11046 + dc_bpc = 12;
11047 +- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
11048 ++ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36;
11049 + DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
11050 + connector->name);
11051 + }
11052 +
11053 + if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
11054 + dc_bpc = 16;
11055 +- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
11056 ++ info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48;
11057 + DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
11058 + connector->name);
11059 + }
11060 +@@ -5104,16 +5105,9 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
11061 + connector->name, dc_bpc);
11062 + info->bpc = dc_bpc;
11063 +
11064 +- /*
11065 +- * Deep color support mandates RGB444 support for all video
11066 +- * modes and forbids YCRCB422 support for all video modes per
11067 +- * HDMI 1.3 spec.
11068 +- */
11069 +- info->color_formats = DRM_COLOR_FORMAT_RGB444;
11070 +-
11071 + /* YCRCB444 is optional according to spec. */
11072 + if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
11073 +- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
11074 ++ info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes;
11075 + DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
11076 + connector->name);
11077 + }
11078 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
11079 +index ed43b987d306a..f15127a32f7a7 100644
11080 +--- a/drivers/gpu/drm/drm_fb_helper.c
11081 ++++ b/drivers/gpu/drm/drm_fb_helper.c
11082 +@@ -2346,6 +2346,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
11083 + fbi->fbops = &drm_fbdev_fb_ops;
11084 + fbi->screen_size = sizes->surface_height * fb->pitches[0];
11085 + fbi->fix.smem_len = fbi->screen_size;
11086 ++ fbi->flags = FBINFO_DEFAULT;
11087 +
11088 + drm_fb_helper_fill_info(fbi, fb_helper, sizes);
11089 +
11090 +@@ -2353,19 +2354,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
11091 + fbi->screen_buffer = vzalloc(fbi->screen_size);
11092 + if (!fbi->screen_buffer)
11093 + return -ENOMEM;
11094 ++ fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
11095 +
11096 + fbi->fbdefio = &drm_fbdev_defio;
11097 +-
11098 + fb_deferred_io_init(fbi);
11099 + } else {
11100 + /* buffer is mapped for HW framebuffer */
11101 + ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
11102 + if (ret)
11103 + return ret;
11104 +- if (map.is_iomem)
11105 ++ if (map.is_iomem) {
11106 + fbi->screen_base = map.vaddr_iomem;
11107 +- else
11108 ++ } else {
11109 + fbi->screen_buffer = map.vaddr;
11110 ++ fbi->flags |= FBINFO_VIRTFB;
11111 ++ }
11112 +
11113 + /*
11114 + * Shamelessly leak the physical address to user-space. As
11115 +diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
11116 +index c313a5b4549c4..7e48dcd1bee4d 100644
11117 +--- a/drivers/gpu/drm/drm_syncobj.c
11118 ++++ b/drivers/gpu/drm/drm_syncobj.c
11119 +@@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
11120 + &args->handle);
11121 + }
11122 +
11123 ++
11124 ++/*
11125 ++ * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
11126 ++ * added as timeline fence to a chain again.
11127 ++ */
11128 ++static int drm_syncobj_flatten_chain(struct dma_fence **f)
11129 ++{
11130 ++ struct dma_fence_chain *chain = to_dma_fence_chain(*f);
11131 ++ struct dma_fence *tmp, **fences;
11132 ++ struct dma_fence_array *array;
11133 ++ unsigned int count;
11134 ++
11135 ++ if (!chain)
11136 ++ return 0;
11137 ++
11138 ++ count = 0;
11139 ++ dma_fence_chain_for_each(tmp, &chain->base)
11140 ++ ++count;
11141 ++
11142 ++ fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
11143 ++ if (!fences)
11144 ++ return -ENOMEM;
11145 ++
11146 ++ count = 0;
11147 ++ dma_fence_chain_for_each(tmp, &chain->base)
11148 ++ fences[count++] = dma_fence_get(tmp);
11149 ++
11150 ++ array = dma_fence_array_create(count, fences,
11151 ++ dma_fence_context_alloc(1),
11152 ++ 1, false);
11153 ++ if (!array)
11154 ++ goto free_fences;
11155 ++
11156 ++ dma_fence_put(*f);
11157 ++ *f = &array->base;
11158 ++ return 0;
11159 ++
11160 ++free_fences:
11161 ++ while (count--)
11162 ++ dma_fence_put(fences[count]);
11163 ++
11164 ++ kfree(fences);
11165 ++ return -ENOMEM;
11166 ++}
11167 ++
11168 + static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
11169 + struct drm_syncobj_transfer *args)
11170 + {
11171 + struct drm_syncobj *timeline_syncobj = NULL;
11172 +- struct dma_fence *fence;
11173 + struct dma_fence_chain *chain;
11174 ++ struct dma_fence *fence;
11175 + int ret;
11176 +
11177 + timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
11178 +@@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
11179 + args->src_point, args->flags,
11180 + &fence);
11181 + if (ret)
11182 +- goto err;
11183 ++ goto err_put_timeline;
11184 ++
11185 ++ ret = drm_syncobj_flatten_chain(&fence);
11186 ++ if (ret)
11187 ++ goto err_free_fence;
11188 ++
11189 + chain = dma_fence_chain_alloc();
11190 + if (!chain) {
11191 + ret = -ENOMEM;
11192 +- goto err1;
11193 ++ goto err_free_fence;
11194 + }
11195 ++
11196 + drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
11197 +-err1:
11198 ++err_free_fence:
11199 + dma_fence_put(fence);
11200 +-err:
11201 ++err_put_timeline:
11202 + drm_syncobj_put(timeline_syncobj);
11203 +
11204 + return ret;
11205 +diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
11206 +index 8ac196e814d5d..d351b834a6551 100644
11207 +--- a/drivers/gpu/drm/i915/display/intel_bw.c
11208 ++++ b/drivers/gpu/drm/i915/display/intel_bw.c
11209 +@@ -966,7 +966,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
11210 + * cause.
11211 + */
11212 + if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
11213 +- allowed_points = BIT(max_bw_point);
11214 ++ allowed_points &= ADLS_PSF_PT_MASK;
11215 ++ allowed_points |= BIT(max_bw_point);
11216 + drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
11217 + max_bw_point);
11218 + }
11219 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
11220 +index b5e2508db1cfe..62e763faf0aa5 100644
11221 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
11222 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
11223 +@@ -4831,7 +4831,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
11224 + struct intel_dp *intel_dp = &dig_port->dp;
11225 +
11226 + if (dig_port->base.type == INTEL_OUTPUT_EDP &&
11227 +- (long_hpd || !intel_pps_have_power(intel_dp))) {
11228 ++ (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
11229 + /*
11230 + * vdd off can generate a long/short pulse on eDP which
11231 + * would require vdd on to handle it, and thus we
11232 +diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
11233 +index 3b5b9e7b05b7b..866ac090e3e32 100644
11234 +--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
11235 ++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
11236 +@@ -1836,6 +1836,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
11237 + bool has_hdmi_sink)
11238 + {
11239 + struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
11240 ++ enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
11241 +
11242 + if (clock < 25000)
11243 + return MODE_CLOCK_LOW;
11244 +@@ -1856,6 +1857,14 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
11245 + if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
11246 + return MODE_CLOCK_RANGE;
11247 +
11248 ++ /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
11249 ++ if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
11250 ++ return MODE_CLOCK_RANGE;
11251 ++
11252 ++ /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
11253 ++ if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
11254 ++ return MODE_CLOCK_RANGE;
11255 ++
11256 + /*
11257 + * SNPS PHYs' MPLLB table-based programming can only handle a fixed
11258 + * set of link rates.
11259 +@@ -1912,7 +1921,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
11260 + if (ycbcr420_output)
11261 + return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
11262 + else
11263 +- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
11264 ++ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36;
11265 + case 10:
11266 + if (!has_hdmi_sink)
11267 + return false;
11268 +@@ -1920,7 +1929,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
11269 + if (ycbcr420_output)
11270 + return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
11271 + else
11272 +- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30;
11273 ++ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30;
11274 + case 8:
11275 + return true;
11276 + default:
11277 +diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
11278 +index 4a2662838cd8d..df10b6898987a 100644
11279 +--- a/drivers/gpu/drm/i915/display/intel_opregion.c
11280 ++++ b/drivers/gpu/drm/i915/display/intel_opregion.c
11281 +@@ -375,6 +375,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
11282 + return -EINVAL;
11283 + }
11284 +
11285 ++ /*
11286 ++ * The port numbering and mapping here is bizarre. The now-obsolete
11287 ++ * swsci spec supports ports numbered [0..4]. Port E is handled as a
11288 ++ * special case, but port F and beyond are not. The functionality is
11289 ++ * supposed to be obsolete for new platforms. Just bail out if the port
11290 ++ * number is out of bounds after mapping.
11291 ++ */
11292 ++ if (port > 4) {
11293 ++ drm_dbg_kms(&dev_priv->drm,
11294 ++ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
11295 ++ intel_encoder->base.base.id, intel_encoder->base.name,
11296 ++ port_name(intel_encoder->port), port);
11297 ++ return -EINVAL;
11298 ++ }
11299 ++
11300 + if (!enable)
11301 + parm |= 4 << 8;
11302 +
11303 +diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
11304 +index e9c679bb1b2eb..5edd188d97479 100644
11305 +--- a/drivers/gpu/drm/i915/display/intel_pps.c
11306 ++++ b/drivers/gpu/drm/i915/display/intel_pps.c
11307 +@@ -1075,14 +1075,14 @@ static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
11308 + edp_panel_vdd_schedule_off(intel_dp);
11309 + }
11310 +
11311 +-bool intel_pps_have_power(struct intel_dp *intel_dp)
11312 ++bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
11313 + {
11314 + intel_wakeref_t wakeref;
11315 + bool have_power = false;
11316 +
11317 + with_intel_pps_lock(intel_dp, wakeref) {
11318 +- have_power = edp_have_panel_power(intel_dp) &&
11319 +- edp_have_panel_vdd(intel_dp);
11320 ++ have_power = edp_have_panel_power(intel_dp) ||
11321 ++ edp_have_panel_vdd(intel_dp);
11322 + }
11323 +
11324 + return have_power;
11325 +diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
11326 +index fbb47f6f453e4..e64144659d31f 100644
11327 +--- a/drivers/gpu/drm/i915/display/intel_pps.h
11328 ++++ b/drivers/gpu/drm/i915/display/intel_pps.h
11329 +@@ -37,7 +37,7 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp);
11330 + void intel_pps_on(struct intel_dp *intel_dp);
11331 + void intel_pps_off(struct intel_dp *intel_dp);
11332 + void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
11333 +-bool intel_pps_have_power(struct intel_dp *intel_dp);
11334 ++bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
11335 + void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
11336 +
11337 + void intel_pps_init(struct intel_dp *intel_dp);
11338 +diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
11339 +index 00279e8c27756..b00de57cc957e 100644
11340 +--- a/drivers/gpu/drm/i915/display/intel_psr.c
11341 ++++ b/drivers/gpu/drm/i915/display/intel_psr.c
11342 +@@ -1816,6 +1816,9 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
11343 +
11344 + mutex_lock(&psr->lock);
11345 +
11346 ++ if (psr->sink_not_reliable)
11347 ++ goto exit;
11348 ++
11349 + drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
11350 +
11351 + /* Only enable if there is active planes */
11352 +@@ -1826,6 +1829,7 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
11353 + if (crtc_state->crc_enabled && psr->enabled)
11354 + psr_force_hw_tracking_exit(intel_dp);
11355 +
11356 ++exit:
11357 + mutex_unlock(&psr->lock);
11358 + }
11359 + }
11360 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
11361 +index 1478c02a82cbe..936a257b511c8 100644
11362 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
11363 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
11364 +@@ -439,7 +439,7 @@ vm_access(struct vm_area_struct *area, unsigned long addr,
11365 + return -EACCES;
11366 +
11367 + addr -= area->vm_start;
11368 +- if (addr >= obj->base.size)
11369 ++ if (range_overflows_t(u64, addr, len, obj->base.size))
11370 + return -EINVAL;
11371 +
11372 + i915_gem_ww_ctx_init(&ww, true);
11373 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
11374 +index 0c70ab08fc0c9..73efed2f30ca7 100644
11375 +--- a/drivers/gpu/drm/i915/i915_drv.h
11376 ++++ b/drivers/gpu/drm/i915/i915_drv.h
11377 +@@ -1146,7 +1146,7 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
11378 + (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
11379 +
11380 + #define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
11381 +-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \
11382 ++#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
11383 + INTEL_INFO(i915)->media.rel)
11384 + #define IS_MEDIA_VER(i915, from, until) \
11385 + (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
11386 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
11387 +index fae4f7818d28b..12120474c80c7 100644
11388 +--- a/drivers/gpu/drm/i915/intel_pm.c
11389 ++++ b/drivers/gpu/drm/i915/intel_pm.c
11390 +@@ -3722,8 +3722,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
11391 + MISSING_CASE(DISPLAY_VER(dev_priv));
11392 + }
11393 +
11394 +- /* Default to an unusable block time */
11395 +- dev_priv->sagv_block_time_us = -1;
11396 ++ dev_priv->sagv_block_time_us = 0;
11397 + }
11398 +
11399 + /*
11400 +@@ -5652,7 +5651,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
11401 + result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
11402 + result->enable = true;
11403 +
11404 +- if (DISPLAY_VER(dev_priv) < 12)
11405 ++ if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
11406 + result->can_sagv = latency >= dev_priv->sagv_block_time_us;
11407 + }
11408 +
11409 +@@ -5683,7 +5682,10 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
11410 + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
11411 + struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
11412 + struct skl_wm_level *levels = plane_wm->wm;
11413 +- unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
11414 ++ unsigned int latency = 0;
11415 ++
11416 ++ if (dev_priv->sagv_block_time_us)
11417 ++ latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
11418 +
11419 + skl_compute_plane_wm(crtc_state, 0, latency,
11420 + wm_params, &levels[0],
11421 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
11422 +index 80f1d439841a6..26aeaf0ab86ef 100644
11423 +--- a/drivers/gpu/drm/meson/meson_drv.c
11424 ++++ b/drivers/gpu/drm/meson/meson_drv.c
11425 +@@ -302,42 +302,42 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
11426 + if (priv->afbcd.ops) {
11427 + ret = priv->afbcd.ops->init(priv);
11428 + if (ret)
11429 +- return ret;
11430 ++ goto free_drm;
11431 + }
11432 +
11433 + /* Encoder Initialization */
11434 +
11435 + ret = meson_encoder_cvbs_init(priv);
11436 + if (ret)
11437 +- goto free_drm;
11438 ++ goto exit_afbcd;
11439 +
11440 + if (has_components) {
11441 + ret = component_bind_all(drm->dev, drm);
11442 + if (ret) {
11443 + dev_err(drm->dev, "Couldn't bind all components\n");
11444 +- goto free_drm;
11445 ++ goto exit_afbcd;
11446 + }
11447 + }
11448 +
11449 + ret = meson_encoder_hdmi_init(priv);
11450 + if (ret)
11451 +- goto free_drm;
11452 ++ goto exit_afbcd;
11453 +
11454 + ret = meson_plane_create(priv);
11455 + if (ret)
11456 +- goto free_drm;
11457 ++ goto exit_afbcd;
11458 +
11459 + ret = meson_overlay_create(priv);
11460 + if (ret)
11461 +- goto free_drm;
11462 ++ goto exit_afbcd;
11463 +
11464 + ret = meson_crtc_create(priv);
11465 + if (ret)
11466 +- goto free_drm;
11467 ++ goto exit_afbcd;
11468 +
11469 + ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
11470 + if (ret)
11471 +- goto free_drm;
11472 ++ goto exit_afbcd;
11473 +
11474 + drm_mode_config_reset(drm);
11475 +
11476 +@@ -355,6 +355,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
11477 +
11478 + uninstall_irq:
11479 + free_irq(priv->vsync_irq, drm);
11480 ++exit_afbcd:
11481 ++ if (priv->afbcd.ops)
11482 ++ priv->afbcd.ops->exit(priv);
11483 + free_drm:
11484 + drm_dev_put(drm);
11485 +
11486 +@@ -385,10 +388,8 @@ static void meson_drv_unbind(struct device *dev)
11487 + free_irq(priv->vsync_irq, drm);
11488 + drm_dev_put(drm);
11489 +
11490 +- if (priv->afbcd.ops) {
11491 +- priv->afbcd.ops->reset(priv);
11492 +- meson_rdma_free(priv);
11493 +- }
11494 ++ if (priv->afbcd.ops)
11495 ++ priv->afbcd.ops->exit(priv);
11496 + }
11497 +
11498 + static const struct component_master_ops meson_drv_master_ops = {
11499 +diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
11500 +index ffc6b584dbf85..0cdbe899402f8 100644
11501 +--- a/drivers/gpu/drm/meson/meson_osd_afbcd.c
11502 ++++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
11503 +@@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
11504 + return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
11505 + }
11506 +
11507 +-static int meson_gxm_afbcd_init(struct meson_drm *priv)
11508 +-{
11509 +- return 0;
11510 +-}
11511 +-
11512 + static int meson_gxm_afbcd_reset(struct meson_drm *priv)
11513 + {
11514 + writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
11515 +@@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv)
11516 + return 0;
11517 + }
11518 +
11519 ++static int meson_gxm_afbcd_init(struct meson_drm *priv)
11520 ++{
11521 ++ return 0;
11522 ++}
11523 ++
11524 ++static void meson_gxm_afbcd_exit(struct meson_drm *priv)
11525 ++{
11526 ++ meson_gxm_afbcd_reset(priv);
11527 ++}
11528 ++
11529 + static int meson_gxm_afbcd_enable(struct meson_drm *priv)
11530 + {
11531 + writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
11532 +@@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv)
11533 +
11534 + struct meson_afbcd_ops meson_afbcd_gxm_ops = {
11535 + .init = meson_gxm_afbcd_init,
11536 ++ .exit = meson_gxm_afbcd_exit,
11537 + .reset = meson_gxm_afbcd_reset,
11538 + .enable = meson_gxm_afbcd_enable,
11539 + .disable = meson_gxm_afbcd_disable,
11540 +@@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
11541 + return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
11542 + }
11543 +
11544 ++static int meson_g12a_afbcd_reset(struct meson_drm *priv)
11545 ++{
11546 ++ meson_rdma_reset(priv);
11547 ++
11548 ++ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
11549 ++ VIU_SW_RESET_G12A_OSD1_AFBCD,
11550 ++ VIU_SW_RESET);
11551 ++ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
11552 ++
11553 ++ return 0;
11554 ++}
11555 ++
11556 + static int meson_g12a_afbcd_init(struct meson_drm *priv)
11557 + {
11558 + int ret;
11559 +@@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv)
11560 + return 0;
11561 + }
11562 +
11563 +-static int meson_g12a_afbcd_reset(struct meson_drm *priv)
11564 ++static void meson_g12a_afbcd_exit(struct meson_drm *priv)
11565 + {
11566 +- meson_rdma_reset(priv);
11567 +-
11568 +- meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
11569 +- VIU_SW_RESET_G12A_OSD1_AFBCD,
11570 +- VIU_SW_RESET);
11571 +- meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
11572 +-
11573 +- return 0;
11574 ++ meson_g12a_afbcd_reset(priv);
11575 ++ meson_rdma_free(priv);
11576 + }
11577 +
11578 + static int meson_g12a_afbcd_enable(struct meson_drm *priv)
11579 +@@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv)
11580 +
11581 + struct meson_afbcd_ops meson_afbcd_g12a_ops = {
11582 + .init = meson_g12a_afbcd_init,
11583 ++ .exit = meson_g12a_afbcd_exit,
11584 + .reset = meson_g12a_afbcd_reset,
11585 + .enable = meson_g12a_afbcd_enable,
11586 + .disable = meson_g12a_afbcd_disable,
11587 +diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
11588 +index 5e5523304f42f..e77ddeb6416f3 100644
11589 +--- a/drivers/gpu/drm/meson/meson_osd_afbcd.h
11590 ++++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
11591 +@@ -14,6 +14,7 @@
11592 +
11593 + struct meson_afbcd_ops {
11594 + int (*init)(struct meson_drm *priv);
11595 ++ void (*exit)(struct meson_drm *priv);
11596 + int (*reset)(struct meson_drm *priv);
11597 + int (*enable)(struct meson_drm *priv);
11598 + int (*disable)(struct meson_drm *priv);
11599 +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
11600 +index b983541a4c530..cd9ba13ad5fc8 100644
11601 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
11602 ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
11603 +@@ -529,7 +529,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev,
11604 + WREG_GFX(3, 0x00);
11605 + WREG_GFX(4, 0x00);
11606 + WREG_GFX(5, 0x40);
11607 +- WREG_GFX(6, 0x05);
11608 ++ /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode),
11609 ++ * so that it doesn't hang when running kexec/kdump on G200_SE rev42.
11610 ++ */
11611 ++ WREG_GFX(6, 0x0d);
11612 + WREG_GFX(7, 0x0f);
11613 + WREG_GFX(8, 0x0f);
11614 +
11615 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
11616 +index 17cfad6424db6..616be7265da4d 100644
11617 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
11618 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
11619 +@@ -655,19 +655,23 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
11620 + {
11621 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
11622 + const u32 *regs = a6xx_protect;
11623 +- unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
11624 +-
11625 +- BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
11626 +- BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
11627 ++ unsigned i, count, count_max;
11628 +
11629 + if (adreno_is_a650(adreno_gpu)) {
11630 + regs = a650_protect;
11631 + count = ARRAY_SIZE(a650_protect);
11632 + count_max = 48;
11633 ++ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
11634 + } else if (adreno_is_a660_family(adreno_gpu)) {
11635 + regs = a660_protect;
11636 + count = ARRAY_SIZE(a660_protect);
11637 + count_max = 48;
11638 ++ BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
11639 ++ } else {
11640 ++ regs = a6xx_protect;
11641 ++ count = ARRAY_SIZE(a6xx_protect);
11642 ++ count_max = 32;
11643 ++ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
11644 + }
11645 +
11646 + /*
11647 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
11648 +index 1e648db439f9b..16ae0cccbbb1e 100644
11649 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
11650 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
11651 +@@ -168,7 +168,6 @@ enum dpu_enc_rc_states {
11652 + * @vsync_event_work: worker to handle vsync event for autorefresh
11653 + * @topology: topology of the display
11654 + * @idle_timeout: idle timeout duration in milliseconds
11655 +- * @dp: msm_dp pointer, for DP encoders
11656 + */
11657 + struct dpu_encoder_virt {
11658 + struct drm_encoder base;
11659 +@@ -207,8 +206,6 @@ struct dpu_encoder_virt {
11660 + struct msm_display_topology topology;
11661 +
11662 + u32 idle_timeout;
11663 +-
11664 +- struct msm_dp *dp;
11665 + };
11666 +
11667 + #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
11668 +@@ -1099,7 +1096,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
11669 + }
11670 +
11671 +
11672 +- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
11673 ++ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
11674 + dpu_enc->cur_master->hw_mdptop &&
11675 + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
11676 + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
11677 +@@ -2128,8 +2125,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
11678 + timer_setup(&dpu_enc->vsync_event_timer,
11679 + dpu_encoder_vsync_event_handler,
11680 + 0);
11681 +- else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
11682 +- dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]];
11683 +
11684 + INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
11685 + dpu_encoder_off_work);
11686 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
11687 +index f9c83d6e427ad..24fbaf562d418 100644
11688 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
11689 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
11690 +@@ -35,6 +35,14 @@ int dpu_rm_destroy(struct dpu_rm *rm)
11691 + {
11692 + int i;
11693 +
11694 ++ for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
11695 ++ struct dpu_hw_dspp *hw;
11696 ++
11697 ++ if (rm->dspp_blks[i]) {
11698 ++ hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
11699 ++ dpu_hw_dspp_destroy(hw);
11700 ++ }
11701 ++ }
11702 + for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
11703 + struct dpu_hw_pingpong *hw;
11704 +
11705 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
11706 +index c724cb0bde9dc..8d1ea694d06cd 100644
11707 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
11708 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
11709 +@@ -1365,60 +1365,44 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
11710 + return ret;
11711 + }
11712 +
11713 +-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
11714 ++void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
11715 ++{
11716 ++ struct dp_ctrl_private *ctrl;
11717 ++
11718 ++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
11719 ++
11720 ++ dp_catalog_ctrl_reset(ctrl->catalog);
11721 ++
11722 ++ if (enable)
11723 ++ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
11724 ++}
11725 ++
11726 ++void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
11727 + {
11728 + struct dp_ctrl_private *ctrl;
11729 + struct dp_io *dp_io;
11730 + struct phy *phy;
11731 +
11732 +- if (!dp_ctrl) {
11733 +- DRM_ERROR("Invalid input data\n");
11734 +- return -EINVAL;
11735 +- }
11736 +-
11737 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
11738 + dp_io = &ctrl->parser->io;
11739 + phy = dp_io->phy;
11740 +
11741 +- ctrl->dp_ctrl.orientation = flip;
11742 +-
11743 +- if (reset)
11744 +- dp_catalog_ctrl_reset(ctrl->catalog);
11745 +-
11746 +- DRM_DEBUG_DP("flip=%d\n", flip);
11747 + dp_catalog_ctrl_phy_reset(ctrl->catalog);
11748 + phy_init(phy);
11749 +- dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
11750 +-
11751 +- return 0;
11752 + }
11753 +
11754 +-/**
11755 +- * dp_ctrl_host_deinit() - Uninitialize DP controller
11756 +- * @dp_ctrl: Display Port Driver data
11757 +- *
11758 +- * Perform required steps to uninitialize DP controller
11759 +- * and its resources.
11760 +- */
11761 +-void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
11762 ++void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
11763 + {
11764 + struct dp_ctrl_private *ctrl;
11765 + struct dp_io *dp_io;
11766 + struct phy *phy;
11767 +
11768 +- if (!dp_ctrl) {
11769 +- DRM_ERROR("Invalid input data\n");
11770 +- return;
11771 +- }
11772 +-
11773 + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
11774 + dp_io = &ctrl->parser->io;
11775 + phy = dp_io->phy;
11776 +
11777 +- dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
11778 ++ dp_catalog_ctrl_phy_reset(ctrl->catalog);
11779 + phy_exit(phy);
11780 +-
11781 +- DRM_DEBUG_DP("Host deinitialized successfully\n");
11782 + }
11783 +
11784 + static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
11785 +@@ -1488,7 +1472,10 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
11786 + }
11787 +
11788 + phy_power_off(phy);
11789 ++
11790 ++ /* aux channel down, reinit phy */
11791 + phy_exit(phy);
11792 ++ phy_init(phy);
11793 +
11794 + return 0;
11795 + }
11796 +@@ -1761,6 +1748,9 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
11797 + /* end with failure */
11798 + break; /* lane == 1 already */
11799 + }
11800 ++
11801 ++ /* stop link training before start re training */
11802 ++ dp_ctrl_clear_training_pattern(ctrl);
11803 + }
11804 + }
11805 +
11806 +@@ -1893,8 +1883,14 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
11807 + return ret;
11808 + }
11809 +
11810 ++ DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
11811 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
11812 ++
11813 + phy_power_off(phy);
11814 +
11815 ++ DRM_DEBUG_DP("After, phy=%x init_count=%d power_on=%d\n",
11816 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
11817 ++
11818 + /* aux channel down, reinit phy */
11819 + phy_exit(phy);
11820 + phy_init(phy);
11821 +@@ -1903,23 +1899,6 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
11822 + return ret;
11823 + }
11824 +
11825 +-void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
11826 +-{
11827 +- struct dp_ctrl_private *ctrl;
11828 +- struct dp_io *dp_io;
11829 +- struct phy *phy;
11830 +-
11831 +- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
11832 +- dp_io = &ctrl->parser->io;
11833 +- phy = dp_io->phy;
11834 +-
11835 +- dp_catalog_ctrl_reset(ctrl->catalog);
11836 +-
11837 +- phy_exit(phy);
11838 +-
11839 +- DRM_DEBUG_DP("DP off phy done\n");
11840 +-}
11841 +-
11842 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
11843 + {
11844 + struct dp_ctrl_private *ctrl;
11845 +@@ -1947,10 +1926,14 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
11846 + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
11847 + }
11848 +
11849 ++ DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
11850 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
11851 ++
11852 + phy_power_off(phy);
11853 +- phy_exit(phy);
11854 +
11855 +- DRM_DEBUG_DP("DP off done\n");
11856 ++ DRM_DEBUG_DP("After, phy=%x init_count=%d power_on=%d\n",
11857 ++ (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
11858 ++
11859 + return ret;
11860 + }
11861 +
11862 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
11863 +index 2363a2df9597b..2433edbc70a6d 100644
11864 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
11865 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
11866 +@@ -19,12 +19,9 @@ struct dp_ctrl {
11867 + u32 pixel_rate;
11868 + };
11869 +
11870 +-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
11871 +-void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
11872 + int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
11873 + int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
11874 + int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
11875 +-void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
11876 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
11877 + void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
11878 + void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
11879 +@@ -34,4 +31,9 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
11880 + struct dp_power *power, struct dp_catalog *catalog,
11881 + struct dp_parser *parser);
11882 +
11883 ++void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
11884 ++void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
11885 ++void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
11886 ++void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
11887 ++
11888 + #endif /* _DP_CTRL_H_ */
11889 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
11890 +index 7cc4d21f20911..1d7f82e6eafea 100644
11891 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
11892 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
11893 +@@ -83,6 +83,7 @@ struct dp_display_private {
11894 +
11895 + /* state variables */
11896 + bool core_initialized;
11897 ++ bool phy_initialized;
11898 + bool hpd_irq_on;
11899 + bool audio_supported;
11900 +
11901 +@@ -372,36 +373,45 @@ end:
11902 + return rc;
11903 + }
11904 +
11905 +-static void dp_display_host_init(struct dp_display_private *dp, int reset)
11906 ++static void dp_display_host_phy_init(struct dp_display_private *dp)
11907 + {
11908 +- bool flip = false;
11909 ++ DRM_DEBUG_DP("core_init=%d phy_init=%d\n",
11910 ++ dp->core_initialized, dp->phy_initialized);
11911 +
11912 +- DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
11913 +- if (dp->core_initialized) {
11914 +- DRM_DEBUG_DP("DP core already initialized\n");
11915 +- return;
11916 ++ if (!dp->phy_initialized) {
11917 ++ dp_ctrl_phy_init(dp->ctrl);
11918 ++ dp->phy_initialized = true;
11919 ++ }
11920 ++}
11921 ++
11922 ++static void dp_display_host_phy_exit(struct dp_display_private *dp)
11923 ++{
11924 ++ DRM_DEBUG_DP("core_init=%d phy_init=%d\n",
11925 ++ dp->core_initialized, dp->phy_initialized);
11926 ++
11927 ++ if (dp->phy_initialized) {
11928 ++ dp_ctrl_phy_exit(dp->ctrl);
11929 ++ dp->phy_initialized = false;
11930 + }
11931 ++}
11932 +
11933 +- if (dp->usbpd->orientation == ORIENTATION_CC2)
11934 +- flip = true;
11935 ++static void dp_display_host_init(struct dp_display_private *dp)
11936 ++{
11937 ++ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
11938 +
11939 +- dp_power_init(dp->power, flip);
11940 +- dp_ctrl_host_init(dp->ctrl, flip, reset);
11941 ++ dp_power_init(dp->power, false);
11942 ++ dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
11943 + dp_aux_init(dp->aux);
11944 + dp->core_initialized = true;
11945 + }
11946 +
11947 + static void dp_display_host_deinit(struct dp_display_private *dp)
11948 + {
11949 +- if (!dp->core_initialized) {
11950 +- DRM_DEBUG_DP("DP core not initialized\n");
11951 +- return;
11952 +- }
11953 ++ DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
11954 +
11955 +- dp_ctrl_host_deinit(dp->ctrl);
11956 ++ dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
11957 + dp_aux_deinit(dp->aux);
11958 + dp_power_deinit(dp->power);
11959 +-
11960 + dp->core_initialized = false;
11961 + }
11962 +
11963 +@@ -409,7 +419,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
11964 + {
11965 + struct dp_display_private *dp = dev_get_dp_display_private(dev);
11966 +
11967 +- dp_display_host_init(dp, false);
11968 ++ dp_display_host_phy_init(dp);
11969 +
11970 + return dp_display_process_hpd_high(dp);
11971 + }
11972 +@@ -530,11 +540,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
11973 + ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
11974 + if (ret) { /* link train failed */
11975 + dp->hpd_state = ST_DISCONNECTED;
11976 +-
11977 +- if (ret == -ECONNRESET) { /* cable unplugged */
11978 +- dp->core_initialized = false;
11979 +- }
11980 +-
11981 + } else {
11982 + /* start sentinel checking in case of missing uevent */
11983 + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
11984 +@@ -604,8 +609,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
11985 + if (state == ST_DISCONNECTED) {
11986 + /* triggered by irq_hdp with sink_count = 0 */
11987 + if (dp->link->sink_count == 0) {
11988 +- dp_ctrl_off_phy(dp->ctrl);
11989 +- dp->core_initialized = false;
11990 ++ dp_display_host_phy_exit(dp);
11991 + }
11992 + mutex_unlock(&dp->event_mutex);
11993 + return 0;
11994 +@@ -667,7 +671,6 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
11995 + static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
11996 + {
11997 + u32 state;
11998 +- int ret;
11999 +
12000 + mutex_lock(&dp->event_mutex);
12001 +
12002 +@@ -692,16 +695,8 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
12003 + return 0;
12004 + }
12005 +
12006 +- /*
12007 +- * dp core (ahb/aux clks) must be initialized before
12008 +- * irq_hpd be handled
12009 +- */
12010 +- if (dp->core_initialized) {
12011 +- ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
12012 +- if (ret == -ECONNRESET) { /* cable unplugged */
12013 +- dp->core_initialized = false;
12014 +- }
12015 +- }
12016 ++ dp_display_usbpd_attention_cb(&dp->pdev->dev);
12017 ++
12018 + DRM_DEBUG_DP("hpd_state=%d\n", state);
12019 +
12020 + mutex_unlock(&dp->event_mutex);
12021 +@@ -892,12 +887,19 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
12022 +
12023 + dp_display->audio_enabled = false;
12024 +
12025 +- /* triggered by irq_hpd with sink_count = 0 */
12026 + if (dp->link->sink_count == 0) {
12027 ++ /*
12028 ++ * irq_hpd with sink_count = 0
12029 ++ * hdmi unplugged out of dongle
12030 ++ */
12031 + dp_ctrl_off_link_stream(dp->ctrl);
12032 + } else {
12033 ++ /*
12034 ++ * unplugged interrupt
12035 ++ * dongle unplugged out of DUT
12036 ++ */
12037 + dp_ctrl_off(dp->ctrl);
12038 +- dp->core_initialized = false;
12039 ++ dp_display_host_phy_exit(dp);
12040 + }
12041 +
12042 + dp_display->power_on = false;
12043 +@@ -1027,7 +1029,7 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
12044 + static void dp_display_config_hpd(struct dp_display_private *dp)
12045 + {
12046 +
12047 +- dp_display_host_init(dp, true);
12048 ++ dp_display_host_init(dp);
12049 + dp_catalog_ctrl_hpd_config(dp->catalog);
12050 +
12051 + /* Enable interrupt first time
12052 +@@ -1306,20 +1308,23 @@ static int dp_pm_resume(struct device *dev)
12053 + dp->hpd_state = ST_DISCONNECTED;
12054 +
12055 + /* turn on dp ctrl/phy */
12056 +- dp_display_host_init(dp, true);
12057 ++ dp_display_host_init(dp);
12058 +
12059 + dp_catalog_ctrl_hpd_config(dp->catalog);
12060 +
12061 +- /*
12062 +- * set sink to normal operation mode -- D0
12063 +- * before dpcd read
12064 +- */
12065 +- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
12066 +
12067 + if (dp_catalog_link_is_connected(dp->catalog)) {
12068 ++ /*
12069 ++ * set sink to normal operation mode -- D0
12070 ++ * before dpcd read
12071 ++ */
12072 ++ dp_display_host_phy_init(dp);
12073 ++ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
12074 + sink_count = drm_dp_read_sink_count(dp->aux);
12075 + if (sink_count < 0)
12076 + sink_count = 0;
12077 ++
12078 ++ dp_display_host_phy_exit(dp);
12079 + }
12080 +
12081 + dp->link->sink_count = sink_count;
12082 +@@ -1358,18 +1363,16 @@ static int dp_pm_suspend(struct device *dev)
12083 + DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
12084 + dp->core_initialized, dp_display->power_on);
12085 +
12086 +- if (dp->core_initialized == true) {
12087 +- /* mainlink enabled */
12088 +- if (dp_power_clk_status(dp->power, DP_CTRL_PM))
12089 +- dp_ctrl_off_link_stream(dp->ctrl);
12090 +-
12091 +- dp_display_host_deinit(dp);
12092 +- }
12093 ++ /* mainlink enabled */
12094 ++ if (dp_power_clk_status(dp->power, DP_CTRL_PM))
12095 ++ dp_ctrl_off_link_stream(dp->ctrl);
12096 +
12097 +- dp->hpd_state = ST_SUSPENDED;
12098 ++ dp_display_host_phy_exit(dp);
12099 +
12100 + /* host_init will be called at pm_resume */
12101 +- dp->core_initialized = false;
12102 ++ dp_display_host_deinit(dp);
12103 ++
12104 ++ dp->hpd_state = ST_SUSPENDED;
12105 +
12106 + DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
12107 + dp->core_initialized, dp_display->power_on);
12108 +@@ -1460,6 +1463,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
12109 + struct drm_encoder *encoder)
12110 + {
12111 + struct msm_drm_private *priv;
12112 ++ struct dp_display_private *dp_priv;
12113 + int ret;
12114 +
12115 + if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
12116 +@@ -1468,6 +1472,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
12117 + priv = dev->dev_private;
12118 + dp_display->drm_dev = dev;
12119 +
12120 ++ dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
12121 ++
12122 + ret = dp_display_request_irq(dp_display);
12123 + if (ret) {
12124 + DRM_ERROR("request_irq failed, ret=%d\n", ret);
12125 +@@ -1485,6 +1491,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
12126 + return ret;
12127 + }
12128 +
12129 ++ dp_priv->panel->connector = dp_display->connector;
12130 ++
12131 + priv->connectors[priv->num_connectors++] = dp_display->connector;
12132 +
12133 + dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder);
12134 +@@ -1535,7 +1543,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
12135 + state = dp_display->hpd_state;
12136 +
12137 + if (state == ST_DISPLAY_OFF)
12138 +- dp_display_host_init(dp_display, true);
12139 ++ dp_display_host_phy_init(dp_display);
12140 +
12141 + dp_display_enable(dp_display, 0);
12142 +
12143 +diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
12144 +index d4d360d19ebad..26ef41a4c1b68 100644
12145 +--- a/drivers/gpu/drm/msm/dp/dp_drm.c
12146 ++++ b/drivers/gpu/drm/msm/dp/dp_drm.c
12147 +@@ -169,16 +169,6 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
12148 +
12149 + drm_connector_attach_encoder(connector, dp_display->encoder);
12150 +
12151 +- if (dp_display->panel_bridge) {
12152 +- ret = drm_bridge_attach(dp_display->encoder,
12153 +- dp_display->panel_bridge, NULL,
12154 +- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
12155 +- if (ret < 0) {
12156 +- DRM_ERROR("failed to attach panel bridge: %d\n", ret);
12157 +- return ERR_PTR(ret);
12158 +- }
12159 +- }
12160 +-
12161 + return connector;
12162 + }
12163 +
12164 +@@ -246,5 +236,16 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi
12165 + return ERR_PTR(rc);
12166 + }
12167 +
12168 ++ if (dp_display->panel_bridge) {
12169 ++ rc = drm_bridge_attach(dp_display->encoder,
12170 ++ dp_display->panel_bridge, bridge,
12171 ++ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
12172 ++ if (rc < 0) {
12173 ++ DRM_ERROR("failed to attach panel bridge: %d\n", rc);
12174 ++ drm_bridge_remove(bridge);
12175 ++ return ERR_PTR(rc);
12176 ++ }
12177 ++ }
12178 ++
12179 + return bridge;
12180 + }
12181 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
12182 +index 71db10c0f262d..f1418722c5492 100644
12183 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c
12184 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
12185 +@@ -212,6 +212,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
12186 + if (drm_add_modes_noedid(connector, 640, 480))
12187 + drm_set_preferred_mode(connector, 640, 480);
12188 + mutex_unlock(&connector->dev->mode_config.mutex);
12189 ++ } else {
12190 ++ /* always add fail-safe mode as backup mode */
12191 ++ mutex_lock(&connector->dev->mode_config.mutex);
12192 ++ drm_add_modes_noedid(connector, 640, 480);
12193 ++ mutex_unlock(&connector->dev->mode_config.mutex);
12194 + }
12195 +
12196 + if (panel->aux_cfg_update_done) {
12197 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
12198 +index d8128f50b0dd5..0b782cc18b3f4 100644
12199 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
12200 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
12201 +@@ -562,7 +562,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
12202 + char clk_name[32], parent[32], vco_name[32];
12203 + char parent2[32], parent3[32], parent4[32];
12204 + struct clk_init_data vco_init = {
12205 +- .parent_names = (const char *[]){ "xo" },
12206 ++ .parent_data = &(const struct clk_parent_data) {
12207 ++ .fw_name = "ref",
12208 ++ },
12209 + .num_parents = 1,
12210 + .name = vco_name,
12211 + .flags = CLK_IGNORE_UNUSED,
12212 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
12213 +index 7414966f198e3..75557ac99adf1 100644
12214 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
12215 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
12216 +@@ -802,7 +802,9 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **prov
12217 + {
12218 + char clk_name[32], parent[32], vco_name[32];
12219 + struct clk_init_data vco_init = {
12220 +- .parent_names = (const char *[]){ "xo" },
12221 ++ .parent_data = &(const struct clk_parent_data) {
12222 ++ .fw_name = "ref",
12223 ++ },
12224 + .num_parents = 1,
12225 + .name = vco_name,
12226 + .flags = CLK_IGNORE_UNUSED,
12227 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
12228 +index 2da673a2add69..48eab80b548e1 100644
12229 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
12230 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
12231 +@@ -521,7 +521,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
12232 + {
12233 + char clk_name[32], parent1[32], parent2[32], vco_name[32];
12234 + struct clk_init_data vco_init = {
12235 +- .parent_names = (const char *[]){ "xo" },
12236 ++ .parent_data = &(const struct clk_parent_data) {
12237 ++ .fw_name = "ref", .name = "xo",
12238 ++ },
12239 + .num_parents = 1,
12240 + .name = vco_name,
12241 + .flags = CLK_IGNORE_UNUSED,
12242 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
12243 +index 71ed4aa0dc67e..fc56cdcc9ad64 100644
12244 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
12245 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
12246 +@@ -385,7 +385,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
12247 + {
12248 + char *clk_name, *parent_name, *vco_name;
12249 + struct clk_init_data vco_init = {
12250 +- .parent_names = (const char *[]){ "pxo" },
12251 ++ .parent_data = &(const struct clk_parent_data) {
12252 ++ .fw_name = "ref",
12253 ++ },
12254 + .num_parents = 1,
12255 + .flags = CLK_IGNORE_UNUSED,
12256 + .ops = &clk_ops_dsi_pll_28nm_vco,
12257 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
12258 +index 079613d2aaa98..6e506feb111fd 100644
12259 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
12260 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
12261 +@@ -588,7 +588,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
12262 + char clk_name[32], parent[32], vco_name[32];
12263 + char parent2[32], parent3[32], parent4[32];
12264 + struct clk_init_data vco_init = {
12265 +- .parent_names = (const char *[]){ "bi_tcxo" },
12266 ++ .parent_data = &(const struct clk_parent_data) {
12267 ++ .fw_name = "ref",
12268 ++ },
12269 + .num_parents = 1,
12270 + .name = vco_name,
12271 + .flags = CLK_IGNORE_UNUSED,
12272 +@@ -862,20 +864,26 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
12273 + /* Alter PHY configurations if data rate less than 1.5GHZ*/
12274 + less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
12275 +
12276 +- /* For C-PHY, no low power settings for lower clk rate */
12277 +- if (phy->cphy_mode)
12278 +- less_than_1500_mhz = false;
12279 +-
12280 + if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
12281 + vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
12282 +- glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
12283 +- glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
12284 ++ if (phy->cphy_mode) {
12285 ++ glbl_rescode_top_ctrl = 0x00;
12286 ++ glbl_rescode_bot_ctrl = 0x3c;
12287 ++ } else {
12288 ++ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
12289 ++ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
12290 ++ }
12291 + glbl_str_swi_cal_sel_ctrl = 0x00;
12292 + glbl_hstx_str_ctrl_0 = 0x88;
12293 + } else {
12294 + vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
12295 +- glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
12296 +- glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
12297 ++ if (phy->cphy_mode) {
12298 ++ glbl_str_swi_cal_sel_ctrl = 0x03;
12299 ++ glbl_hstx_str_ctrl_0 = 0x66;
12300 ++ } else {
12301 ++ glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
12302 ++ glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
12303 ++ }
12304 + glbl_rescode_top_ctrl = 0x03;
12305 + glbl_rescode_bot_ctrl = 0x3c;
12306 + }
12307 +diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
12308 +index ae2f2abc8f5a5..daf9f87477ba1 100644
12309 +--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
12310 ++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
12311 +@@ -101,7 +101,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder,
12312 + if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
12313 + return -ENODEV;
12314 +
12315 +- props->type = BACKLIGHT_RAW;
12316 + props->max_brightness = 31;
12317 + *ops = &nv40_bl_ops;
12318 + return 0;
12319 +@@ -294,7 +293,8 @@ nv50_backlight_init(struct nouveau_backlight *bl,
12320 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
12321 + struct nvif_object *device = &drm->client.device.object;
12322 +
12323 +- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
12324 ++ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
12325 ++ nv_conn->base.status != connector_status_connected)
12326 + return -ENODEV;
12327 +
12328 + if (nv_conn->type == DCB_CONNECTOR_eDP) {
12329 +@@ -342,7 +342,6 @@ nv50_backlight_init(struct nouveau_backlight *bl,
12330 + else
12331 + *ops = &nva3_bl_ops;
12332 +
12333 +- props->type = BACKLIGHT_RAW;
12334 + props->max_brightness = 100;
12335 +
12336 + return 0;
12337 +@@ -410,6 +409,7 @@ nouveau_backlight_init(struct drm_connector *connector)
12338 + goto fail_alloc;
12339 + }
12340 +
12341 ++ props.type = BACKLIGHT_RAW;
12342 + bl->dev = backlight_device_register(backlight_name, connector->kdev,
12343 + nv_encoder, ops, &props);
12344 + if (IS_ERR(bl->dev)) {
12345 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
12346 +index 667fa016496ee..a6ea89a5d51ab 100644
12347 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
12348 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
12349 +@@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
12350 +
12351 + hsfw->imem_size = desc->code_size;
12352 + hsfw->imem_tag = desc->start_tag;
12353 +- hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
12354 +- memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
12355 +-
12356 ++ hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL);
12357 + nvkm_firmware_put(fw);
12358 +- return 0;
12359 ++ if (!hsfw->imem)
12360 ++ return -ENOMEM;
12361 ++ else
12362 ++ return 0;
12363 + }
12364 +
12365 + int
12366 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
12367 +index bbe628b306ee3..f8355de6e335d 100644
12368 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
12369 ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
12370 +@@ -360,8 +360,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
12371 +
12372 + panfrost_gpu_init_features(pfdev);
12373 +
12374 +- dma_set_mask_and_coherent(pfdev->dev,
12375 ++ err = dma_set_mask_and_coherent(pfdev->dev,
12376 + DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
12377 ++ if (err)
12378 ++ return err;
12379 ++
12380 + dma_set_max_seg_size(pfdev->dev, UINT_MAX);
12381 +
12382 + irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
12383 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
12384 +index 607ad5620bd99..1546abcadacf4 100644
12385 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
12386 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
12387 +@@ -204,7 +204,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
12388 +
12389 + /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
12390 + if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
12391 +- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
12392 ++ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
12393 + (mode_clock * 5/4 <= max_tmds_clock))
12394 + bpc = 10;
12395 + else
12396 +diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
12397 +index 6b4759ed6bfd4..c491429f1a029 100644
12398 +--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
12399 ++++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
12400 +@@ -131,8 +131,10 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
12401 + return false;
12402 +
12403 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
12404 +- if (!txmsg)
12405 ++ if (!txmsg) {
12406 ++ kfree(out);
12407 + return false;
12408 ++ }
12409 +
12410 + drm_dp_encode_sideband_req(in, txmsg);
12411 + ret = drm_dp_decode_sideband_req(txmsg, out);
12412 +diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
12413 +index 70dfb7d1dec55..f5535eb04c6b1 100644
12414 +--- a/drivers/gpu/drm/tegra/dp.c
12415 ++++ b/drivers/gpu/drm/tegra/dp.c
12416 +@@ -549,6 +549,15 @@ static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
12417 + {
12418 + struct drm_dp_link_train_set *adjust = &link->train.adjust;
12419 + unsigned int i;
12420 ++ u8 post_cursor;
12421 ++ int err;
12422 ++
12423 ++ err = drm_dp_dpcd_read(link->aux, DP_ADJUST_REQUEST_POST_CURSOR2,
12424 ++ &post_cursor, sizeof(post_cursor));
12425 ++ if (err < 0) {
12426 ++ DRM_ERROR("failed to read post_cursor2: %d\n", err);
12427 ++ post_cursor = 0;
12428 ++ }
12429 +
12430 + for (i = 0; i < link->lanes; i++) {
12431 + adjust->voltage_swing[i] =
12432 +@@ -560,7 +569,7 @@ static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
12433 + DP_TRAIN_PRE_EMPHASIS_SHIFT;
12434 +
12435 + adjust->post_cursor[i] =
12436 +- drm_dp_get_adjust_request_post_cursor(status, i);
12437 ++ (post_cursor >> (i << 1)) & 0x3;
12438 + }
12439 + }
12440 +
12441 +diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
12442 +index f46d377f0c304..de1333dc0d867 100644
12443 +--- a/drivers/gpu/drm/tegra/dsi.c
12444 ++++ b/drivers/gpu/drm/tegra/dsi.c
12445 +@@ -1538,8 +1538,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
12446 + dsi->slave = platform_get_drvdata(gangster);
12447 + of_node_put(np);
12448 +
12449 +- if (!dsi->slave)
12450 ++ if (!dsi->slave) {
12451 ++ put_device(&gangster->dev);
12452 + return -EPROBE_DEFER;
12453 ++ }
12454 +
12455 + dsi->slave->master = dsi;
12456 + }
12457 +diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
12458 +index 04146da2d1d8e..11576e0297e41 100644
12459 +--- a/drivers/gpu/drm/tiny/simpledrm.c
12460 ++++ b/drivers/gpu/drm/tiny/simpledrm.c
12461 +@@ -798,6 +798,9 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
12462 + if (ret)
12463 + return ret;
12464 + drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
12465 ++ drm_connector_set_panel_orientation_with_quirk(connector,
12466 ++ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
12467 ++ mode->hdisplay, mode->vdisplay);
12468 +
12469 + formats = simpledrm_device_formats(sdev, &nformats);
12470 +
12471 +diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
12472 +index bd46396a1ae07..1afcd54fbbd53 100644
12473 +--- a/drivers/gpu/drm/v3d/v3d_drv.c
12474 ++++ b/drivers/gpu/drm/v3d/v3d_drv.c
12475 +@@ -219,6 +219,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
12476 + int ret;
12477 + u32 mmu_debug;
12478 + u32 ident1;
12479 ++ u64 mask;
12480 +
12481 + v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
12482 + if (IS_ERR(v3d))
12483 +@@ -237,8 +238,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
12484 + return ret;
12485 +
12486 + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
12487 +- dma_set_mask_and_coherent(dev,
12488 +- DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
12489 ++ mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
12490 ++ ret = dma_set_mask_and_coherent(dev, mask);
12491 ++ if (ret)
12492 ++ return ret;
12493 ++
12494 + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
12495 +
12496 + ident1 = V3D_READ(V3D_HUB_IDENT1);
12497 +diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
12498 +index 6994f8c0e02ea..80c685ab3e30d 100644
12499 +--- a/drivers/gpu/host1x/dev.c
12500 ++++ b/drivers/gpu/host1x/dev.c
12501 +@@ -447,7 +447,6 @@ static int host1x_probe(struct platform_device *pdev)
12502 + if (syncpt_irq < 0)
12503 + return syncpt_irq;
12504 +
12505 +- host1x_bo_cache_init(&host->cache);
12506 + mutex_init(&host->devices_lock);
12507 + INIT_LIST_HEAD(&host->devices);
12508 + INIT_LIST_HEAD(&host->list);
12509 +@@ -489,10 +488,12 @@ static int host1x_probe(struct platform_device *pdev)
12510 + if (err)
12511 + return err;
12512 +
12513 ++ host1x_bo_cache_init(&host->cache);
12514 ++
12515 + err = host1x_iommu_init(host);
12516 + if (err < 0) {
12517 + dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
12518 +- return err;
12519 ++ goto destroy_cache;
12520 + }
12521 +
12522 + err = host1x_channel_list_init(&host->channel_list,
12523 +@@ -553,6 +554,8 @@ free_channels:
12524 + host1x_channel_list_free(&host->channel_list);
12525 + iommu_exit:
12526 + host1x_iommu_exit(host);
12527 ++destroy_cache:
12528 ++ host1x_bo_cache_destroy(&host->cache);
12529 +
12530 + return err;
12531 + }
12532 +@@ -568,6 +571,7 @@ static int host1x_remove(struct platform_device *pdev)
12533 +
12534 + host1x_intr_deinit(host);
12535 + host1x_syncpt_deinit(host);
12536 ++ host1x_channel_list_free(&host->channel_list);
12537 + host1x_iommu_exit(host);
12538 + host1x_bo_cache_destroy(&host->cache);
12539 +
12540 +diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
12541 +index ce7740ef449ba..51d0875a34800 100644
12542 +--- a/drivers/greybus/svc.c
12543 ++++ b/drivers/greybus/svc.c
12544 +@@ -866,8 +866,14 @@ static int gb_svc_hello(struct gb_operation *op)
12545 +
12546 + gb_svc_debugfs_init(svc);
12547 +
12548 +- return gb_svc_queue_deferred_request(op);
12549 ++ ret = gb_svc_queue_deferred_request(op);
12550 ++ if (ret)
12551 ++ goto err_remove_debugfs;
12552 ++
12553 ++ return 0;
12554 +
12555 ++err_remove_debugfs:
12556 ++ gb_svc_debugfs_exit(svc);
12557 + err_unregister_device:
12558 + gb_svc_watchdog_destroy(svc);
12559 + device_del(&svc->dev);
12560 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
12561 +index 6726567d72976..8d6fc50dab65f 100644
12562 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
12563 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
12564 +@@ -618,6 +618,17 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
12565 + if (report_type == HID_OUTPUT_REPORT)
12566 + return -EINVAL;
12567 +
12568 ++ /*
12569 ++ * In case of unnumbered reports the response from the device will
12570 ++ * not have the report ID that the upper layers expect, so we need
12571 ++ * to stash it the buffer ourselves and adjust the data size.
12572 ++ */
12573 ++ if (!report_number) {
12574 ++ buf[0] = 0;
12575 ++ buf++;
12576 ++ count--;
12577 ++ }
12578 ++
12579 + /* +2 bytes to include the size of the reply in the query buffer */
12580 + ask_count = min(count + 2, (size_t)ihid->bufsize);
12581 +
12582 +@@ -639,6 +650,9 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
12583 + count = min(count, ret_count - 2);
12584 + memcpy(buf, ihid->rawbuf + 2, count);
12585 +
12586 ++ if (!report_number)
12587 ++ count++;
12588 ++
12589 + return count;
12590 + }
12591 +
12592 +@@ -655,17 +669,19 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
12593 +
12594 + mutex_lock(&ihid->reset_lock);
12595 +
12596 +- if (report_id) {
12597 +- buf++;
12598 +- count--;
12599 +- }
12600 +-
12601 ++ /*
12602 ++ * Note that both numbered and unnumbered reports passed here
12603 ++ * are supposed to have report ID stored in the 1st byte of the
12604 ++ * buffer, so we strip it off unconditionally before passing payload
12605 ++ * to i2c_hid_set_or_send_report which takes care of encoding
12606 ++ * everything properly.
12607 ++ */
12608 + ret = i2c_hid_set_or_send_report(client,
12609 + report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
12610 +- report_id, buf, count, use_data);
12611 ++ report_id, buf + 1, count - 1, use_data);
12612 +
12613 +- if (report_id && ret >= 0)
12614 +- ret++; /* add report_id to the number of transfered bytes */
12615 ++ if (ret >= 0)
12616 ++ ret++; /* add report_id to the number of transferred bytes */
12617 +
12618 + mutex_unlock(&ihid->reset_lock);
12619 +
12620 +diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
12621 +index e24988586710d..16aa030af8453 100644
12622 +--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
12623 ++++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
12624 +@@ -661,21 +661,12 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
12625 + */
12626 + payload_max_size &= ~(L1_CACHE_BYTES - 1);
12627 +
12628 +- dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
12629 ++ dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL);
12630 + if (!dma_buf) {
12631 + client_data->flag_retry = true;
12632 + return -ENOMEM;
12633 + }
12634 +
12635 +- dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
12636 +- DMA_TO_DEVICE);
12637 +- if (dma_mapping_error(devc, dma_buf_phy)) {
12638 +- dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
12639 +- client_data->flag_retry = true;
12640 +- rv = -ENOMEM;
12641 +- goto end_err_dma_buf_release;
12642 +- }
12643 +-
12644 + ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
12645 + ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
12646 + ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
12647 +@@ -695,14 +686,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
12648 + ldr_xfer_dma_frag.fragment.size = fragment_size;
12649 + memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
12650 +
12651 +- dma_sync_single_for_device(devc, dma_buf_phy,
12652 +- payload_max_size,
12653 +- DMA_TO_DEVICE);
12654 +-
12655 +- /*
12656 +- * Flush cache here because the dma_sync_single_for_device()
12657 +- * does not do for x86.
12658 +- */
12659 ++ /* Flush cache to be sure the data is in main memory. */
12660 + clflush_cache_range(dma_buf, payload_max_size);
12661 +
12662 + dev_dbg(cl_data_to_dev(client_data),
12663 +@@ -725,15 +709,8 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
12664 + fragment_offset += fragment_size;
12665 + }
12666 +
12667 +- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
12668 +- kfree(dma_buf);
12669 +- return 0;
12670 +-
12671 + end_err_resp_buf_release:
12672 +- /* Free ISH buffer if not done already, in error case */
12673 +- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
12674 +-end_err_dma_buf_release:
12675 +- kfree(dma_buf);
12676 ++ dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy);
12677 + return rv;
12678 + }
12679 +
12680 +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
12681 +index f2d05bff42453..439f99b8b5de2 100644
12682 +--- a/drivers/hv/hv_balloon.c
12683 ++++ b/drivers/hv/hv_balloon.c
12684 +@@ -1563,7 +1563,7 @@ static void balloon_onchannelcallback(void *context)
12685 + break;
12686 +
12687 + default:
12688 +- pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
12689 ++ pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
12690 +
12691 + }
12692 + }
12693 +diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
12694 +index e0aa8aa46d8c4..ef3a8ecde4dfc 100644
12695 +--- a/drivers/hwmon/pmbus/pmbus.h
12696 ++++ b/drivers/hwmon/pmbus/pmbus.h
12697 +@@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm };
12698 + /*
12699 + * STATUS_VOUT, STATUS_INPUT
12700 + */
12701 ++#define PB_VOLTAGE_VIN_OFF BIT(3)
12702 + #define PB_VOLTAGE_UV_FAULT BIT(4)
12703 + #define PB_VOLTAGE_UV_WARNING BIT(5)
12704 + #define PB_VOLTAGE_OV_WARNING BIT(6)
12705 +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
12706 +index ac2fbee1ba9c0..ca0bfaf2f6911 100644
12707 +--- a/drivers/hwmon/pmbus/pmbus_core.c
12708 ++++ b/drivers/hwmon/pmbus/pmbus_core.c
12709 +@@ -1373,7 +1373,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
12710 + .reg = PMBUS_VIN_UV_FAULT_LIMIT,
12711 + .attr = "lcrit",
12712 + .alarm = "lcrit_alarm",
12713 +- .sbit = PB_VOLTAGE_UV_FAULT,
12714 ++ .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF,
12715 + }, {
12716 + .reg = PMBUS_VIN_OV_WARN_LIMIT,
12717 + .attr = "max",
12718 +@@ -2391,10 +2391,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev)
12719 + {
12720 + struct device *dev = rdev_get_dev(rdev);
12721 + struct i2c_client *client = to_i2c_client(dev->parent);
12722 ++ struct pmbus_data *data = i2c_get_clientdata(client);
12723 + u8 page = rdev_get_id(rdev);
12724 + int ret;
12725 +
12726 ++ mutex_lock(&data->update_lock);
12727 + ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
12728 ++ mutex_unlock(&data->update_lock);
12729 ++
12730 + if (ret < 0)
12731 + return ret;
12732 +
12733 +@@ -2405,11 +2409,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable)
12734 + {
12735 + struct device *dev = rdev_get_dev(rdev);
12736 + struct i2c_client *client = to_i2c_client(dev->parent);
12737 ++ struct pmbus_data *data = i2c_get_clientdata(client);
12738 + u8 page = rdev_get_id(rdev);
12739 ++ int ret;
12740 +
12741 +- return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
12742 +- PB_OPERATION_CONTROL_ON,
12743 +- enable ? PB_OPERATION_CONTROL_ON : 0);
12744 ++ mutex_lock(&data->update_lock);
12745 ++ ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
12746 ++ PB_OPERATION_CONTROL_ON,
12747 ++ enable ? PB_OPERATION_CONTROL_ON : 0);
12748 ++ mutex_unlock(&data->update_lock);
12749 ++
12750 ++ return ret;
12751 + }
12752 +
12753 + static int pmbus_regulator_enable(struct regulator_dev *rdev)
12754 +diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
12755 +index 40cdadad35e52..f85eede6d7663 100644
12756 +--- a/drivers/hwmon/sch56xx-common.c
12757 ++++ b/drivers/hwmon/sch56xx-common.c
12758 +@@ -422,7 +422,7 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
12759 + data->wddev.max_timeout = 255 * 60;
12760 + watchdog_set_nowayout(&data->wddev, nowayout);
12761 + if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
12762 +- set_bit(WDOG_ACTIVE, &data->wddev.status);
12763 ++ set_bit(WDOG_HW_RUNNING, &data->wddev.status);
12764 +
12765 + /* Since the watchdog uses a downcounter there is no register to read
12766 + the BIOS set timeout from (if any was set at all) ->
12767 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
12768 +index a0640fa5c55bd..57e94424a8d65 100644
12769 +--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
12770 ++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
12771 +@@ -367,8 +367,12 @@ static ssize_t mode_store(struct device *dev,
12772 + mode = ETM_MODE_QELEM(config->mode);
12773 + /* start by clearing QE bits */
12774 + config->cfg &= ~(BIT(13) | BIT(14));
12775 +- /* if supported, Q elements with instruction counts are enabled */
12776 +- if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
12777 ++ /*
12778 ++ * if supported, Q elements with instruction counts are enabled.
12779 ++ * Always set the low bit for any requested mode. Valid combos are
12780 ++ * 0b00, 0b01 and 0b11.
12781 ++ */
12782 ++ if (mode && drvdata->q_support)
12783 + config->cfg |= BIT(13);
12784 + /*
12785 + * if supported, Q elements with and without instruction
12786 +diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
12787 +index 098fc34c48293..11850fd8c3b5b 100644
12788 +--- a/drivers/hwtracing/coresight/coresight-syscfg.c
12789 ++++ b/drivers/hwtracing/coresight/coresight-syscfg.c
12790 +@@ -1049,7 +1049,7 @@ static int cscfg_create_device(void)
12791 +
12792 + err = device_register(dev);
12793 + if (err)
12794 +- cscfg_dev_release(dev);
12795 ++ put_device(dev);
12796 +
12797 + create_dev_exit_unlock:
12798 + mutex_unlock(&cscfg_mutex);
12799 +diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
12800 +index 5149454eef4a5..f72c6576d8a36 100644
12801 +--- a/drivers/i2c/busses/i2c-bcm2835.c
12802 ++++ b/drivers/i2c/busses/i2c-bcm2835.c
12803 +@@ -454,18 +454,20 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
12804 + ret = clk_prepare_enable(i2c_dev->bus_clk);
12805 + if (ret) {
12806 + dev_err(&pdev->dev, "Couldn't prepare clock");
12807 +- return ret;
12808 ++ goto err_put_exclusive_rate;
12809 + }
12810 +
12811 + i2c_dev->irq = platform_get_irq(pdev, 0);
12812 +- if (i2c_dev->irq < 0)
12813 +- return i2c_dev->irq;
12814 ++ if (i2c_dev->irq < 0) {
12815 ++ ret = i2c_dev->irq;
12816 ++ goto err_disable_unprepare_clk;
12817 ++ }
12818 +
12819 + ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED,
12820 + dev_name(&pdev->dev), i2c_dev);
12821 + if (ret) {
12822 + dev_err(&pdev->dev, "Could not request IRQ\n");
12823 +- return -ENODEV;
12824 ++ goto err_disable_unprepare_clk;
12825 + }
12826 +
12827 + adap = &i2c_dev->adapter;
12828 +@@ -489,7 +491,16 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
12829 +
12830 + ret = i2c_add_adapter(adap);
12831 + if (ret)
12832 +- free_irq(i2c_dev->irq, i2c_dev);
12833 ++ goto err_free_irq;
12834 ++
12835 ++ return 0;
12836 ++
12837 ++err_free_irq:
12838 ++ free_irq(i2c_dev->irq, i2c_dev);
12839 ++err_disable_unprepare_clk:
12840 ++ clk_disable_unprepare(i2c_dev->bus_clk);
12841 ++err_put_exclusive_rate:
12842 ++ clk_rate_exclusive_put(i2c_dev->bus_clk);
12843 +
12844 + return ret;
12845 + }
12846 +diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
12847 +index ef73a42577cc7..07eb819072c4f 100644
12848 +--- a/drivers/i2c/busses/i2c-meson.c
12849 ++++ b/drivers/i2c/busses/i2c-meson.c
12850 +@@ -465,18 +465,18 @@ static int meson_i2c_probe(struct platform_device *pdev)
12851 + */
12852 + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
12853 +
12854 +- ret = i2c_add_adapter(&i2c->adap);
12855 +- if (ret < 0) {
12856 +- clk_disable_unprepare(i2c->clk);
12857 +- return ret;
12858 +- }
12859 +-
12860 + /* Disable filtering */
12861 + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR,
12862 + REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0);
12863 +
12864 + meson_i2c_set_clk_div(i2c, timings.bus_freq_hz);
12865 +
12866 ++ ret = i2c_add_adapter(&i2c->adap);
12867 ++ if (ret < 0) {
12868 ++ clk_disable_unprepare(i2c->clk);
12869 ++ return ret;
12870 ++ }
12871 ++
12872 + return 0;
12873 + }
12874 +
12875 +diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
12876 +index 4e161a4089d85..7728c8460dc0f 100644
12877 +--- a/drivers/i2c/busses/i2c-pasemi-core.c
12878 ++++ b/drivers/i2c/busses/i2c-pasemi-core.c
12879 +@@ -333,7 +333,6 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
12880 + smbus->adapter.owner = THIS_MODULE;
12881 + snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
12882 + "PA Semi SMBus adapter (%s)", dev_name(smbus->dev));
12883 +- smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
12884 + smbus->adapter.algo = &smbus_algorithm;
12885 + smbus->adapter.algo_data = smbus;
12886 +
12887 +diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
12888 +index 1ab1f28744fb2..cfc89e04eb94c 100644
12889 +--- a/drivers/i2c/busses/i2c-pasemi-pci.c
12890 ++++ b/drivers/i2c/busses/i2c-pasemi-pci.c
12891 +@@ -56,6 +56,7 @@ static int pasemi_smb_pci_probe(struct pci_dev *dev,
12892 + if (!smbus->ioaddr)
12893 + return -EBUSY;
12894 +
12895 ++ smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
12896 + error = pasemi_i2c_common_probe(smbus);
12897 + if (error)
12898 + return error;
12899 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
12900 +index eb789cfb99739..ffefe3c482e9c 100644
12901 +--- a/drivers/i2c/busses/i2c-xiic.c
12902 ++++ b/drivers/i2c/busses/i2c-xiic.c
12903 +@@ -734,7 +734,6 @@ static const struct i2c_adapter_quirks xiic_quirks = {
12904 +
12905 + static const struct i2c_adapter xiic_adapter = {
12906 + .owner = THIS_MODULE,
12907 +- .name = DRIVER_NAME,
12908 + .class = I2C_CLASS_DEPRECATED,
12909 + .algo = &xiic_algorithm,
12910 + .quirks = &xiic_quirks,
12911 +@@ -771,6 +770,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
12912 + i2c_set_adapdata(&i2c->adap, i2c);
12913 + i2c->adap.dev.parent = &pdev->dev;
12914 + i2c->adap.dev.of_node = pdev->dev.of_node;
12915 ++ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
12916 ++ DRIVER_NAME " %s", pdev->name);
12917 +
12918 + mutex_init(&i2c->lock);
12919 +
12920 +diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
12921 +index 5365199a31f41..f7a7405d4350a 100644
12922 +--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
12923 ++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
12924 +@@ -261,7 +261,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
12925 +
12926 + err = device_create_file(&pdev->dev, &dev_attr_available_masters);
12927 + if (err)
12928 +- goto err_rollback;
12929 ++ goto err_rollback_activation;
12930 +
12931 + err = device_create_file(&pdev->dev, &dev_attr_current_master);
12932 + if (err)
12933 +@@ -271,8 +271,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
12934 +
12935 + err_rollback_available:
12936 + device_remove_file(&pdev->dev, &dev_attr_available_masters);
12937 +-err_rollback:
12938 ++err_rollback_activation:
12939 + i2c_demux_deactivate_master(priv);
12940 ++err_rollback:
12941 + for (j = 0; j < i; j++) {
12942 + of_node_put(priv->chan[j].parent_np);
12943 + of_changeset_destroy(&priv->chan[j].chgset);
12944 +diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
12945 +index 64b82b4503ada..a21fdb015c6c0 100644
12946 +--- a/drivers/iio/accel/mma8452.c
12947 ++++ b/drivers/iio/accel/mma8452.c
12948 +@@ -176,6 +176,7 @@ static const struct mma8452_event_regs trans_ev_regs = {
12949 + * @enabled_events: event flags enabled and handled by this driver
12950 + */
12951 + struct mma_chip_info {
12952 ++ const char *name;
12953 + u8 chip_id;
12954 + const struct iio_chan_spec *channels;
12955 + int num_channels;
12956 +@@ -379,8 +380,8 @@ static ssize_t mma8452_show_scale_avail(struct device *dev,
12957 + struct device_attribute *attr,
12958 + char *buf)
12959 + {
12960 +- struct mma8452_data *data = iio_priv(i2c_get_clientdata(
12961 +- to_i2c_client(dev)));
12962 ++ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
12963 ++ struct mma8452_data *data = iio_priv(indio_dev);
12964 +
12965 + return mma8452_show_int_plus_micros(buf, data->chip_info->mma_scales,
12966 + ARRAY_SIZE(data->chip_info->mma_scales));
12967 +@@ -1301,6 +1302,7 @@ enum {
12968 +
12969 + static const struct mma_chip_info mma_chip_info_table[] = {
12970 + [mma8451] = {
12971 ++ .name = "mma8451",
12972 + .chip_id = MMA8451_DEVICE_ID,
12973 + .channels = mma8451_channels,
12974 + .num_channels = ARRAY_SIZE(mma8451_channels),
12975 +@@ -1325,6 +1327,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
12976 + MMA8452_INT_FF_MT,
12977 + },
12978 + [mma8452] = {
12979 ++ .name = "mma8452",
12980 + .chip_id = MMA8452_DEVICE_ID,
12981 + .channels = mma8452_channels,
12982 + .num_channels = ARRAY_SIZE(mma8452_channels),
12983 +@@ -1341,6 +1344,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
12984 + MMA8452_INT_FF_MT,
12985 + },
12986 + [mma8453] = {
12987 ++ .name = "mma8453",
12988 + .chip_id = MMA8453_DEVICE_ID,
12989 + .channels = mma8453_channels,
12990 + .num_channels = ARRAY_SIZE(mma8453_channels),
12991 +@@ -1357,6 +1361,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
12992 + MMA8452_INT_FF_MT,
12993 + },
12994 + [mma8652] = {
12995 ++ .name = "mma8652",
12996 + .chip_id = MMA8652_DEVICE_ID,
12997 + .channels = mma8652_channels,
12998 + .num_channels = ARRAY_SIZE(mma8652_channels),
12999 +@@ -1366,6 +1371,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
13000 + .enabled_events = MMA8452_INT_FF_MT,
13001 + },
13002 + [mma8653] = {
13003 ++ .name = "mma8653",
13004 + .chip_id = MMA8653_DEVICE_ID,
13005 + .channels = mma8653_channels,
13006 + .num_channels = ARRAY_SIZE(mma8653_channels),
13007 +@@ -1380,6 +1386,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
13008 + .enabled_events = MMA8452_INT_FF_MT,
13009 + },
13010 + [fxls8471] = {
13011 ++ .name = "fxls8471",
13012 + .chip_id = FXLS8471_DEVICE_ID,
13013 + .channels = mma8451_channels,
13014 + .num_channels = ARRAY_SIZE(mma8451_channels),
13015 +@@ -1522,13 +1529,6 @@ static int mma8452_probe(struct i2c_client *client,
13016 + struct mma8452_data *data;
13017 + struct iio_dev *indio_dev;
13018 + int ret;
13019 +- const struct of_device_id *match;
13020 +-
13021 +- match = of_match_device(mma8452_dt_ids, &client->dev);
13022 +- if (!match) {
13023 +- dev_err(&client->dev, "unknown device model\n");
13024 +- return -ENODEV;
13025 +- }
13026 +
13027 + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
13028 + if (!indio_dev)
13029 +@@ -1537,7 +1537,14 @@ static int mma8452_probe(struct i2c_client *client,
13030 + data = iio_priv(indio_dev);
13031 + data->client = client;
13032 + mutex_init(&data->lock);
13033 +- data->chip_info = match->data;
13034 ++
13035 ++ data->chip_info = device_get_match_data(&client->dev);
13036 ++ if (!data->chip_info && id) {
13037 ++ data->chip_info = &mma_chip_info_table[id->driver_data];
13038 ++ } else {
13039 ++ dev_err(&client->dev, "unknown device model\n");
13040 ++ return -ENODEV;
13041 ++ }
13042 +
13043 + data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
13044 + if (IS_ERR(data->vdd_reg))
13045 +@@ -1581,11 +1588,11 @@ static int mma8452_probe(struct i2c_client *client,
13046 + }
13047 +
13048 + dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n",
13049 +- match->compatible, data->chip_info->chip_id);
13050 ++ data->chip_info->name, data->chip_info->chip_id);
13051 +
13052 + i2c_set_clientdata(client, indio_dev);
13053 + indio_dev->info = &mma8452_info;
13054 +- indio_dev->name = id->name;
13055 ++ indio_dev->name = data->chip_info->name;
13056 + indio_dev->modes = INDIO_DIRECT_MODE;
13057 + indio_dev->channels = data->chip_info->channels;
13058 + indio_dev->num_channels = data->chip_info->num_channels;
13059 +@@ -1810,7 +1817,7 @@ MODULE_DEVICE_TABLE(i2c, mma8452_id);
13060 + static struct i2c_driver mma8452_driver = {
13061 + .driver = {
13062 + .name = "mma8452",
13063 +- .of_match_table = of_match_ptr(mma8452_dt_ids),
13064 ++ .of_match_table = mma8452_dt_ids,
13065 + .pm = &mma8452_pm_ops,
13066 + },
13067 + .probe = mma8452_probe,
13068 +diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
13069 +index e939b84cbb561..0793d2474cdcf 100644
13070 +--- a/drivers/iio/adc/aspeed_adc.c
13071 ++++ b/drivers/iio/adc/aspeed_adc.c
13072 +@@ -539,7 +539,9 @@ static int aspeed_adc_probe(struct platform_device *pdev)
13073 + data->clk_scaler = devm_clk_hw_register_divider(
13074 + &pdev->dev, clk_name, clk_parent_name, scaler_flags,
13075 + data->base + ASPEED_REG_CLOCK_CONTROL, 0,
13076 +- data->model_data->scaler_bit_width, 0, &data->clk_lock);
13077 ++ data->model_data->scaler_bit_width,
13078 ++ data->model_data->need_prescaler ? CLK_DIVIDER_ONE_BASED : 0,
13079 ++ &data->clk_lock);
13080 + if (IS_ERR(data->clk_scaler))
13081 + return PTR_ERR(data->clk_scaler);
13082 +
13083 +diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
13084 +index afdb59e0b5267..d0223e39d59af 100644
13085 +--- a/drivers/iio/adc/twl6030-gpadc.c
13086 ++++ b/drivers/iio/adc/twl6030-gpadc.c
13087 +@@ -911,6 +911,8 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
13088 + ret = devm_request_threaded_irq(dev, irq, NULL,
13089 + twl6030_gpadc_irq_handler,
13090 + IRQF_ONESHOT, "twl6030_gpadc", indio_dev);
13091 ++ if (ret)
13092 ++ return ret;
13093 +
13094 + ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
13095 + if (ret < 0) {
13096 +diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
13097 +index 8343c5f74121e..7bf097fa10cb7 100644
13098 +--- a/drivers/iio/adc/xilinx-ams.c
13099 ++++ b/drivers/iio/adc/xilinx-ams.c
13100 +@@ -91,8 +91,8 @@
13101 +
13102 + #define AMS_CONF1_SEQ_MASK GENMASK(15, 12)
13103 + #define AMS_CONF1_SEQ_DEFAULT FIELD_PREP(AMS_CONF1_SEQ_MASK, 0)
13104 +-#define AMS_CONF1_SEQ_CONTINUOUS FIELD_PREP(AMS_CONF1_SEQ_MASK, 1)
13105 +-#define AMS_CONF1_SEQ_SINGLE_CHANNEL FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
13106 ++#define AMS_CONF1_SEQ_CONTINUOUS FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
13107 ++#define AMS_CONF1_SEQ_SINGLE_CHANNEL FIELD_PREP(AMS_CONF1_SEQ_MASK, 3)
13108 +
13109 + #define AMS_REG_SEQ0_MASK GENMASK(15, 0)
13110 + #define AMS_REG_SEQ2_MASK GENMASK(21, 16)
13111 +@@ -530,14 +530,18 @@ static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
13112 + return -EINVAL;
13113 + }
13114 +
13115 +- /* set single channel, sequencer off mode */
13116 ++ /* put sysmon in a soft reset to change the sequence */
13117 + ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
13118 +- AMS_CONF1_SEQ_SINGLE_CHANNEL);
13119 ++ AMS_CONF1_SEQ_DEFAULT);
13120 +
13121 + /* write the channel number */
13122 + ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
13123 + channel_num);
13124 +
13125 ++ /* set single channel, sequencer off mode */
13126 ++ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
13127 ++ AMS_CONF1_SEQ_SINGLE_CHANNEL);
13128 ++
13129 + return 0;
13130 + }
13131 +
13132 +@@ -551,6 +555,8 @@ static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
13133 + if (ret)
13134 + return ret;
13135 +
13136 ++ /* clear end-of-conversion flag, wait for next conversion to complete */
13137 ++ writel(expect, ams->base + AMS_ISR_1);
13138 + ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect),
13139 + AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
13140 + if (ret)
13141 +@@ -1224,6 +1230,7 @@ static int ams_init_module(struct iio_dev *indio_dev,
13142 +
13143 + /* add PS channels to iio device channels */
13144 + memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
13145 ++ num_channels = ARRAY_SIZE(ams_ps_channels);
13146 + } else if (fwnode_property_match_string(fwnode, "compatible",
13147 + "xlnx,zynqmp-ams-pl") == 0) {
13148 + ams->pl_base = fwnode_iomap(fwnode, 0);
13149 +diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
13150 +index 774eb3044edd8..271d73e420c42 100644
13151 +--- a/drivers/iio/afe/iio-rescale.c
13152 ++++ b/drivers/iio/afe/iio-rescale.c
13153 +@@ -39,7 +39,7 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
13154 + int *val, int *val2, long mask)
13155 + {
13156 + struct rescale *rescale = iio_priv(indio_dev);
13157 +- unsigned long long tmp;
13158 ++ s64 tmp;
13159 + int ret;
13160 +
13161 + switch (mask) {
13162 +@@ -77,10 +77,10 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
13163 + *val2 = rescale->denominator;
13164 + return IIO_VAL_FRACTIONAL;
13165 + case IIO_VAL_FRACTIONAL_LOG2:
13166 +- tmp = *val * 1000000000LL;
13167 +- do_div(tmp, rescale->denominator);
13168 ++ tmp = (s64)*val * 1000000000LL;
13169 ++ tmp = div_s64(tmp, rescale->denominator);
13170 + tmp *= rescale->numerator;
13171 +- do_div(tmp, 1000000000LL);
13172 ++ tmp = div_s64(tmp, 1000000000LL);
13173 + *val = tmp;
13174 + return ret;
13175 + default:
13176 +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
13177 +index 93f0c6bce502c..b1d8d5a66f01f 100644
13178 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
13179 ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
13180 +@@ -1633,7 +1633,7 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
13181 + struct device_attribute *attr,
13182 + char *buf)
13183 + {
13184 +- struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
13185 ++ struct st_lsm6dsx_sensor *sensor = iio_priv(dev_to_iio_dev(dev));
13186 + const struct st_lsm6dsx_odr_table_entry *odr_table;
13187 + int i, len = 0;
13188 +
13189 +@@ -1651,7 +1651,7 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
13190 + struct device_attribute *attr,
13191 + char *buf)
13192 + {
13193 +- struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
13194 ++ struct st_lsm6dsx_sensor *sensor = iio_priv(dev_to_iio_dev(dev));
13195 + const struct st_lsm6dsx_fs_table_entry *fs_table;
13196 + struct st_lsm6dsx_hw *hw = sensor->hw;
13197 + int i, len = 0;
13198 +diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
13199 +index 0222885b334c1..df74765d33dcb 100644
13200 +--- a/drivers/iio/inkern.c
13201 ++++ b/drivers/iio/inkern.c
13202 +@@ -595,28 +595,50 @@ EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
13203 + static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
13204 + int raw, int *processed, unsigned int scale)
13205 + {
13206 +- int scale_type, scale_val, scale_val2, offset;
13207 ++ int scale_type, scale_val, scale_val2;
13208 ++ int offset_type, offset_val, offset_val2;
13209 + s64 raw64 = raw;
13210 +- int ret;
13211 +
13212 +- ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
13213 +- if (ret >= 0)
13214 +- raw64 += offset;
13215 ++ offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
13216 ++ IIO_CHAN_INFO_OFFSET);
13217 ++ if (offset_type >= 0) {
13218 ++ switch (offset_type) {
13219 ++ case IIO_VAL_INT:
13220 ++ break;
13221 ++ case IIO_VAL_INT_PLUS_MICRO:
13222 ++ case IIO_VAL_INT_PLUS_NANO:
13223 ++ /*
13224 ++ * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
13225 ++ * implicitely truncate the offset to it's integer form.
13226 ++ */
13227 ++ break;
13228 ++ case IIO_VAL_FRACTIONAL:
13229 ++ offset_val /= offset_val2;
13230 ++ break;
13231 ++ case IIO_VAL_FRACTIONAL_LOG2:
13232 ++ offset_val >>= offset_val2;
13233 ++ break;
13234 ++ default:
13235 ++ return -EINVAL;
13236 ++ }
13237 ++
13238 ++ raw64 += offset_val;
13239 ++ }
13240 +
13241 + scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
13242 + IIO_CHAN_INFO_SCALE);
13243 + if (scale_type < 0) {
13244 + /*
13245 +- * Just pass raw values as processed if no scaling is
13246 +- * available.
13247 ++ * If no channel scaling is available apply consumer scale to
13248 ++ * raw value and return.
13249 + */
13250 +- *processed = raw;
13251 ++ *processed = raw * scale;
13252 + return 0;
13253 + }
13254 +
13255 + switch (scale_type) {
13256 + case IIO_VAL_INT:
13257 +- *processed = raw64 * scale_val;
13258 ++ *processed = raw64 * scale_val * scale;
13259 + break;
13260 + case IIO_VAL_INT_PLUS_MICRO:
13261 + if (scale_val2 < 0)
13262 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
13263 +index 50c53409ceb61..fabca5e51e3d4 100644
13264 +--- a/drivers/infiniband/core/cma.c
13265 ++++ b/drivers/infiniband/core/cma.c
13266 +@@ -2642,7 +2642,7 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
13267 + {
13268 + struct rdma_id_private *id_priv;
13269 +
13270 +- if (id->qp_type != IB_QPT_RC)
13271 ++ if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
13272 + return -EINVAL;
13273 +
13274 + id_priv = container_of(id, struct rdma_id_private, id);
13275 +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
13276 +index f5aacaf7fb8ef..ca24ce34da766 100644
13277 +--- a/drivers/infiniband/core/nldev.c
13278 ++++ b/drivers/infiniband/core/nldev.c
13279 +@@ -1951,9 +1951,10 @@ static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
13280 + u32 port)
13281 + {
13282 + struct rdma_hw_stats *stats;
13283 +- int rem, i, index, ret = 0;
13284 + struct nlattr *entry_attr;
13285 + unsigned long *target;
13286 ++ int rem, i, ret = 0;
13287 ++ u32 index;
13288 +
13289 + stats = ib_get_hw_stats_port(device, port);
13290 + if (!stats)
13291 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
13292 +index c18634bec2126..e821dc94a43ed 100644
13293 +--- a/drivers/infiniband/core/verbs.c
13294 ++++ b/drivers/infiniband/core/verbs.c
13295 +@@ -2153,6 +2153,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
13296 + return mr;
13297 +
13298 + mr->device = pd->device;
13299 ++ mr->type = IB_MR_TYPE_USER;
13300 + mr->pd = pd;
13301 + mr->dm = NULL;
13302 + atomic_inc(&pd->usecnt);
13303 +diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
13304 +index dc9211f3a0098..99d0743133cac 100644
13305 +--- a/drivers/infiniband/hw/hfi1/verbs.c
13306 ++++ b/drivers/infiniband/hw/hfi1/verbs.c
13307 +@@ -1397,8 +1397,7 @@ static int query_port(struct rvt_dev_info *rdi, u32 port_num,
13308 + 4096 : hfi1_max_mtu), IB_MTU_4096);
13309 + props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
13310 + mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
13311 +- props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
13312 +- ib_mtu_enum_to_int(props->max_mtu);
13313 ++ props->phys_mtu = hfi1_max_mtu;
13314 +
13315 + return 0;
13316 + }
13317 +diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
13318 +index 3141a9c85de5a..e7554b6043e4b 100644
13319 +--- a/drivers/infiniband/hw/irdma/ctrl.c
13320 ++++ b/drivers/infiniband/hw/irdma/ctrl.c
13321 +@@ -433,7 +433,7 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
13322 +
13323 + cqp = qp->dev->cqp;
13324 + if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
13325 +- qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
13326 ++ qp->qp_uk.qp_id >= (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt))
13327 + return IRDMA_ERR_INVALID_QP_ID;
13328 +
13329 + wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
13330 +@@ -2512,10 +2512,10 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
13331 + enum irdma_status_code ret_code = 0;
13332 +
13333 + cqp = cq->dev->cqp;
13334 +- if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
13335 ++ if (cq->cq_uk.cq_id >= (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt))
13336 + return IRDMA_ERR_INVALID_CQ_ID;
13337 +
13338 +- if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
13339 ++ if (cq->ceq_id >= (cq->dev->hmc_fpm_misc.max_ceqs))
13340 + return IRDMA_ERR_INVALID_CEQ_ID;
13341 +
13342 + ceq = cq->dev->ceq[cq->ceq_id];
13343 +@@ -3617,7 +3617,7 @@ enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
13344 + info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
13345 + return IRDMA_ERR_INVALID_SIZE;
13346 +
13347 +- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
13348 ++ if (info->ceq_id >= (info->dev->hmc_fpm_misc.max_ceqs))
13349 + return IRDMA_ERR_INVALID_CEQ_ID;
13350 + pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
13351 +
13352 +@@ -4166,7 +4166,7 @@ enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
13353 + info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
13354 + return IRDMA_ERR_INVALID_SIZE;
13355 +
13356 +- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
13357 ++ if (info->ceq_id >= (info->dev->hmc_fpm_misc.max_ceqs ))
13358 + return IRDMA_ERR_INVALID_CEQ_ID;
13359 +
13360 + pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
13361 +diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
13362 +index 89234d04cc652..e46e3240cc9fd 100644
13363 +--- a/drivers/infiniband/hw/irdma/hw.c
13364 ++++ b/drivers/infiniband/hw/irdma/hw.c
13365 +@@ -1608,7 +1608,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf)
13366 + info.fpm_commit_buf = mem.va;
13367 +
13368 + info.bar0 = rf->hw.hw_addr;
13369 +- info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn);
13370 ++ info.hmc_fn_id = rf->pf_id;
13371 + info.hw = &rf->hw;
13372 + status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
13373 + if (status)
13374 +diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
13375 +index 43e962b97d6a3..0886783db647c 100644
13376 +--- a/drivers/infiniband/hw/irdma/i40iw_if.c
13377 ++++ b/drivers/infiniband/hw/irdma/i40iw_if.c
13378 +@@ -77,6 +77,7 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
13379 + rf->rdma_ver = IRDMA_GEN_1;
13380 + rf->gen_ops.request_reset = i40iw_request_reset;
13381 + rf->pcidev = cdev_info->pcidev;
13382 ++ rf->pf_id = cdev_info->fid;
13383 + rf->hw.hw_addr = cdev_info->hw_addr;
13384 + rf->cdev = cdev_info;
13385 + rf->msix_count = cdev_info->msix_count;
13386 +diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
13387 +index 9fab29039f1c0..5e8e8860686dc 100644
13388 +--- a/drivers/infiniband/hw/irdma/main.c
13389 ++++ b/drivers/infiniband/hw/irdma/main.c
13390 +@@ -226,6 +226,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
13391 + rf->hw.hw_addr = pf->hw.hw_addr;
13392 + rf->pcidev = pf->pdev;
13393 + rf->msix_count = pf->num_rdma_msix;
13394 ++ rf->pf_id = pf->hw.pf_id;
13395 + rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
13396 + rf->default_vsi.vsi_idx = vsi->vsi_num;
13397 + rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
13398 +diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
13399 +index cb218cab79ac1..fb7faa85e4c9d 100644
13400 +--- a/drivers/infiniband/hw/irdma/main.h
13401 ++++ b/drivers/infiniband/hw/irdma/main.h
13402 +@@ -257,6 +257,7 @@ struct irdma_pci_f {
13403 + u8 *mem_rsrc;
13404 + u8 rdma_ver;
13405 + u8 rst_to;
13406 ++ u8 pf_id;
13407 + enum irdma_protocol_used protocol_used;
13408 + u32 sd_type;
13409 + u32 msix_count;
13410 +diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
13411 +index 398736d8c78a4..e81b74a518dd0 100644
13412 +--- a/drivers/infiniband/hw/irdma/utils.c
13413 ++++ b/drivers/infiniband/hw/irdma/utils.c
13414 +@@ -150,31 +150,35 @@ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
13415 + void *ptr)
13416 + {
13417 + struct in_ifaddr *ifa = ptr;
13418 +- struct net_device *netdev = ifa->ifa_dev->dev;
13419 ++ struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
13420 + struct irdma_device *iwdev;
13421 + struct ib_device *ibdev;
13422 + u32 local_ipaddr;
13423 +
13424 +- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
13425 ++ real_dev = rdma_vlan_dev_real_dev(netdev);
13426 ++ if (!real_dev)
13427 ++ real_dev = netdev;
13428 ++
13429 ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
13430 + if (!ibdev)
13431 + return NOTIFY_DONE;
13432 +
13433 + iwdev = to_iwdev(ibdev);
13434 + local_ipaddr = ntohl(ifa->ifa_address);
13435 + ibdev_dbg(&iwdev->ibdev,
13436 +- "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev,
13437 +- event, &local_ipaddr, netdev->dev_addr);
13438 ++ "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
13439 ++ event, &local_ipaddr, real_dev->dev_addr);
13440 + switch (event) {
13441 + case NETDEV_DOWN:
13442 +- irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
13443 ++ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
13444 + &local_ipaddr, true, IRDMA_ARP_DELETE);
13445 +- irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false);
13446 ++ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
13447 + irdma_gid_change_event(&iwdev->ibdev);
13448 + break;
13449 + case NETDEV_UP:
13450 + case NETDEV_CHANGEADDR:
13451 +- irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr);
13452 +- irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true);
13453 ++ irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
13454 ++ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
13455 + irdma_gid_change_event(&iwdev->ibdev);
13456 + break;
13457 + default:
13458 +@@ -196,32 +200,36 @@ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
13459 + void *ptr)
13460 + {
13461 + struct inet6_ifaddr *ifa = ptr;
13462 +- struct net_device *netdev = ifa->idev->dev;
13463 ++ struct net_device *real_dev, *netdev = ifa->idev->dev;
13464 + struct irdma_device *iwdev;
13465 + struct ib_device *ibdev;
13466 + u32 local_ipaddr6[4];
13467 +
13468 +- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
13469 ++ real_dev = rdma_vlan_dev_real_dev(netdev);
13470 ++ if (!real_dev)
13471 ++ real_dev = netdev;
13472 ++
13473 ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
13474 + if (!ibdev)
13475 + return NOTIFY_DONE;
13476 +
13477 + iwdev = to_iwdev(ibdev);
13478 + irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
13479 + ibdev_dbg(&iwdev->ibdev,
13480 +- "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev,
13481 +- event, local_ipaddr6, netdev->dev_addr);
13482 ++ "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
13483 ++ event, local_ipaddr6, real_dev->dev_addr);
13484 + switch (event) {
13485 + case NETDEV_DOWN:
13486 +- irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
13487 ++ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
13488 + local_ipaddr6, false, IRDMA_ARP_DELETE);
13489 +- irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false);
13490 ++ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
13491 + irdma_gid_change_event(&iwdev->ibdev);
13492 + break;
13493 + case NETDEV_UP:
13494 + case NETDEV_CHANGEADDR:
13495 + irdma_add_arp(iwdev->rf, local_ipaddr6, false,
13496 +- netdev->dev_addr);
13497 +- irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true);
13498 ++ real_dev->dev_addr);
13499 ++ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
13500 + irdma_gid_change_event(&iwdev->ibdev);
13501 + break;
13502 + default:
13503 +@@ -243,14 +251,18 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
13504 + void *ptr)
13505 + {
13506 + struct neighbour *neigh = ptr;
13507 ++ struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
13508 + struct irdma_device *iwdev;
13509 + struct ib_device *ibdev;
13510 + __be32 *p;
13511 + u32 local_ipaddr[4] = {};
13512 + bool ipv4 = true;
13513 +
13514 +- ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev,
13515 +- RDMA_DRIVER_IRDMA);
13516 ++ real_dev = rdma_vlan_dev_real_dev(netdev);
13517 ++ if (!real_dev)
13518 ++ real_dev = netdev;
13519 ++
13520 ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
13521 + if (!ibdev)
13522 + return NOTIFY_DONE;
13523 +
13524 +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
13525 +index 460e757d3fe61..1bf6404ec8340 100644
13526 +--- a/drivers/infiniband/hw/irdma/verbs.c
13527 ++++ b/drivers/infiniband/hw/irdma/verbs.c
13528 +@@ -2509,7 +2509,7 @@ static int irdma_dealloc_mw(struct ib_mw *ibmw)
13529 + cqp_info = &cqp_request->info;
13530 + info = &cqp_info->in.u.dealloc_stag.info;
13531 + memset(info, 0, sizeof(*info));
13532 +- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
13533 ++ info->pd_id = iwpd->sc_pd.pd_id;
13534 + info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
13535 + info->mr = false;
13536 + cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
13537 +@@ -3021,7 +3021,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
13538 + cqp_info = &cqp_request->info;
13539 + info = &cqp_info->in.u.dealloc_stag.info;
13540 + memset(info, 0, sizeof(*info));
13541 +- info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
13542 ++ info->pd_id = iwpd->sc_pd.pd_id;
13543 + info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
13544 + info->mr = true;
13545 + if (iwpbl->pbl_allocated)
13546 +diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
13547 +index 08b7f6bc56c37..15c0884d1f498 100644
13548 +--- a/drivers/infiniband/hw/mlx5/devx.c
13549 ++++ b/drivers/infiniband/hw/mlx5/devx.c
13550 +@@ -1886,8 +1886,10 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
13551 + key_level2,
13552 + obj_event,
13553 + GFP_KERNEL);
13554 +- if (err)
13555 ++ if (err) {
13556 ++ kfree(obj_event);
13557 + return err;
13558 ++ }
13559 + INIT_LIST_HEAD(&obj_event->obj_sub_list);
13560 + }
13561 +
13562 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
13563 +index 157d862fb8642..2910d78333130 100644
13564 +--- a/drivers/infiniband/hw/mlx5/mr.c
13565 ++++ b/drivers/infiniband/hw/mlx5/mr.c
13566 +@@ -585,6 +585,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
13567 + ent = &cache->ent[entry];
13568 + spin_lock_irq(&ent->lock);
13569 + if (list_empty(&ent->head)) {
13570 ++ queue_adjust_cache_locked(ent);
13571 ++ ent->miss++;
13572 + spin_unlock_irq(&ent->lock);
13573 + mr = create_cache_mr(ent);
13574 + if (IS_ERR(mr))
13575 +diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
13576 +index 38c7b6fb39d70..360a567159fe5 100644
13577 +--- a/drivers/infiniband/sw/rxe/rxe_av.c
13578 ++++ b/drivers/infiniband/sw/rxe/rxe_av.c
13579 +@@ -99,11 +99,14 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
13580 + av->network_type = type;
13581 + }
13582 +
13583 +-struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
13584 ++struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp)
13585 + {
13586 + struct rxe_ah *ah;
13587 + u32 ah_num;
13588 +
13589 ++ if (ahp)
13590 ++ *ahp = NULL;
13591 ++
13592 + if (!pkt || !pkt->qp)
13593 + return NULL;
13594 +
13595 +@@ -117,10 +120,22 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
13596 + if (ah_num) {
13597 + /* only new user provider or kernel client */
13598 + ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
13599 +- if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) {
13600 ++ if (!ah) {
13601 + pr_warn("Unable to find AH matching ah_num\n");
13602 + return NULL;
13603 + }
13604 ++
13605 ++ if (rxe_ah_pd(ah) != pkt->qp->pd) {
13606 ++ pr_warn("PDs don't match for AH and QP\n");
13607 ++ rxe_drop_ref(ah);
13608 ++ return NULL;
13609 ++ }
13610 ++
13611 ++ if (ahp)
13612 ++ *ahp = ah;
13613 ++ else
13614 ++ rxe_drop_ref(ah);
13615 ++
13616 + return &ah->av;
13617 + }
13618 +
13619 +diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
13620 +index b1e174afb1d49..b92bb7a152905 100644
13621 +--- a/drivers/infiniband/sw/rxe/rxe_loc.h
13622 ++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
13623 +@@ -19,7 +19,7 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
13624 +
13625 + void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
13626 +
13627 +-struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
13628 ++struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
13629 +
13630 + /* rxe_cq.c */
13631 + int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
13632 +@@ -102,7 +102,8 @@ void rxe_mw_cleanup(struct rxe_pool_elem *arg);
13633 + /* rxe_net.c */
13634 + struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
13635 + int paylen, struct rxe_pkt_info *pkt);
13636 +-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb);
13637 ++int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
13638 ++ struct sk_buff *skb);
13639 + int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
13640 + struct sk_buff *skb);
13641 + const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
13642 +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
13643 +index be72bdbfb4ba7..580cfd742dd2f 100644
13644 +--- a/drivers/infiniband/sw/rxe/rxe_net.c
13645 ++++ b/drivers/infiniband/sw/rxe/rxe_net.c
13646 +@@ -289,13 +289,13 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
13647 + ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
13648 + }
13649 +
13650 +-static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
13651 ++static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
13652 ++ struct sk_buff *skb)
13653 + {
13654 + struct rxe_qp *qp = pkt->qp;
13655 + struct dst_entry *dst;
13656 + bool xnet = false;
13657 + __be16 df = htons(IP_DF);
13658 +- struct rxe_av *av = rxe_get_av(pkt);
13659 + struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
13660 + struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
13661 +
13662 +@@ -315,11 +315,11 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
13663 + return 0;
13664 + }
13665 +
13666 +-static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
13667 ++static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
13668 ++ struct sk_buff *skb)
13669 + {
13670 + struct rxe_qp *qp = pkt->qp;
13671 + struct dst_entry *dst;
13672 +- struct rxe_av *av = rxe_get_av(pkt);
13673 + struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
13674 + struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
13675 +
13676 +@@ -340,16 +340,17 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
13677 + return 0;
13678 + }
13679 +
13680 +-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
13681 ++int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
13682 ++ struct sk_buff *skb)
13683 + {
13684 + int err = 0;
13685 +
13686 + if (skb->protocol == htons(ETH_P_IP))
13687 +- err = prepare4(pkt, skb);
13688 ++ err = prepare4(av, pkt, skb);
13689 + else if (skb->protocol == htons(ETH_P_IPV6))
13690 +- err = prepare6(pkt, skb);
13691 ++ err = prepare6(av, pkt, skb);
13692 +
13693 +- if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
13694 ++ if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
13695 + pkt->mask |= RXE_LOOPBACK_MASK;
13696 +
13697 + return err;
13698 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
13699 +index 5eb89052dd668..204e31bbd61f7 100644
13700 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
13701 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
13702 +@@ -358,14 +358,14 @@ static inline int get_mtu(struct rxe_qp *qp)
13703 + }
13704 +
13705 + static struct sk_buff *init_req_packet(struct rxe_qp *qp,
13706 ++ struct rxe_av *av,
13707 + struct rxe_send_wqe *wqe,
13708 +- int opcode, int payload,
13709 ++ int opcode, u32 payload,
13710 + struct rxe_pkt_info *pkt)
13711 + {
13712 + struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
13713 + struct sk_buff *skb;
13714 + struct rxe_send_wr *ibwr = &wqe->wr;
13715 +- struct rxe_av *av;
13716 + int pad = (-payload) & 0x3;
13717 + int paylen;
13718 + int solicited;
13719 +@@ -374,21 +374,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
13720 +
13721 + /* length from start of bth to end of icrc */
13722 + paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
13723 +-
13724 +- /* pkt->hdr, port_num and mask are initialized in ifc layer */
13725 +- pkt->rxe = rxe;
13726 +- pkt->opcode = opcode;
13727 +- pkt->qp = qp;
13728 +- pkt->psn = qp->req.psn;
13729 +- pkt->mask = rxe_opcode[opcode].mask;
13730 +- pkt->paylen = paylen;
13731 +- pkt->wqe = wqe;
13732 ++ pkt->paylen = paylen;
13733 +
13734 + /* init skb */
13735 +- av = rxe_get_av(pkt);
13736 +- if (!av)
13737 +- return NULL;
13738 +-
13739 + skb = rxe_init_packet(rxe, av, paylen, pkt);
13740 + if (unlikely(!skb))
13741 + return NULL;
13742 +@@ -447,13 +435,13 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
13743 + return skb;
13744 + }
13745 +
13746 +-static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
13747 +- struct rxe_pkt_info *pkt, struct sk_buff *skb,
13748 +- int paylen)
13749 ++static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
13750 ++ struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
13751 ++ struct sk_buff *skb, u32 paylen)
13752 + {
13753 + int err;
13754 +
13755 +- err = rxe_prepare(pkt, skb);
13756 ++ err = rxe_prepare(av, pkt, skb);
13757 + if (err)
13758 + return err;
13759 +
13760 +@@ -497,7 +485,7 @@ static void update_wqe_state(struct rxe_qp *qp,
13761 + static void update_wqe_psn(struct rxe_qp *qp,
13762 + struct rxe_send_wqe *wqe,
13763 + struct rxe_pkt_info *pkt,
13764 +- int payload)
13765 ++ u32 payload)
13766 + {
13767 + /* number of packets left to send including current one */
13768 + int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
13769 +@@ -540,7 +528,7 @@ static void rollback_state(struct rxe_send_wqe *wqe,
13770 + }
13771 +
13772 + static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
13773 +- struct rxe_pkt_info *pkt, int payload)
13774 ++ struct rxe_pkt_info *pkt, u32 payload)
13775 + {
13776 + qp->req.opcode = pkt->opcode;
13777 +
13778 +@@ -608,17 +596,20 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
13779 + int rxe_requester(void *arg)
13780 + {
13781 + struct rxe_qp *qp = (struct rxe_qp *)arg;
13782 ++ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
13783 + struct rxe_pkt_info pkt;
13784 + struct sk_buff *skb;
13785 + struct rxe_send_wqe *wqe;
13786 + enum rxe_hdr_mask mask;
13787 +- int payload;
13788 ++ u32 payload;
13789 + int mtu;
13790 + int opcode;
13791 + int ret;
13792 + struct rxe_send_wqe rollback_wqe;
13793 + u32 rollback_psn;
13794 + struct rxe_queue *q = qp->sq.queue;
13795 ++ struct rxe_ah *ah;
13796 ++ struct rxe_av *av;
13797 +
13798 + rxe_add_ref(qp);
13799 +
13800 +@@ -705,14 +696,28 @@ next_wqe:
13801 + payload = mtu;
13802 + }
13803 +
13804 +- skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
13805 ++ pkt.rxe = rxe;
13806 ++ pkt.opcode = opcode;
13807 ++ pkt.qp = qp;
13808 ++ pkt.psn = qp->req.psn;
13809 ++ pkt.mask = rxe_opcode[opcode].mask;
13810 ++ pkt.wqe = wqe;
13811 ++
13812 ++ av = rxe_get_av(&pkt, &ah);
13813 ++ if (unlikely(!av)) {
13814 ++ pr_err("qp#%d Failed no address vector\n", qp_num(qp));
13815 ++ wqe->status = IB_WC_LOC_QP_OP_ERR;
13816 ++ goto err_drop_ah;
13817 ++ }
13818 ++
13819 ++ skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
13820 + if (unlikely(!skb)) {
13821 + pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
13822 + wqe->status = IB_WC_LOC_QP_OP_ERR;
13823 +- goto err;
13824 ++ goto err_drop_ah;
13825 + }
13826 +
13827 +- ret = finish_packet(qp, wqe, &pkt, skb, payload);
13828 ++ ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
13829 + if (unlikely(ret)) {
13830 + pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
13831 + if (ret == -EFAULT)
13832 +@@ -720,9 +725,12 @@ next_wqe:
13833 + else
13834 + wqe->status = IB_WC_LOC_QP_OP_ERR;
13835 + kfree_skb(skb);
13836 +- goto err;
13837 ++ goto err_drop_ah;
13838 + }
13839 +
13840 ++ if (ah)
13841 ++ rxe_drop_ref(ah);
13842 ++
13843 + /*
13844 + * To prevent a race on wqe access between requester and completer,
13845 + * wqe members state and psn need to be set before calling
13846 +@@ -751,6 +759,9 @@ next_wqe:
13847 +
13848 + goto next_wqe;
13849 +
13850 ++err_drop_ah:
13851 ++ if (ah)
13852 ++ rxe_drop_ref(ah);
13853 + err:
13854 + wqe->state = wqe_state_error;
13855 + __rxe_do_task(&qp->comp.task);
13856 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
13857 +index e8f435fa6e4d7..192cb9a096a14 100644
13858 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
13859 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
13860 +@@ -632,7 +632,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
13861 + if (ack->mask & RXE_ATMACK_MASK)
13862 + atmack_set_orig(ack, qp->resp.atomic_orig);
13863 +
13864 +- err = rxe_prepare(ack, skb);
13865 ++ err = rxe_prepare(&qp->pri_av, ack, skb);
13866 + if (err) {
13867 + kfree_skb(skb);
13868 + return NULL;
13869 +@@ -814,6 +814,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
13870 + return RESPST_ERR_INVALIDATE_RKEY;
13871 + }
13872 +
13873 ++ if (pkt->mask & RXE_END_MASK)
13874 ++ /* We successfully processed this new request. */
13875 ++ qp->resp.msn++;
13876 ++
13877 + /* next expected psn, read handles this separately */
13878 + qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
13879 + qp->resp.ack_psn = qp->resp.psn;
13880 +@@ -821,11 +825,9 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
13881 + qp->resp.opcode = pkt->opcode;
13882 + qp->resp.status = IB_WC_SUCCESS;
13883 +
13884 +- if (pkt->mask & RXE_COMP_MASK) {
13885 +- /* We successfully processed this new request. */
13886 +- qp->resp.msn++;
13887 ++ if (pkt->mask & RXE_COMP_MASK)
13888 + return RESPST_COMPLETE;
13889 +- } else if (qp_type(qp) == IB_QPT_RC)
13890 ++ else if (qp_type(qp) == IB_QPT_RC)
13891 + return RESPST_ACKNOWLEDGE;
13892 + else
13893 + return RESPST_CLEANUP;
13894 +diff --git a/drivers/input/input.c b/drivers/input/input.c
13895 +index c3139bc2aa0db..ccaeb24263854 100644
13896 +--- a/drivers/input/input.c
13897 ++++ b/drivers/input/input.c
13898 +@@ -2285,12 +2285,6 @@ int input_register_device(struct input_dev *dev)
13899 + /* KEY_RESERVED is not supposed to be transmitted to userspace. */
13900 + __clear_bit(KEY_RESERVED, dev->keybit);
13901 +
13902 +- /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
13903 +- if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
13904 +- __clear_bit(BTN_RIGHT, dev->keybit);
13905 +- __clear_bit(BTN_MIDDLE, dev->keybit);
13906 +- }
13907 +-
13908 + /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
13909 + input_cleanse_bitmasks(dev);
13910 +
13911 +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
13912 +index b28c9435b898d..170e0f33040e8 100644
13913 +--- a/drivers/iommu/iova.c
13914 ++++ b/drivers/iommu/iova.c
13915 +@@ -95,10 +95,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
13916 + cached_iova = to_iova(iovad->cached32_node);
13917 + if (free == cached_iova ||
13918 + (free->pfn_hi < iovad->dma_32bit_pfn &&
13919 +- free->pfn_lo >= cached_iova->pfn_lo)) {
13920 ++ free->pfn_lo >= cached_iova->pfn_lo))
13921 + iovad->cached32_node = rb_next(&free->node);
13922 ++
13923 ++ if (free->pfn_lo < iovad->dma_32bit_pfn)
13924 + iovad->max32_alloc_size = iovad->dma_32bit_pfn;
13925 +- }
13926 +
13927 + cached_iova = to_iova(iovad->cached_node);
13928 + if (free->pfn_lo >= cached_iova->pfn_lo)
13929 +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
13930 +index ca752bdc710f6..61bd9a3004ede 100644
13931 +--- a/drivers/iommu/ipmmu-vmsa.c
13932 ++++ b/drivers/iommu/ipmmu-vmsa.c
13933 +@@ -1006,7 +1006,9 @@ static int ipmmu_probe(struct platform_device *pdev)
13934 + bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
13935 + mmu->features = of_device_get_match_data(&pdev->dev);
13936 + memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
13937 +- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
13938 ++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
13939 ++ if (ret)
13940 ++ return ret;
13941 +
13942 + /* Map I/O memory and request IRQ. */
13943 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
13944 +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
13945 +index 25b834104790c..5971a11686662 100644
13946 +--- a/drivers/iommu/mtk_iommu.c
13947 ++++ b/drivers/iommu/mtk_iommu.c
13948 +@@ -562,22 +562,52 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
13949 + {
13950 + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
13951 + struct mtk_iommu_data *data;
13952 ++ struct device_link *link;
13953 ++ struct device *larbdev;
13954 ++ unsigned int larbid, larbidx, i;
13955 +
13956 + if (!fwspec || fwspec->ops != &mtk_iommu_ops)
13957 + return ERR_PTR(-ENODEV); /* Not a iommu client device */
13958 +
13959 + data = dev_iommu_priv_get(dev);
13960 +
13961 ++ /*
13962 ++ * Link the consumer device with the smi-larb device(supplier).
13963 ++ * The device that connects with each a larb is a independent HW.
13964 ++ * All the ports in each a device should be in the same larbs.
13965 ++ */
13966 ++ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
13967 ++ for (i = 1; i < fwspec->num_ids; i++) {
13968 ++ larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
13969 ++ if (larbid != larbidx) {
13970 ++ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
13971 ++ larbid, larbidx);
13972 ++ return ERR_PTR(-EINVAL);
13973 ++ }
13974 ++ }
13975 ++ larbdev = data->larb_imu[larbid].dev;
13976 ++ link = device_link_add(dev, larbdev,
13977 ++ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
13978 ++ if (!link)
13979 ++ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
13980 + return &data->iommu;
13981 + }
13982 +
13983 + static void mtk_iommu_release_device(struct device *dev)
13984 + {
13985 + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
13986 ++ struct mtk_iommu_data *data;
13987 ++ struct device *larbdev;
13988 ++ unsigned int larbid;
13989 +
13990 + if (!fwspec || fwspec->ops != &mtk_iommu_ops)
13991 + return;
13992 +
13993 ++ data = dev_iommu_priv_get(dev);
13994 ++ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
13995 ++ larbdev = data->larb_imu[larbid].dev;
13996 ++ device_link_remove(dev, larbdev);
13997 ++
13998 + iommu_fwspec_free(dev);
13999 + }
14000 +
14001 +@@ -848,7 +878,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
14002 + plarbdev = of_find_device_by_node(larbnode);
14003 + if (!plarbdev) {
14004 + of_node_put(larbnode);
14005 +- return -EPROBE_DEFER;
14006 ++ return -ENODEV;
14007 + }
14008 + data->larb_imu[id].dev = &plarbdev->dev;
14009 +
14010 +diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
14011 +index be22fcf988cee..bc7ee90b9373d 100644
14012 +--- a/drivers/iommu/mtk_iommu_v1.c
14013 ++++ b/drivers/iommu/mtk_iommu_v1.c
14014 +@@ -423,7 +423,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
14015 + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
14016 + struct of_phandle_args iommu_spec;
14017 + struct mtk_iommu_data *data;
14018 +- int err, idx = 0;
14019 ++ int err, idx = 0, larbid, larbidx;
14020 ++ struct device_link *link;
14021 ++ struct device *larbdev;
14022 ++
14023 ++ /*
14024 ++ * In the deferred case, free the existed fwspec.
14025 ++ * Always initialize the fwspec internally.
14026 ++ */
14027 ++ if (fwspec) {
14028 ++ iommu_fwspec_free(dev);
14029 ++ fwspec = dev_iommu_fwspec_get(dev);
14030 ++ }
14031 +
14032 + while (!of_parse_phandle_with_args(dev->of_node, "iommus",
14033 + "#iommu-cells",
14034 +@@ -444,6 +455,23 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
14035 +
14036 + data = dev_iommu_priv_get(dev);
14037 +
14038 ++ /* Link the consumer device with the smi-larb device(supplier) */
14039 ++ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
14040 ++ for (idx = 1; idx < fwspec->num_ids; idx++) {
14041 ++ larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
14042 ++ if (larbid != larbidx) {
14043 ++ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
14044 ++ larbid, larbidx);
14045 ++ return ERR_PTR(-EINVAL);
14046 ++ }
14047 ++ }
14048 ++
14049 ++ larbdev = data->larb_imu[larbid].dev;
14050 ++ link = device_link_add(dev, larbdev,
14051 ++ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
14052 ++ if (!link)
14053 ++ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
14054 ++
14055 + return &data->iommu;
14056 + }
14057 +
14058 +@@ -464,10 +492,18 @@ static void mtk_iommu_probe_finalize(struct device *dev)
14059 + static void mtk_iommu_release_device(struct device *dev)
14060 + {
14061 + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
14062 ++ struct mtk_iommu_data *data;
14063 ++ struct device *larbdev;
14064 ++ unsigned int larbid;
14065 +
14066 + if (!fwspec || fwspec->ops != &mtk_iommu_ops)
14067 + return;
14068 +
14069 ++ data = dev_iommu_priv_get(dev);
14070 ++ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
14071 ++ larbdev = data->larb_imu[larbid].dev;
14072 ++ device_link_remove(dev, larbdev);
14073 ++
14074 + iommu_fwspec_free(dev);
14075 + }
14076 +
14077 +@@ -595,7 +631,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
14078 + plarbdev = of_find_device_by_node(larbnode);
14079 + if (!plarbdev) {
14080 + of_node_put(larbnode);
14081 +- return -EPROBE_DEFER;
14082 ++ return -ENODEV;
14083 + }
14084 + data->larb_imu[i].dev = &plarbdev->dev;
14085 +
14086 +diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
14087 +index ba4759b3e2693..94230306e0eee 100644
14088 +--- a/drivers/irqchip/irq-nvic.c
14089 ++++ b/drivers/irqchip/irq-nvic.c
14090 +@@ -107,6 +107,7 @@ static int __init nvic_of_init(struct device_node *node,
14091 +
14092 + if (!nvic_irq_domain) {
14093 + pr_warn("Failed to allocate irq domain\n");
14094 ++ iounmap(nvic_base);
14095 + return -ENOMEM;
14096 + }
14097 +
14098 +@@ -116,6 +117,7 @@ static int __init nvic_of_init(struct device_node *node,
14099 + if (ret) {
14100 + pr_warn("Failed to allocate irq chips\n");
14101 + irq_domain_remove(nvic_irq_domain);
14102 ++ iounmap(nvic_base);
14103 + return ret;
14104 + }
14105 +
14106 +diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
14107 +index 173e6520e06ec..c0b457f26ec41 100644
14108 +--- a/drivers/irqchip/qcom-pdc.c
14109 ++++ b/drivers/irqchip/qcom-pdc.c
14110 +@@ -56,17 +56,18 @@ static u32 pdc_reg_read(int reg, u32 i)
14111 + static void pdc_enable_intr(struct irq_data *d, bool on)
14112 + {
14113 + int pin_out = d->hwirq;
14114 ++ unsigned long flags;
14115 + u32 index, mask;
14116 + u32 enable;
14117 +
14118 + index = pin_out / 32;
14119 + mask = pin_out % 32;
14120 +
14121 +- raw_spin_lock(&pdc_lock);
14122 ++ raw_spin_lock_irqsave(&pdc_lock, flags);
14123 + enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
14124 + enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
14125 + pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
14126 +- raw_spin_unlock(&pdc_lock);
14127 ++ raw_spin_unlock_irqrestore(&pdc_lock, flags);
14128 + }
14129 +
14130 + static void qcom_pdc_gic_disable(struct irq_data *d)
14131 +diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
14132 +index 544de2db64531..a0c252415c868 100644
14133 +--- a/drivers/mailbox/imx-mailbox.c
14134 ++++ b/drivers/mailbox/imx-mailbox.c
14135 +@@ -14,6 +14,7 @@
14136 + #include <linux/module.h>
14137 + #include <linux/of_device.h>
14138 + #include <linux/pm_runtime.h>
14139 ++#include <linux/suspend.h>
14140 + #include <linux/slab.h>
14141 +
14142 + #define IMX_MU_CHANS 16
14143 +@@ -76,6 +77,7 @@ struct imx_mu_priv {
14144 + const struct imx_mu_dcfg *dcfg;
14145 + struct clk *clk;
14146 + int irq;
14147 ++ bool suspend;
14148 +
14149 + u32 xcr[4];
14150 +
14151 +@@ -334,6 +336,9 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
14152 + return IRQ_NONE;
14153 + }
14154 +
14155 ++ if (priv->suspend)
14156 ++ pm_system_wakeup();
14157 ++
14158 + return IRQ_HANDLED;
14159 + }
14160 +
14161 +@@ -702,6 +707,8 @@ static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
14162 + priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
14163 + }
14164 +
14165 ++ priv->suspend = true;
14166 ++
14167 + return 0;
14168 + }
14169 +
14170 +@@ -718,11 +725,13 @@ static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
14171 + * send failed, may lead to system freeze. This issue
14172 + * is observed by testing freeze mode suspend.
14173 + */
14174 +- if (!imx_mu_read(priv, priv->dcfg->xCR[0]) && !priv->clk) {
14175 ++ if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
14176 + for (i = 0; i < IMX_MU_xCR_MAX; i++)
14177 + imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
14178 + }
14179 +
14180 ++ priv->suspend = false;
14181 ++
14182 + return 0;
14183 + }
14184 +
14185 +diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
14186 +index acd0675da681e..78f7265039c66 100644
14187 +--- a/drivers/mailbox/tegra-hsp.c
14188 ++++ b/drivers/mailbox/tegra-hsp.c
14189 +@@ -412,6 +412,11 @@ static int tegra_hsp_mailbox_flush(struct mbox_chan *chan,
14190 + value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
14191 + if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
14192 + mbox_chan_txdone(chan, 0);
14193 ++
14194 ++ /* Wait until channel is empty */
14195 ++ if (chan->active_req != NULL)
14196 ++ continue;
14197 ++
14198 + return 0;
14199 + }
14200 +
14201 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
14202 +index 88c573eeb5982..ad9f16689419d 100644
14203 +--- a/drivers/md/bcache/btree.c
14204 ++++ b/drivers/md/bcache/btree.c
14205 +@@ -2060,9 +2060,11 @@ int bch_btree_check(struct cache_set *c)
14206 + }
14207 + }
14208 +
14209 ++ /*
14210 ++ * Must wait for all threads to stop.
14211 ++ */
14212 + wait_event_interruptible(check_state->wait,
14213 +- atomic_read(&check_state->started) == 0 ||
14214 +- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
14215 ++ atomic_read(&check_state->started) == 0);
14216 +
14217 + for (i = 0; i < check_state->total_threads; i++) {
14218 + if (check_state->infos[i].result) {
14219 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
14220 +index c7560f66dca88..68d3dd6b4f119 100644
14221 +--- a/drivers/md/bcache/writeback.c
14222 ++++ b/drivers/md/bcache/writeback.c
14223 +@@ -998,9 +998,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
14224 + }
14225 + }
14226 +
14227 ++ /*
14228 ++ * Must wait for all threads to stop.
14229 ++ */
14230 + wait_event_interruptible(state->wait,
14231 +- atomic_read(&state->started) == 0 ||
14232 +- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
14233 ++ atomic_read(&state->started) == 0);
14234 +
14235 + out:
14236 + kfree(state);
14237 +diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
14238 +index b855fef4f38a6..adb9604e85ac4 100644
14239 +--- a/drivers/md/dm-core.h
14240 ++++ b/drivers/md/dm-core.h
14241 +@@ -65,6 +65,8 @@ struct mapped_device {
14242 + struct gendisk *disk;
14243 + struct dax_device *dax_dev;
14244 +
14245 ++ unsigned long __percpu *pending_io;
14246 ++
14247 + /*
14248 + * A list of ios that arrived while we were suspended.
14249 + */
14250 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
14251 +index d4ae31558826a..f51aea71cb036 100644
14252 +--- a/drivers/md/dm-crypt.c
14253 ++++ b/drivers/md/dm-crypt.c
14254 +@@ -2590,7 +2590,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
14255 +
14256 + static int get_key_size(char **key_string)
14257 + {
14258 +- return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
14259 ++ return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
14260 + }
14261 +
14262 + #endif /* CONFIG_KEYS */
14263 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
14264 +index eb4b5e52bd6ff..9399006dbc546 100644
14265 +--- a/drivers/md/dm-integrity.c
14266 ++++ b/drivers/md/dm-integrity.c
14267 +@@ -2473,9 +2473,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
14268 + dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
14269 + sec &= ~(sector_t)(ic->sectors_per_block - 1);
14270 + }
14271 ++ if (unlikely(sec >= ic->provided_data_sectors)) {
14272 ++ journal_entry_set_unused(je);
14273 ++ continue;
14274 ++ }
14275 + }
14276 +- if (unlikely(sec >= ic->provided_data_sectors))
14277 +- continue;
14278 + get_area_and_offset(ic, sec, &area, &offset);
14279 + restore_last_bytes(ic, access_journal_data(ic, i, j), je);
14280 + for (k = j + 1; k < ic->journal_section_entries; k++) {
14281 +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
14282 +index 35d368c418d03..0e039a8c0bf2e 100644
14283 +--- a/drivers/md/dm-stats.c
14284 ++++ b/drivers/md/dm-stats.c
14285 +@@ -195,6 +195,7 @@ void dm_stats_init(struct dm_stats *stats)
14286 +
14287 + mutex_init(&stats->mutex);
14288 + INIT_LIST_HEAD(&stats->list);
14289 ++ stats->precise_timestamps = false;
14290 + stats->last = alloc_percpu(struct dm_stats_last_position);
14291 + for_each_possible_cpu(cpu) {
14292 + last = per_cpu_ptr(stats->last, cpu);
14293 +@@ -231,6 +232,22 @@ void dm_stats_cleanup(struct dm_stats *stats)
14294 + mutex_destroy(&stats->mutex);
14295 + }
14296 +
14297 ++static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
14298 ++{
14299 ++ struct list_head *l;
14300 ++ struct dm_stat *tmp_s;
14301 ++ bool precise_timestamps = false;
14302 ++
14303 ++ list_for_each(l, &stats->list) {
14304 ++ tmp_s = container_of(l, struct dm_stat, list_entry);
14305 ++ if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
14306 ++ precise_timestamps = true;
14307 ++ break;
14308 ++ }
14309 ++ }
14310 ++ stats->precise_timestamps = precise_timestamps;
14311 ++}
14312 ++
14313 + static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
14314 + sector_t step, unsigned stat_flags,
14315 + unsigned n_histogram_entries,
14316 +@@ -376,6 +393,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
14317 + }
14318 + ret_id = s->id;
14319 + list_add_tail_rcu(&s->list_entry, l);
14320 ++
14321 ++ dm_stats_recalc_precise_timestamps(stats);
14322 ++
14323 + mutex_unlock(&stats->mutex);
14324 +
14325 + resume_callback(md);
14326 +@@ -418,6 +438,9 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
14327 + }
14328 +
14329 + list_del_rcu(&s->list_entry);
14330 ++
14331 ++ dm_stats_recalc_precise_timestamps(stats);
14332 ++
14333 + mutex_unlock(&stats->mutex);
14334 +
14335 + /*
14336 +@@ -621,13 +644,14 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
14337 +
14338 + void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
14339 + sector_t bi_sector, unsigned bi_sectors, bool end,
14340 +- unsigned long duration_jiffies,
14341 ++ unsigned long start_time,
14342 + struct dm_stats_aux *stats_aux)
14343 + {
14344 + struct dm_stat *s;
14345 + sector_t end_sector;
14346 + struct dm_stats_last_position *last;
14347 + bool got_precise_time;
14348 ++ unsigned long duration_jiffies = 0;
14349 +
14350 + if (unlikely(!bi_sectors))
14351 + return;
14352 +@@ -647,16 +671,16 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
14353 + ));
14354 + WRITE_ONCE(last->last_sector, end_sector);
14355 + WRITE_ONCE(last->last_rw, bi_rw);
14356 +- }
14357 ++ } else
14358 ++ duration_jiffies = jiffies - start_time;
14359 +
14360 + rcu_read_lock();
14361 +
14362 + got_precise_time = false;
14363 + list_for_each_entry_rcu(s, &stats->list, list_entry) {
14364 + if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
14365 +- if (!end)
14366 +- stats_aux->duration_ns = ktime_to_ns(ktime_get());
14367 +- else
14368 ++ /* start (!end) duration_ns is set by DM core's alloc_io() */
14369 ++ if (end)
14370 + stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
14371 + got_precise_time = true;
14372 + }
14373 +diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
14374 +index 2ddfae678f320..09c81a1ec057d 100644
14375 +--- a/drivers/md/dm-stats.h
14376 ++++ b/drivers/md/dm-stats.h
14377 +@@ -13,8 +13,7 @@ struct dm_stats {
14378 + struct mutex mutex;
14379 + struct list_head list; /* list of struct dm_stat */
14380 + struct dm_stats_last_position __percpu *last;
14381 +- sector_t last_sector;
14382 +- unsigned last_rw;
14383 ++ bool precise_timestamps;
14384 + };
14385 +
14386 + struct dm_stats_aux {
14387 +@@ -32,7 +31,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
14388 +
14389 + void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
14390 + sector_t bi_sector, unsigned bi_sectors, bool end,
14391 +- unsigned long duration_jiffies,
14392 ++ unsigned long start_time,
14393 + struct dm_stats_aux *aux);
14394 +
14395 + static inline bool dm_stats_used(struct dm_stats *st)
14396 +@@ -40,4 +39,10 @@ static inline bool dm_stats_used(struct dm_stats *st)
14397 + return !list_empty(&st->list);
14398 + }
14399 +
14400 ++static inline void dm_stats_record_start(struct dm_stats *stats, struct dm_stats_aux *aux)
14401 ++{
14402 ++ if (unlikely(stats->precise_timestamps))
14403 ++ aux->duration_ns = ktime_to_ns(ktime_get());
14404 ++}
14405 ++
14406 + #endif
14407 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
14408 +index 997ace47bbd54..394778d8bf549 100644
14409 +--- a/drivers/md/dm.c
14410 ++++ b/drivers/md/dm.c
14411 +@@ -484,33 +484,48 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
14412 + }
14413 + EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
14414 +
14415 +-static void start_io_acct(struct dm_io *io)
14416 ++static bool bio_is_flush_with_data(struct bio *bio)
14417 + {
14418 +- struct mapped_device *md = io->md;
14419 +- struct bio *bio = io->orig_bio;
14420 +-
14421 +- bio_start_io_acct_time(bio, io->start_time);
14422 +- if (unlikely(dm_stats_used(&md->stats)))
14423 +- dm_stats_account_io(&md->stats, bio_data_dir(bio),
14424 +- bio->bi_iter.bi_sector, bio_sectors(bio),
14425 +- false, 0, &io->stats_aux);
14426 ++ return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
14427 + }
14428 +
14429 +-static void end_io_acct(struct mapped_device *md, struct bio *bio,
14430 +- unsigned long start_time, struct dm_stats_aux *stats_aux)
14431 ++static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
14432 ++ unsigned long start_time, struct dm_stats_aux *stats_aux)
14433 + {
14434 +- unsigned long duration = jiffies - start_time;
14435 ++ bool is_flush_with_data;
14436 ++ unsigned int bi_size;
14437 +
14438 +- bio_end_io_acct(bio, start_time);
14439 ++ /* If REQ_PREFLUSH set save any payload but do not account it */
14440 ++ is_flush_with_data = bio_is_flush_with_data(bio);
14441 ++ if (is_flush_with_data) {
14442 ++ bi_size = bio->bi_iter.bi_size;
14443 ++ bio->bi_iter.bi_size = 0;
14444 ++ }
14445 ++
14446 ++ if (!end)
14447 ++ bio_start_io_acct_time(bio, start_time);
14448 ++ else
14449 ++ bio_end_io_acct(bio, start_time);
14450 +
14451 + if (unlikely(dm_stats_used(&md->stats)))
14452 + dm_stats_account_io(&md->stats, bio_data_dir(bio),
14453 + bio->bi_iter.bi_sector, bio_sectors(bio),
14454 +- true, duration, stats_aux);
14455 ++ end, start_time, stats_aux);
14456 ++
14457 ++ /* Restore bio's payload so it does get accounted upon requeue */
14458 ++ if (is_flush_with_data)
14459 ++ bio->bi_iter.bi_size = bi_size;
14460 ++}
14461 ++
14462 ++static void start_io_acct(struct dm_io *io)
14463 ++{
14464 ++ dm_io_acct(false, io->md, io->orig_bio, io->start_time, &io->stats_aux);
14465 ++}
14466 +
14467 +- /* nudge anyone waiting on suspend queue */
14468 +- if (unlikely(wq_has_sleeper(&md->wait)))
14469 +- wake_up(&md->wait);
14470 ++static void end_io_acct(struct mapped_device *md, struct bio *bio,
14471 ++ unsigned long start_time, struct dm_stats_aux *stats_aux)
14472 ++{
14473 ++ dm_io_acct(true, md, bio, start_time, stats_aux);
14474 + }
14475 +
14476 + static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
14477 +@@ -531,12 +546,15 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
14478 + io->magic = DM_IO_MAGIC;
14479 + io->status = 0;
14480 + atomic_set(&io->io_count, 1);
14481 ++ this_cpu_inc(*md->pending_io);
14482 + io->orig_bio = bio;
14483 + io->md = md;
14484 + spin_lock_init(&io->endio_lock);
14485 +
14486 + io->start_time = jiffies;
14487 +
14488 ++ dm_stats_record_start(&md->stats, &io->stats_aux);
14489 ++
14490 + return io;
14491 + }
14492 +
14493 +@@ -826,11 +844,17 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
14494 + stats_aux = io->stats_aux;
14495 + free_io(md, io);
14496 + end_io_acct(md, bio, start_time, &stats_aux);
14497 ++ smp_wmb();
14498 ++ this_cpu_dec(*md->pending_io);
14499 ++
14500 ++ /* nudge anyone waiting on suspend queue */
14501 ++ if (unlikely(wq_has_sleeper(&md->wait)))
14502 ++ wake_up(&md->wait);
14503 +
14504 + if (io_error == BLK_STS_DM_REQUEUE)
14505 + return;
14506 +
14507 +- if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
14508 ++ if (bio_is_flush_with_data(bio)) {
14509 + /*
14510 + * Preflush done for flush with data, reissue
14511 + * without REQ_PREFLUSH.
14512 +@@ -1607,6 +1631,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
14513 + md->dax_dev = NULL;
14514 + }
14515 +
14516 ++ dm_cleanup_zoned_dev(md);
14517 + if (md->disk) {
14518 + spin_lock(&_minor_lock);
14519 + md->disk->private_data = NULL;
14520 +@@ -1619,6 +1644,11 @@ static void cleanup_mapped_device(struct mapped_device *md)
14521 + blk_cleanup_disk(md->disk);
14522 + }
14523 +
14524 ++ if (md->pending_io) {
14525 ++ free_percpu(md->pending_io);
14526 ++ md->pending_io = NULL;
14527 ++ }
14528 ++
14529 + cleanup_srcu_struct(&md->io_barrier);
14530 +
14531 + mutex_destroy(&md->suspend_lock);
14532 +@@ -1627,7 +1657,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
14533 + mutex_destroy(&md->swap_bios_lock);
14534 +
14535 + dm_mq_cleanup_mapped_device(md);
14536 +- dm_cleanup_zoned_dev(md);
14537 + }
14538 +
14539 + /*
14540 +@@ -1721,6 +1750,10 @@ static struct mapped_device *alloc_dev(int minor)
14541 + if (!md->wq)
14542 + goto bad;
14543 +
14544 ++ md->pending_io = alloc_percpu(unsigned long);
14545 ++ if (!md->pending_io)
14546 ++ goto bad;
14547 ++
14548 + dm_stats_init(&md->stats);
14549 +
14550 + /* Populate the mapping, nobody knows we exist yet */
14551 +@@ -2128,16 +2161,13 @@ void dm_put(struct mapped_device *md)
14552 + }
14553 + EXPORT_SYMBOL_GPL(dm_put);
14554 +
14555 +-static bool md_in_flight_bios(struct mapped_device *md)
14556 ++static bool dm_in_flight_bios(struct mapped_device *md)
14557 + {
14558 + int cpu;
14559 +- struct block_device *part = dm_disk(md)->part0;
14560 +- long sum = 0;
14561 ++ unsigned long sum = 0;
14562 +
14563 +- for_each_possible_cpu(cpu) {
14564 +- sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
14565 +- sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
14566 +- }
14567 ++ for_each_possible_cpu(cpu)
14568 ++ sum += *per_cpu_ptr(md->pending_io, cpu);
14569 +
14570 + return sum != 0;
14571 + }
14572 +@@ -2150,7 +2180,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
14573 + while (true) {
14574 + prepare_to_wait(&md->wait, &wait, task_state);
14575 +
14576 +- if (!md_in_flight_bios(md))
14577 ++ if (!dm_in_flight_bios(md))
14578 + break;
14579 +
14580 + if (signal_pending_state(task_state, current)) {
14581 +@@ -2162,6 +2192,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
14582 + }
14583 + finish_wait(&md->wait, &wait);
14584 +
14585 ++ smp_rmb();
14586 ++
14587 + return r;
14588 + }
14589 +
14590 +diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
14591 +index 8e13cae40ec5b..db7f41a80770d 100644
14592 +--- a/drivers/media/i2c/adv7511-v4l2.c
14593 ++++ b/drivers/media/i2c/adv7511-v4l2.c
14594 +@@ -522,7 +522,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_
14595 + buffer[3] = 0;
14596 + buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
14597 +
14598 +- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
14599 ++ if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) {
14600 + v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
14601 + return;
14602 + }
14603 +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
14604 +index a2fa408d2d9f5..bb0c8fc6d3832 100644
14605 +--- a/drivers/media/i2c/adv7604.c
14606 ++++ b/drivers/media/i2c/adv7604.c
14607 +@@ -2484,7 +2484,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index,
14608 + buffer[i + 3] = infoframe_read(sd,
14609 + adv76xx_cri[index].payload_addr + i);
14610 +
14611 +- if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
14612 ++ if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) {
14613 + v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
14614 + adv76xx_cri[index].desc);
14615 + return -ENOENT;
14616 +diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
14617 +index 9d6eed0f82819..22caa070273b4 100644
14618 +--- a/drivers/media/i2c/adv7842.c
14619 ++++ b/drivers/media/i2c/adv7842.c
14620 +@@ -2583,7 +2583,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_
14621 + for (i = 0; i < len; i++)
14622 + buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
14623 +
14624 +- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
14625 ++ if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) {
14626 + v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
14627 + return;
14628 + }
14629 +diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
14630 +index bab720c7c1de1..d5f0eabf20c6a 100644
14631 +--- a/drivers/media/i2c/ov2740.c
14632 ++++ b/drivers/media/i2c/ov2740.c
14633 +@@ -1162,6 +1162,7 @@ static int ov2740_probe(struct i2c_client *client)
14634 + if (!ov2740)
14635 + return -ENOMEM;
14636 +
14637 ++ v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
14638 + full_power = acpi_dev_state_d0(&client->dev);
14639 + if (full_power) {
14640 + ret = ov2740_identify_module(ov2740);
14641 +@@ -1171,13 +1172,6 @@ static int ov2740_probe(struct i2c_client *client)
14642 + }
14643 + }
14644 +
14645 +- v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
14646 +- ret = ov2740_identify_module(ov2740);
14647 +- if (ret) {
14648 +- dev_err(&client->dev, "failed to find sensor: %d", ret);
14649 +- return ret;
14650 +- }
14651 +-
14652 + mutex_init(&ov2740->mutex);
14653 + ov2740->cur_mode = &supported_modes[0];
14654 + ret = ov2740_init_controls(ov2740);
14655 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
14656 +index ddbd71394db33..db5a19babe67d 100644
14657 +--- a/drivers/media/i2c/ov5640.c
14658 ++++ b/drivers/media/i2c/ov5640.c
14659 +@@ -2293,7 +2293,6 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
14660 + struct ov5640_dev *sensor = to_ov5640_dev(sd);
14661 + const struct ov5640_mode_info *new_mode;
14662 + struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
14663 +- struct v4l2_mbus_framefmt *fmt;
14664 + int ret;
14665 +
14666 + if (format->pad != 0)
14667 +@@ -2311,12 +2310,10 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
14668 + if (ret)
14669 + goto out;
14670 +
14671 +- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
14672 +- fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
14673 +- else
14674 +- fmt = &sensor->fmt;
14675 +-
14676 +- *fmt = *mbus_fmt;
14677 ++ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
14678 ++ *v4l2_subdev_get_try_format(sd, sd_state, 0) = *mbus_fmt;
14679 ++ goto out;
14680 ++ }
14681 +
14682 + if (new_mode != sensor->current_mode) {
14683 + sensor->current_mode = new_mode;
14684 +@@ -2325,6 +2322,9 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
14685 + if (mbus_fmt->code != sensor->fmt.code)
14686 + sensor->pending_fmt_change = true;
14687 +
14688 ++ /* update format even if code is unchanged, resolution might change */
14689 ++ sensor->fmt = *mbus_fmt;
14690 ++
14691 + __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
14692 + ov5640_calc_pixel_rate(sensor));
14693 + out:
14694 +diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
14695 +index 947d437ed0efe..ef8b52dc9401d 100644
14696 +--- a/drivers/media/i2c/ov5648.c
14697 ++++ b/drivers/media/i2c/ov5648.c
14698 +@@ -639,7 +639,7 @@ struct ov5648_ctrls {
14699 + struct v4l2_ctrl *pixel_rate;
14700 +
14701 + struct v4l2_ctrl_handler handler;
14702 +-} __packed;
14703 ++};
14704 +
14705 + struct ov5648_sensor {
14706 + struct device *dev;
14707 +@@ -1778,8 +1778,14 @@ static int ov5648_state_configure(struct ov5648_sensor *sensor,
14708 +
14709 + static int ov5648_state_init(struct ov5648_sensor *sensor)
14710 + {
14711 +- return ov5648_state_configure(sensor, &ov5648_modes[0],
14712 +- ov5648_mbus_codes[0]);
14713 ++ int ret;
14714 ++
14715 ++ mutex_lock(&sensor->mutex);
14716 ++ ret = ov5648_state_configure(sensor, &ov5648_modes[0],
14717 ++ ov5648_mbus_codes[0]);
14718 ++ mutex_unlock(&sensor->mutex);
14719 ++
14720 ++ return ret;
14721 + }
14722 +
14723 + /* Sensor Base */
14724 +diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
14725 +index f67412150b16b..eb59dc8bb5929 100644
14726 +--- a/drivers/media/i2c/ov6650.c
14727 ++++ b/drivers/media/i2c/ov6650.c
14728 +@@ -472,9 +472,16 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
14729 + {
14730 + struct i2c_client *client = v4l2_get_subdevdata(sd);
14731 + struct ov6650 *priv = to_ov6650(client);
14732 ++ struct v4l2_rect *rect;
14733 +
14734 +- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
14735 +- return -EINVAL;
14736 ++ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
14737 ++ /* pre-select try crop rectangle */
14738 ++ rect = &sd_state->pads->try_crop;
14739 ++
14740 ++ } else {
14741 ++ /* pre-select active crop rectangle */
14742 ++ rect = &priv->rect;
14743 ++ }
14744 +
14745 + switch (sel->target) {
14746 + case V4L2_SEL_TGT_CROP_BOUNDS:
14747 +@@ -483,14 +490,33 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
14748 + sel->r.width = W_CIF;
14749 + sel->r.height = H_CIF;
14750 + return 0;
14751 ++
14752 + case V4L2_SEL_TGT_CROP:
14753 +- sel->r = priv->rect;
14754 ++ /* use selected crop rectangle */
14755 ++ sel->r = *rect;
14756 + return 0;
14757 ++
14758 + default:
14759 + return -EINVAL;
14760 + }
14761 + }
14762 +
14763 ++static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
14764 ++{
14765 ++ return width > rect->width >> 1 || height > rect->height >> 1;
14766 ++}
14767 ++
14768 ++static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect)
14769 ++{
14770 ++ v4l_bound_align_image(&rect->width, 2, W_CIF, 1,
14771 ++ &rect->height, 2, H_CIF, 1, 0);
14772 ++ v4l_bound_align_image(&rect->left, DEF_HSTRT << 1,
14773 ++ (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1,
14774 ++ &rect->top, DEF_VSTRT << 1,
14775 ++ (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height,
14776 ++ 1, 0);
14777 ++}
14778 ++
14779 + static int ov6650_set_selection(struct v4l2_subdev *sd,
14780 + struct v4l2_subdev_state *sd_state,
14781 + struct v4l2_subdev_selection *sel)
14782 +@@ -499,18 +525,30 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
14783 + struct ov6650 *priv = to_ov6650(client);
14784 + int ret;
14785 +
14786 +- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
14787 +- sel->target != V4L2_SEL_TGT_CROP)
14788 ++ if (sel->target != V4L2_SEL_TGT_CROP)
14789 + return -EINVAL;
14790 +
14791 +- v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
14792 +- &sel->r.height, 2, H_CIF, 1, 0);
14793 +- v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
14794 +- (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
14795 +- &sel->r.top, DEF_VSTRT << 1,
14796 +- (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
14797 +- 1, 0);
14798 ++ ov6650_bind_align_crop_rectangle(&sel->r);
14799 ++
14800 ++ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
14801 ++ struct v4l2_rect *crop = &sd_state->pads->try_crop;
14802 ++ struct v4l2_mbus_framefmt *mf = &sd_state->pads->try_fmt;
14803 ++ /* detect current pad config scaling factor */
14804 ++ bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
14805 ++
14806 ++ /* store new crop rectangle */
14807 ++ *crop = sel->r;
14808 +
14809 ++ /* adjust frame size */
14810 ++ mf->width = crop->width >> half_scale;
14811 ++ mf->height = crop->height >> half_scale;
14812 ++
14813 ++ return 0;
14814 ++ }
14815 ++
14816 ++ /* V4L2_SUBDEV_FORMAT_ACTIVE */
14817 ++
14818 ++ /* apply new crop rectangle */
14819 + ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
14820 + if (!ret) {
14821 + priv->rect.width += priv->rect.left - sel->r.left;
14822 +@@ -562,30 +600,13 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
14823 + return 0;
14824 + }
14825 +
14826 +-static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
14827 +-{
14828 +- return width > rect->width >> 1 || height > rect->height >> 1;
14829 +-}
14830 +-
14831 + #define to_clkrc(div) ((div) - 1)
14832 +
14833 + /* set the format we will capture in */
14834 +-static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
14835 ++static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale)
14836 + {
14837 + struct i2c_client *client = v4l2_get_subdevdata(sd);
14838 + struct ov6650 *priv = to_ov6650(client);
14839 +- bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
14840 +- struct v4l2_subdev_selection sel = {
14841 +- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
14842 +- .target = V4L2_SEL_TGT_CROP,
14843 +- .r.left = priv->rect.left + (priv->rect.width >> 1) -
14844 +- (mf->width >> (1 - half_scale)),
14845 +- .r.top = priv->rect.top + (priv->rect.height >> 1) -
14846 +- (mf->height >> (1 - half_scale)),
14847 +- .r.width = mf->width << half_scale,
14848 +- .r.height = mf->height << half_scale,
14849 +- };
14850 +- u32 code = mf->code;
14851 + u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
14852 + int ret;
14853 +
14854 +@@ -653,9 +674,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
14855 + coma_mask |= COMA_QCIF;
14856 + }
14857 +
14858 +- ret = ov6650_set_selection(sd, NULL, &sel);
14859 +- if (!ret)
14860 +- ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
14861 ++ ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
14862 + if (!ret) {
14863 + priv->half_scale = half_scale;
14864 +
14865 +@@ -674,14 +693,12 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
14866 + struct v4l2_mbus_framefmt *mf = &format->format;
14867 + struct i2c_client *client = v4l2_get_subdevdata(sd);
14868 + struct ov6650 *priv = to_ov6650(client);
14869 ++ struct v4l2_rect *crop;
14870 ++ bool half_scale;
14871 +
14872 + if (format->pad)
14873 + return -EINVAL;
14874 +
14875 +- if (is_unscaled_ok(mf->width, mf->height, &priv->rect))
14876 +- v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
14877 +- &mf->height, 2, H_CIF, 1, 0);
14878 +-
14879 + switch (mf->code) {
14880 + case MEDIA_BUS_FMT_Y10_1X10:
14881 + mf->code = MEDIA_BUS_FMT_Y8_1X8;
14882 +@@ -699,10 +716,17 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
14883 + break;
14884 + }
14885 +
14886 ++ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
14887 ++ crop = &sd_state->pads->try_crop;
14888 ++ else
14889 ++ crop = &priv->rect;
14890 ++
14891 ++ half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
14892 ++
14893 + if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
14894 +- /* store media bus format code and frame size in pad config */
14895 +- sd_state->pads->try_fmt.width = mf->width;
14896 +- sd_state->pads->try_fmt.height = mf->height;
14897 ++ /* store new mbus frame format code and size in pad config */
14898 ++ sd_state->pads->try_fmt.width = crop->width >> half_scale;
14899 ++ sd_state->pads->try_fmt.height = crop->height >> half_scale;
14900 + sd_state->pads->try_fmt.code = mf->code;
14901 +
14902 + /* return default mbus frame format updated with pad config */
14903 +@@ -712,9 +736,11 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
14904 + mf->code = sd_state->pads->try_fmt.code;
14905 +
14906 + } else {
14907 +- /* apply new media bus format code and frame size */
14908 +- int ret = ov6650_s_fmt(sd, mf);
14909 ++ int ret = 0;
14910 +
14911 ++ /* apply new media bus frame format and scaling if changed */
14912 ++ if (mf->code != priv->code || half_scale != priv->half_scale)
14913 ++ ret = ov6650_s_fmt(sd, mf->code, half_scale);
14914 + if (ret)
14915 + return ret;
14916 +
14917 +@@ -890,9 +916,8 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
14918 + if (!ret)
14919 + ret = ov6650_prog_dflt(client, xclk->clkrc);
14920 + if (!ret) {
14921 +- struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
14922 +-
14923 +- ret = ov6650_s_fmt(sd, &mf);
14924 ++ /* driver default frame format, no scaling */
14925 ++ ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false);
14926 + }
14927 + if (!ret)
14928 + ret = v4l2_ctrl_handler_setup(&priv->hdl);
14929 +diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
14930 +index d9d016cfa9ac0..e0dd0f4849a7a 100644
14931 +--- a/drivers/media/i2c/ov8865.c
14932 ++++ b/drivers/media/i2c/ov8865.c
14933 +@@ -457,8 +457,8 @@
14934 +
14935 + #define OV8865_NATIVE_WIDTH 3296
14936 + #define OV8865_NATIVE_HEIGHT 2528
14937 +-#define OV8865_ACTIVE_START_TOP 32
14938 +-#define OV8865_ACTIVE_START_LEFT 80
14939 ++#define OV8865_ACTIVE_START_LEFT 16
14940 ++#define OV8865_ACTIVE_START_TOP 40
14941 + #define OV8865_ACTIVE_WIDTH 3264
14942 + #define OV8865_ACTIVE_HEIGHT 2448
14943 +
14944 +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
14945 +index 8cc9bec43688e..5ca3d0cc653a8 100644
14946 +--- a/drivers/media/pci/bt8xx/bttv-driver.c
14947 ++++ b/drivers/media/pci/bt8xx/bttv-driver.c
14948 +@@ -3890,7 +3890,7 @@ static int bttv_register_video(struct bttv *btv)
14949 +
14950 + /* video */
14951 + vdev_init(btv, &btv->video_dev, &bttv_video_template, "video");
14952 +- btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
14953 ++ btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
14954 + V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
14955 + if (btv->tuner_type != TUNER_ABSENT)
14956 + btv->video_dev.device_caps |= V4L2_CAP_TUNER;
14957 +@@ -3911,7 +3911,7 @@ static int bttv_register_video(struct bttv *btv)
14958 + /* vbi */
14959 + vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi");
14960 + btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE |
14961 +- V4L2_CAP_STREAMING | V4L2_CAP_TUNER;
14962 ++ V4L2_CAP_STREAMING;
14963 + if (btv->tuner_type != TUNER_ABSENT)
14964 + btv->vbi_dev.device_caps |= V4L2_CAP_TUNER;
14965 +
14966 +diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
14967 +index 680e1e3fe89b7..2c1d5137ac470 100644
14968 +--- a/drivers/media/pci/cx88/cx88-mpeg.c
14969 ++++ b/drivers/media/pci/cx88/cx88-mpeg.c
14970 +@@ -162,6 +162,9 @@ int cx8802_start_dma(struct cx8802_dev *dev,
14971 + cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET);
14972 + q->count = 0;
14973 +
14974 ++ /* clear interrupt status register */
14975 ++ cx_write(MO_TS_INTSTAT, 0x1f1111);
14976 ++
14977 + /* enable irqs */
14978 + dprintk(1, "setting the interrupt mask\n");
14979 + cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
14980 +diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
14981 +index 4cf92dee65271..ce3a7ca51736e 100644
14982 +--- a/drivers/media/pci/ivtv/ivtv-driver.h
14983 ++++ b/drivers/media/pci/ivtv/ivtv-driver.h
14984 +@@ -330,7 +330,6 @@ struct ivtv_stream {
14985 + struct ivtv *itv; /* for ease of use */
14986 + const char *name; /* name of the stream */
14987 + int type; /* stream type */
14988 +- u32 caps; /* V4L2 capabilities */
14989 +
14990 + struct v4l2_fh *fh; /* pointer to the streaming filehandle */
14991 + spinlock_t qlock; /* locks access to the queues */
14992 +diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
14993 +index 0cdf6b3210c2f..fee460e2ca863 100644
14994 +--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
14995 ++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
14996 +@@ -438,7 +438,7 @@ static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_f
14997 + struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
14998 + struct v4l2_window *winfmt = &fmt->fmt.win;
14999 +
15000 +- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15001 ++ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15002 + return -EINVAL;
15003 + if (!itv->osd_video_pbase)
15004 + return -EINVAL;
15005 +@@ -549,7 +549,7 @@ static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2
15006 + u32 chromakey = fmt->fmt.win.chromakey;
15007 + u8 global_alpha = fmt->fmt.win.global_alpha;
15008 +
15009 +- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15010 ++ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15011 + return -EINVAL;
15012 + if (!itv->osd_video_pbase)
15013 + return -EINVAL;
15014 +@@ -1383,7 +1383,7 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
15015 + 0,
15016 + };
15017 +
15018 +- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15019 ++ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15020 + return -ENOTTY;
15021 + if (!itv->osd_video_pbase)
15022 + return -ENOTTY;
15023 +@@ -1450,7 +1450,7 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
15024 + struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
15025 + struct yuv_playback_info *yi = &itv->yuv_info;
15026 +
15027 +- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15028 ++ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15029 + return -ENOTTY;
15030 + if (!itv->osd_video_pbase)
15031 + return -ENOTTY;
15032 +@@ -1470,7 +1470,7 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
15033 + struct ivtv *itv = id->itv;
15034 + struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
15035 +
15036 +- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15037 ++ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
15038 + return -ENOTTY;
15039 + if (!itv->osd_video_pbase)
15040 + return -ENOTTY;
15041 +diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
15042 +index 6e455948cc77a..13d7d55e65949 100644
15043 +--- a/drivers/media/pci/ivtv/ivtv-streams.c
15044 ++++ b/drivers/media/pci/ivtv/ivtv-streams.c
15045 +@@ -176,7 +176,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
15046 + s->itv = itv;
15047 + s->type = type;
15048 + s->name = ivtv_stream_info[type].name;
15049 +- s->caps = ivtv_stream_info[type].v4l2_caps;
15050 ++ s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps;
15051 +
15052 + if (ivtv_stream_info[type].pio)
15053 + s->dma = DMA_NONE;
15054 +@@ -299,12 +299,9 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
15055 + if (s_mpg->vdev.v4l2_dev)
15056 + num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
15057 + }
15058 +- s->vdev.device_caps = s->caps;
15059 +- if (itv->osd_video_pbase) {
15060 +- itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
15061 +- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
15062 +- itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
15063 +- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
15064 ++ if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV ||
15065 ++ type == IVTV_DEC_STREAM_TYPE_MPG)) {
15066 ++ s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
15067 + itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
15068 + }
15069 + video_set_drvdata(&s->vdev, s);
15070 +diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
15071 +index fb24d2ed3621b..d3cde05a6ebab 100644
15072 +--- a/drivers/media/pci/saa7134/saa7134-alsa.c
15073 ++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
15074 +@@ -1214,7 +1214,7 @@ static int alsa_device_exit(struct saa7134_dev *dev)
15075 +
15076 + static int saa7134_alsa_init(void)
15077 + {
15078 +- struct saa7134_dev *dev = NULL;
15079 ++ struct saa7134_dev *dev;
15080 +
15081 + saa7134_dmasound_init = alsa_device_init;
15082 + saa7134_dmasound_exit = alsa_device_exit;
15083 +@@ -1229,7 +1229,7 @@ static int saa7134_alsa_init(void)
15084 + alsa_device_init(dev);
15085 + }
15086 +
15087 +- if (dev == NULL)
15088 ++ if (list_empty(&saa7134_devlist))
15089 + pr_info("saa7134 ALSA: no saa7134 cards found\n");
15090 +
15091 + return 0;
15092 +diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
15093 +index 7a24daf7165a4..bdeecde0d9978 100644
15094 +--- a/drivers/media/platform/aspeed-video.c
15095 ++++ b/drivers/media/platform/aspeed-video.c
15096 +@@ -153,7 +153,7 @@
15097 + #define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF)
15098 +
15099 + #define VE_MODE_DETECT_STATUS 0x098
15100 +-#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0)
15101 ++#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0)
15102 + #define VE_MODE_DETECT_V_LINES_SHF 16
15103 + #define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF)
15104 + #define VE_MODE_DETECT_STATUS_VSYNC BIT(28)
15105 +@@ -164,6 +164,8 @@
15106 + #define VE_SYNC_STATUS_VSYNC_SHF 16
15107 + #define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF)
15108 +
15109 ++#define VE_H_TOTAL_PIXELS 0x0A0
15110 ++
15111 + #define VE_INTERRUPT_CTRL 0x304
15112 + #define VE_INTERRUPT_STATUS 0x308
15113 + #define VE_INTERRUPT_MODE_DETECT_WD BIT(0)
15114 +@@ -802,6 +804,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
15115 + u32 src_lr_edge;
15116 + u32 src_tb_edge;
15117 + u32 sync;
15118 ++ u32 htotal;
15119 + struct v4l2_bt_timings *det = &video->detected_timings;
15120 +
15121 + det->width = MIN_WIDTH;
15122 +@@ -847,6 +850,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
15123 + src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET);
15124 + mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
15125 + sync = aspeed_video_read(video, VE_SYNC_STATUS);
15126 ++ htotal = aspeed_video_read(video, VE_H_TOTAL_PIXELS);
15127 +
15128 + video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >>
15129 + VE_SRC_TB_EDGE_DET_BOT_SHF;
15130 +@@ -863,8 +867,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
15131 + VE_SRC_LR_EDGE_DET_RT_SHF;
15132 + video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT;
15133 + det->hfrontporch = video->frame_left;
15134 +- det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) -
15135 +- video->frame_right;
15136 ++ det->hbackporch = htotal - video->frame_right;
15137 + det->hsync = sync & VE_SYNC_STATUS_HSYNC;
15138 + if (video->frame_left > video->frame_right)
15139 + continue;
15140 +diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
15141 +index 660cd0ab6749a..24807782c9e50 100644
15142 +--- a/drivers/media/platform/atmel/atmel-isc-base.c
15143 ++++ b/drivers/media/platform/atmel/atmel-isc-base.c
15144 +@@ -1369,14 +1369,12 @@ static int isc_enum_framesizes(struct file *file, void *fh,
15145 + struct v4l2_frmsizeenum *fsize)
15146 + {
15147 + struct isc_device *isc = video_drvdata(file);
15148 +- struct v4l2_subdev_frame_size_enum fse = {
15149 +- .code = isc->config.sd_format->mbus_code,
15150 +- .index = fsize->index,
15151 +- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
15152 +- };
15153 + int ret = -EINVAL;
15154 + int i;
15155 +
15156 ++ if (fsize->index)
15157 ++ return -EINVAL;
15158 ++
15159 + for (i = 0; i < isc->num_user_formats; i++)
15160 + if (isc->user_formats[i]->fourcc == fsize->pixel_format)
15161 + ret = 0;
15162 +@@ -1388,14 +1386,14 @@ static int isc_enum_framesizes(struct file *file, void *fh,
15163 + if (ret)
15164 + return ret;
15165 +
15166 +- ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
15167 +- NULL, &fse);
15168 +- if (ret)
15169 +- return ret;
15170 ++ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
15171 +
15172 +- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
15173 +- fsize->discrete.width = fse.max_width;
15174 +- fsize->discrete.height = fse.max_height;
15175 ++ fsize->stepwise.min_width = 16;
15176 ++ fsize->stepwise.max_width = isc->max_width;
15177 ++ fsize->stepwise.min_height = 16;
15178 ++ fsize->stepwise.max_height = isc->max_height;
15179 ++ fsize->stepwise.step_width = 1;
15180 ++ fsize->stepwise.step_height = 1;
15181 +
15182 + return 0;
15183 + }
15184 +diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
15185 +index 5d1c76f680f37..2b1082295c130 100644
15186 +--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
15187 ++++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
15188 +@@ -556,7 +556,6 @@ static int microchip_xisc_remove(struct platform_device *pdev)
15189 +
15190 + v4l2_device_unregister(&isc->v4l2_dev);
15191 +
15192 +- clk_disable_unprepare(isc->ispck);
15193 + clk_disable_unprepare(isc->hclock);
15194 +
15195 + isc_clk_cleanup(isc);
15196 +@@ -568,7 +567,6 @@ static int __maybe_unused xisc_runtime_suspend(struct device *dev)
15197 + {
15198 + struct isc_device *isc = dev_get_drvdata(dev);
15199 +
15200 +- clk_disable_unprepare(isc->ispck);
15201 + clk_disable_unprepare(isc->hclock);
15202 +
15203 + return 0;
15204 +@@ -583,10 +581,6 @@ static int __maybe_unused xisc_runtime_resume(struct device *dev)
15205 + if (ret)
15206 + return ret;
15207 +
15208 +- ret = clk_prepare_enable(isc->ispck);
15209 +- if (ret)
15210 +- clk_disable_unprepare(isc->hclock);
15211 +-
15212 + return ret;
15213 + }
15214 +
15215 +diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
15216 +index 3cd47ba26357e..a57822b050706 100644
15217 +--- a/drivers/media/platform/coda/coda-common.c
15218 ++++ b/drivers/media/platform/coda/coda-common.c
15219 +@@ -409,6 +409,7 @@ static struct vdoa_data *coda_get_vdoa_data(void)
15220 + if (!vdoa_data)
15221 + vdoa_data = ERR_PTR(-EPROBE_DEFER);
15222 +
15223 ++ put_device(&vdoa_pdev->dev);
15224 + out:
15225 + of_node_put(vdoa_node);
15226 +
15227 +diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
15228 +index 5a89d885d0e3b..4a260f4ed236b 100644
15229 +--- a/drivers/media/platform/davinci/vpif.c
15230 ++++ b/drivers/media/platform/davinci/vpif.c
15231 +@@ -41,6 +41,11 @@ MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);
15232 + #define VPIF_CH2_MAX_MODES 15
15233 + #define VPIF_CH3_MAX_MODES 2
15234 +
15235 ++struct vpif_data {
15236 ++ struct platform_device *capture;
15237 ++ struct platform_device *display;
15238 ++};
15239 ++
15240 + DEFINE_SPINLOCK(vpif_lock);
15241 + EXPORT_SYMBOL_GPL(vpif_lock);
15242 +
15243 +@@ -423,16 +428,31 @@ int vpif_channel_getfid(u8 channel_id)
15244 + }
15245 + EXPORT_SYMBOL(vpif_channel_getfid);
15246 +
15247 ++static void vpif_pdev_release(struct device *dev)
15248 ++{
15249 ++ struct platform_device *pdev = to_platform_device(dev);
15250 ++
15251 ++ kfree(pdev);
15252 ++}
15253 ++
15254 + static int vpif_probe(struct platform_device *pdev)
15255 + {
15256 + static struct resource *res_irq;
15257 + struct platform_device *pdev_capture, *pdev_display;
15258 + struct device_node *endpoint = NULL;
15259 ++ struct vpif_data *data;
15260 ++ int ret;
15261 +
15262 + vpif_base = devm_platform_ioremap_resource(pdev, 0);
15263 + if (IS_ERR(vpif_base))
15264 + return PTR_ERR(vpif_base);
15265 +
15266 ++ data = kzalloc(sizeof(*data), GFP_KERNEL);
15267 ++ if (!data)
15268 ++ return -ENOMEM;
15269 ++
15270 ++ platform_set_drvdata(pdev, data);
15271 ++
15272 + pm_runtime_enable(&pdev->dev);
15273 + pm_runtime_get(&pdev->dev);
15274 +
15275 +@@ -456,46 +476,79 @@ static int vpif_probe(struct platform_device *pdev)
15276 + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
15277 + if (!res_irq) {
15278 + dev_warn(&pdev->dev, "Missing IRQ resource.\n");
15279 +- pm_runtime_put(&pdev->dev);
15280 +- return -EINVAL;
15281 ++ ret = -EINVAL;
15282 ++ goto err_put_rpm;
15283 + }
15284 +
15285 +- pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture),
15286 +- GFP_KERNEL);
15287 +- if (pdev_capture) {
15288 +- pdev_capture->name = "vpif_capture";
15289 +- pdev_capture->id = -1;
15290 +- pdev_capture->resource = res_irq;
15291 +- pdev_capture->num_resources = 1;
15292 +- pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
15293 +- pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
15294 +- pdev_capture->dev.parent = &pdev->dev;
15295 +- platform_device_register(pdev_capture);
15296 +- } else {
15297 +- dev_warn(&pdev->dev, "Unable to allocate memory for pdev_capture.\n");
15298 ++ pdev_capture = kzalloc(sizeof(*pdev_capture), GFP_KERNEL);
15299 ++ if (!pdev_capture) {
15300 ++ ret = -ENOMEM;
15301 ++ goto err_put_rpm;
15302 + }
15303 +
15304 +- pdev_display = devm_kzalloc(&pdev->dev, sizeof(*pdev_display),
15305 +- GFP_KERNEL);
15306 +- if (pdev_display) {
15307 +- pdev_display->name = "vpif_display";
15308 +- pdev_display->id = -1;
15309 +- pdev_display->resource = res_irq;
15310 +- pdev_display->num_resources = 1;
15311 +- pdev_display->dev.dma_mask = pdev->dev.dma_mask;
15312 +- pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
15313 +- pdev_display->dev.parent = &pdev->dev;
15314 +- platform_device_register(pdev_display);
15315 +- } else {
15316 +- dev_warn(&pdev->dev, "Unable to allocate memory for pdev_display.\n");
15317 ++ pdev_capture->name = "vpif_capture";
15318 ++ pdev_capture->id = -1;
15319 ++ pdev_capture->resource = res_irq;
15320 ++ pdev_capture->num_resources = 1;
15321 ++ pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
15322 ++ pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
15323 ++ pdev_capture->dev.parent = &pdev->dev;
15324 ++ pdev_capture->dev.release = vpif_pdev_release;
15325 ++
15326 ++ ret = platform_device_register(pdev_capture);
15327 ++ if (ret)
15328 ++ goto err_put_pdev_capture;
15329 ++
15330 ++ pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
15331 ++ if (!pdev_display) {
15332 ++ ret = -ENOMEM;
15333 ++ goto err_put_pdev_capture;
15334 + }
15335 +
15336 ++ pdev_display->name = "vpif_display";
15337 ++ pdev_display->id = -1;
15338 ++ pdev_display->resource = res_irq;
15339 ++ pdev_display->num_resources = 1;
15340 ++ pdev_display->dev.dma_mask = pdev->dev.dma_mask;
15341 ++ pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
15342 ++ pdev_display->dev.parent = &pdev->dev;
15343 ++ pdev_display->dev.release = vpif_pdev_release;
15344 ++
15345 ++ ret = platform_device_register(pdev_display);
15346 ++ if (ret)
15347 ++ goto err_put_pdev_display;
15348 ++
15349 ++ data->capture = pdev_capture;
15350 ++ data->display = pdev_display;
15351 ++
15352 + return 0;
15353 ++
15354 ++err_put_pdev_display:
15355 ++ platform_device_put(pdev_display);
15356 ++err_put_pdev_capture:
15357 ++ platform_device_put(pdev_capture);
15358 ++err_put_rpm:
15359 ++ pm_runtime_put(&pdev->dev);
15360 ++ pm_runtime_disable(&pdev->dev);
15361 ++ kfree(data);
15362 ++
15363 ++ return ret;
15364 + }
15365 +
15366 + static int vpif_remove(struct platform_device *pdev)
15367 + {
15368 ++ struct vpif_data *data = platform_get_drvdata(pdev);
15369 ++
15370 ++ if (data->capture)
15371 ++ platform_device_unregister(data->capture);
15372 ++ if (data->display)
15373 ++ platform_device_unregister(data->display);
15374 ++
15375 ++ pm_runtime_put(&pdev->dev);
15376 + pm_runtime_disable(&pdev->dev);
15377 ++
15378 ++ kfree(data);
15379 ++
15380 + return 0;
15381 + }
15382 +
15383 +diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
15384 +index 4ca96cf9def76..83a2b4d13bad3 100644
15385 +--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
15386 ++++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
15387 +@@ -947,8 +947,13 @@ static void mxc_jpeg_device_run(void *priv)
15388 + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
15389 +
15390 + jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf);
15391 ++ if (q_data_cap->fmt->colplanes != dst_buf->vb2_buf.num_planes) {
15392 ++ dev_err(dev, "Capture format %s has %d planes, but capture buffer has %d planes\n",
15393 ++ q_data_cap->fmt->name, q_data_cap->fmt->colplanes,
15394 ++ dst_buf->vb2_buf.num_planes);
15395 ++ jpeg_src_buf->jpeg_parse_error = true;
15396 ++ }
15397 + if (jpeg_src_buf->jpeg_parse_error) {
15398 +- jpeg->slot_data[ctx->slot].used = false;
15399 + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
15400 + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
15401 + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
15402 +diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
15403 +index ccda18e5a3774..5e7b319f300df 100644
15404 +--- a/drivers/media/platform/meson/ge2d/ge2d.c
15405 ++++ b/drivers/media/platform/meson/ge2d/ge2d.c
15406 +@@ -215,35 +215,35 @@ static void ge2d_hw_start(struct meson_ge2d *ge2d)
15407 +
15408 + regmap_write(ge2d->map, GE2D_SRC1_CLIPY_START_END,
15409 + FIELD_PREP(GE2D_START, ctx->in.crop.top) |
15410 +- FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height));
15411 ++ FIELD_PREP(GE2D_END, ctx->in.crop.top + ctx->in.crop.height - 1));
15412 + regmap_write(ge2d->map, GE2D_SRC1_CLIPX_START_END,
15413 + FIELD_PREP(GE2D_START, ctx->in.crop.left) |
15414 +- FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width));
15415 ++ FIELD_PREP(GE2D_END, ctx->in.crop.left + ctx->in.crop.width - 1));
15416 + regmap_write(ge2d->map, GE2D_SRC2_CLIPY_START_END,
15417 + FIELD_PREP(GE2D_START, ctx->out.crop.top) |
15418 +- FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height));
15419 ++ FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1));
15420 + regmap_write(ge2d->map, GE2D_SRC2_CLIPX_START_END,
15421 + FIELD_PREP(GE2D_START, ctx->out.crop.left) |
15422 +- FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width));
15423 ++ FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1));
15424 + regmap_write(ge2d->map, GE2D_DST_CLIPY_START_END,
15425 + FIELD_PREP(GE2D_START, ctx->out.crop.top) |
15426 +- FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height));
15427 ++ FIELD_PREP(GE2D_END, ctx->out.crop.top + ctx->out.crop.height - 1));
15428 + regmap_write(ge2d->map, GE2D_DST_CLIPX_START_END,
15429 + FIELD_PREP(GE2D_START, ctx->out.crop.left) |
15430 +- FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width));
15431 ++ FIELD_PREP(GE2D_END, ctx->out.crop.left + ctx->out.crop.width - 1));
15432 +
15433 + regmap_write(ge2d->map, GE2D_SRC1_Y_START_END,
15434 +- FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height));
15435 ++ FIELD_PREP(GE2D_END, ctx->in.pix_fmt.height - 1));
15436 + regmap_write(ge2d->map, GE2D_SRC1_X_START_END,
15437 +- FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width));
15438 ++ FIELD_PREP(GE2D_END, ctx->in.pix_fmt.width - 1));
15439 + regmap_write(ge2d->map, GE2D_SRC2_Y_START_END,
15440 +- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height));
15441 ++ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1));
15442 + regmap_write(ge2d->map, GE2D_SRC2_X_START_END,
15443 +- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width));
15444 ++ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1));
15445 + regmap_write(ge2d->map, GE2D_DST_Y_START_END,
15446 +- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height));
15447 ++ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.height - 1));
15448 + regmap_write(ge2d->map, GE2D_DST_X_START_END,
15449 +- FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width));
15450 ++ FIELD_PREP(GE2D_END, ctx->out.pix_fmt.width - 1));
15451 +
15452 + /* Color, no blend, use source color */
15453 + reg = GE2D_ALU_DO_COLOR_OPERATION_LOGIC(LOGIC_OPERATION_COPY,
15454 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
15455 +index cd27f637dbe7c..cfc7ebed8fb7a 100644
15456 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
15457 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
15458 +@@ -102,6 +102,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
15459 + vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
15460 +
15461 + fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
15462 ++ if (!fw)
15463 ++ return ERR_PTR(-ENOMEM);
15464 + fw->type = VPU;
15465 + fw->ops = &mtk_vcodec_vpu_msg;
15466 + fw->pdev = fw_pdev;
15467 +diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
15468 +index 5b9b57f4d9bf8..68cf68dbcace2 100644
15469 +--- a/drivers/media/platform/omap3isp/ispstat.c
15470 ++++ b/drivers/media/platform/omap3isp/ispstat.c
15471 +@@ -512,7 +512,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
15472 + int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
15473 + struct omap3isp_stat_data_time32 *data)
15474 + {
15475 +- struct omap3isp_stat_data data64;
15476 ++ struct omap3isp_stat_data data64 = { };
15477 + int ret;
15478 +
15479 + ret = omap3isp_stat_request_statistics(stat, &data64);
15480 +@@ -521,7 +521,8 @@ int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
15481 +
15482 + data->ts.tv_sec = data64.ts.tv_sec;
15483 + data->ts.tv_usec = data64.ts.tv_usec;
15484 +- memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts));
15485 ++ data->buf = (uintptr_t)data64.buf;
15486 ++ memcpy(&data->frame, &data64.frame, sizeof(data->frame));
15487 +
15488 + return 0;
15489 + }
15490 +diff --git a/drivers/media/platform/qcom/camss/camss-csid-170.c b/drivers/media/platform/qcom/camss/camss-csid-170.c
15491 +index ac22ff29d2a9f..82f59933ad7b3 100644
15492 +--- a/drivers/media/platform/qcom/camss/camss-csid-170.c
15493 ++++ b/drivers/media/platform/qcom/camss/camss-csid-170.c
15494 +@@ -105,7 +105,8 @@
15495 + #define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\
15496 + + 0x100 * (rdi))
15497 + #define RDI_CTRL_HALT_CMD 0
15498 +-#define ALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
15499 ++#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
15500 ++#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
15501 + #define RDI_CTRL_HALT_MODE 2
15502 +
15503 + #define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\
15504 +@@ -366,7 +367,7 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
15505 + val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
15506 + writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
15507 +
15508 +- val = DATA_TYPE_RAW_10BIT << TPG_DT_n_CFG_1_DATA_TYPE;
15509 ++ val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
15510 + writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
15511 +
15512 + val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
15513 +@@ -382,8 +383,9 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
15514 + val = 1 << RDI_CFG0_BYTE_CNTR_EN;
15515 + val |= 1 << RDI_CFG0_FORMAT_MEASURE_EN;
15516 + val |= 1 << RDI_CFG0_TIMESTAMP_EN;
15517 ++ /* note: for non-RDI path, this should be format->decode_format */
15518 + val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
15519 +- val |= DATA_TYPE_RAW_10BIT << RDI_CFG0_DATA_TYPE;
15520 ++ val |= format->data_type << RDI_CFG0_DATA_TYPE;
15521 + val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
15522 + val |= dt_id << RDI_CFG0_DT_ID;
15523 + writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
15524 +@@ -443,13 +445,10 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
15525 + val |= 1 << CSI2_RX_CFG1_MISR_EN;
15526 + writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1); // csi2_vc_mode_shift_val ?
15527 +
15528 +- /* error irqs start at BIT(11) */
15529 +- writel_relaxed(~0u, csid->base + CSID_CSI2_RX_IRQ_MASK);
15530 +-
15531 +- /* RDI irq */
15532 +- writel_relaxed(~0u, csid->base + CSID_TOP_IRQ_MASK);
15533 +-
15534 +- val = 1 << RDI_CTRL_HALT_CMD;
15535 ++ if (enable)
15536 ++ val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
15537 ++ else
15538 ++ val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
15539 + writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
15540 + }
15541 +
15542 +diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
15543 +index f524af712a843..600150cfc4f70 100644
15544 +--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
15545 ++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
15546 +@@ -395,17 +395,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
15547 + */
15548 + static int vfe_halt(struct vfe_device *vfe)
15549 + {
15550 +- unsigned long time;
15551 +-
15552 +- reinit_completion(&vfe->halt_complete);
15553 +-
15554 +- time = wait_for_completion_timeout(&vfe->halt_complete,
15555 +- msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
15556 +- if (!time) {
15557 +- dev_err(vfe->camss->dev, "VFE halt timeout\n");
15558 +- return -EIO;
15559 +- }
15560 +-
15561 ++ /* rely on vfe_disable_output() to stop the VFE */
15562 + return 0;
15563 + }
15564 +
15565 +diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
15566 +index 84c3a511ec31e..0bca95d016507 100644
15567 +--- a/drivers/media/platform/qcom/venus/helpers.c
15568 ++++ b/drivers/media/platform/qcom/venus/helpers.c
15569 +@@ -189,7 +189,6 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
15570 + buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
15571 + buf->attrs);
15572 + if (!buf->va) {
15573 +- kfree(buf);
15574 + ret = -ENOMEM;
15575 + goto fail;
15576 + }
15577 +@@ -209,6 +208,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
15578 + return 0;
15579 +
15580 + fail:
15581 ++ kfree(buf);
15582 + venus_helper_free_dpb_bufs(inst);
15583 + return ret;
15584 + }
15585 +diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
15586 +index 5aea07307e02e..4ecd444050bb6 100644
15587 +--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
15588 ++++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
15589 +@@ -1054,6 +1054,8 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
15590 + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
15591 + break;
15592 + }
15593 ++ case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI:
15594 ++ return -ENOTSUPP;
15595 +
15596 + /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
15597 + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
15598 +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
15599 +index 84bafc3118cc6..adea4c3b8c204 100644
15600 +--- a/drivers/media/platform/qcom/venus/venc.c
15601 ++++ b/drivers/media/platform/qcom/venus/venc.c
15602 +@@ -662,8 +662,8 @@ static int venc_set_properties(struct venus_inst *inst)
15603 +
15604 + ptype = HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8;
15605 + h264_transform.enable_type = 0;
15606 +- if (ctr->profile.h264 == HFI_H264_PROFILE_HIGH ||
15607 +- ctr->profile.h264 == HFI_H264_PROFILE_CONSTRAINED_HIGH)
15608 ++ if (ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH ||
15609 ++ ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
15610 + h264_transform.enable_type = ctr->h264_8x8_transform;
15611 +
15612 + ret = hfi_session_set_property(inst, ptype, &h264_transform);
15613 +diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
15614 +index 1ada42df314dc..ea5805e71c143 100644
15615 +--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
15616 ++++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
15617 +@@ -320,8 +320,8 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
15618 + ctr->intra_refresh_period = ctrl->val;
15619 + break;
15620 + case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
15621 +- if (ctr->profile.h264 != HFI_H264_PROFILE_HIGH &&
15622 +- ctr->profile.h264 != HFI_H264_PROFILE_CONSTRAINED_HIGH)
15623 ++ if (ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
15624 ++ ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
15625 + return -EINVAL;
15626 +
15627 + /*
15628 +@@ -457,7 +457,7 @@ int venc_ctrl_init(struct venus_inst *inst)
15629 + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1);
15630 +
15631 + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
15632 +- V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 0);
15633 ++ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 1);
15634 +
15635 + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
15636 + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1);
15637 +diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
15638 +index 7799da1cc261b..3e936a2ca36c6 100644
15639 +--- a/drivers/media/platform/ti-vpe/cal-video.c
15640 ++++ b/drivers/media/platform/ti-vpe/cal-video.c
15641 +@@ -823,6 +823,9 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
15642 + /* Enumerate sub device formats and enable all matching local formats */
15643 + ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats,
15644 + sizeof(*ctx->active_fmt), GFP_KERNEL);
15645 ++ if (!ctx->active_fmt)
15646 ++ return -ENOMEM;
15647 ++
15648 + ctx->num_active_fmt = 0;
15649 +
15650 + for (j = 0, i = 0; ; ++j) {
15651 +diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
15652 +index c6cd2e6d8e654..a50701cfbbd7b 100644
15653 +--- a/drivers/media/rc/gpio-ir-tx.c
15654 ++++ b/drivers/media/rc/gpio-ir-tx.c
15655 +@@ -48,11 +48,29 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
15656 + return 0;
15657 + }
15658 +
15659 ++static void delay_until(ktime_t until)
15660 ++{
15661 ++ /*
15662 ++ * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on
15663 ++ * m68k ndelay(s64) does not compile; so use s32 rather than s64.
15664 ++ */
15665 ++ s32 delta;
15666 ++
15667 ++ while (true) {
15668 ++ delta = ktime_us_delta(until, ktime_get());
15669 ++ if (delta <= 0)
15670 ++ return;
15671 ++
15672 ++ /* udelay more than 1ms may not work */
15673 ++ delta = min(delta, 1000);
15674 ++ udelay(delta);
15675 ++ }
15676 ++}
15677 ++
15678 + static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
15679 + uint count)
15680 + {
15681 + ktime_t edge;
15682 +- s32 delta;
15683 + int i;
15684 +
15685 + local_irq_disable();
15686 +@@ -63,9 +81,7 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
15687 + gpiod_set_value(gpio_ir->gpio, !(i % 2));
15688 +
15689 + edge = ktime_add_us(edge, txbuf[i]);
15690 +- delta = ktime_us_delta(edge, ktime_get());
15691 +- if (delta > 0)
15692 +- udelay(delta);
15693 ++ delay_until(edge);
15694 + }
15695 +
15696 + gpiod_set_value(gpio_ir->gpio, 0);
15697 +@@ -97,9 +113,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
15698 + if (i % 2) {
15699 + // space
15700 + edge = ktime_add_us(edge, txbuf[i]);
15701 +- delta = ktime_us_delta(edge, ktime_get());
15702 +- if (delta > 0)
15703 +- udelay(delta);
15704 ++ delay_until(edge);
15705 + } else {
15706 + // pulse
15707 + ktime_t last = ktime_add_us(edge, txbuf[i]);
15708 +diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
15709 +index 7e98e7e3aacec..1968067092594 100644
15710 +--- a/drivers/media/rc/ir_toy.c
15711 ++++ b/drivers/media/rc/ir_toy.c
15712 +@@ -458,7 +458,7 @@ static int irtoy_probe(struct usb_interface *intf,
15713 + err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL);
15714 + if (err != 0) {
15715 + dev_err(irtoy->dev, "fail to submit in urb: %d\n", err);
15716 +- return err;
15717 ++ goto free_rcdev;
15718 + }
15719 +
15720 + err = irtoy_setup(irtoy);
15721 +diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
15722 +index d79b65854627c..4676083cee3b8 100644
15723 +--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c
15724 ++++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
15725 +@@ -455,6 +455,9 @@ struct vidtv_encoder
15726 + e->name = kstrdup(args.name, GFP_KERNEL);
15727 +
15728 + e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ);
15729 ++ if (!e->encoder_buf)
15730 ++ goto out_kfree_e;
15731 ++
15732 + e->encoder_buf_sz = VIDTV_S302M_BUF_SZ;
15733 + e->encoder_buf_offset = 0;
15734 +
15735 +@@ -467,10 +470,8 @@ struct vidtv_encoder
15736 + e->is_video_encoder = false;
15737 +
15738 + ctx = kzalloc(priv_sz, GFP_KERNEL);
15739 +- if (!ctx) {
15740 +- kfree(e);
15741 +- return NULL;
15742 +- }
15743 ++ if (!ctx)
15744 ++ goto out_kfree_buf;
15745 +
15746 + e->ctx = ctx;
15747 + ctx->last_duration = 0;
15748 +@@ -498,6 +499,14 @@ struct vidtv_encoder
15749 + e->next = NULL;
15750 +
15751 + return e;
15752 ++
15753 ++out_kfree_buf:
15754 ++ kfree(e->encoder_buf);
15755 ++
15756 ++out_kfree_e:
15757 ++ kfree(e->name);
15758 ++ kfree(e);
15759 ++ return NULL;
15760 + }
15761 +
15762 + void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e)
15763 +diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
15764 +index b451ce3cb169a..ae25d2cbfdfee 100644
15765 +--- a/drivers/media/usb/em28xx/em28xx-cards.c
15766 ++++ b/drivers/media/usb/em28xx/em28xx-cards.c
15767 +@@ -3936,6 +3936,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
15768 + goto err_free;
15769 + }
15770 +
15771 ++ kref_init(&dev->ref);
15772 ++
15773 + dev->devno = nr;
15774 + dev->model = id->driver_info;
15775 + dev->alt = -1;
15776 +@@ -4036,6 +4038,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
15777 + }
15778 +
15779 + if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) {
15780 ++ kref_init(&dev->dev_next->ref);
15781 ++
15782 + dev->dev_next->ts = SECONDARY_TS;
15783 + dev->dev_next->alt = -1;
15784 + dev->dev_next->is_audio_only = has_vendor_audio &&
15785 +@@ -4090,12 +4094,8 @@ static int em28xx_usb_probe(struct usb_interface *intf,
15786 + em28xx_write_reg(dev, 0x0b, 0x82);
15787 + mdelay(100);
15788 + }
15789 +-
15790 +- kref_init(&dev->dev_next->ref);
15791 + }
15792 +
15793 +- kref_init(&dev->ref);
15794 +-
15795 + request_modules(dev);
15796 +
15797 + /*
15798 +@@ -4150,11 +4150,8 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
15799 +
15800 + em28xx_close_extension(dev);
15801 +
15802 +- if (dev->dev_next) {
15803 +- em28xx_close_extension(dev->dev_next);
15804 ++ if (dev->dev_next)
15805 + em28xx_release_resources(dev->dev_next);
15806 +- }
15807 +-
15808 + em28xx_release_resources(dev);
15809 +
15810 + if (dev->dev_next) {
15811 +diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
15812 +index c742cc88fac5c..1fa6f10ee157b 100644
15813 +--- a/drivers/media/usb/go7007/s2250-board.c
15814 ++++ b/drivers/media/usb/go7007/s2250-board.c
15815 +@@ -504,6 +504,7 @@ static int s2250_probe(struct i2c_client *client,
15816 + u8 *data;
15817 + struct go7007 *go = i2c_get_adapdata(adapter);
15818 + struct go7007_usb *usb = go->hpi_context;
15819 ++ int err = -EIO;
15820 +
15821 + audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1);
15822 + if (IS_ERR(audio))
15823 +@@ -532,11 +533,8 @@ static int s2250_probe(struct i2c_client *client,
15824 + V4L2_CID_HUE, -512, 511, 1, 0);
15825 + sd->ctrl_handler = &state->hdl;
15826 + if (state->hdl.error) {
15827 +- int err = state->hdl.error;
15828 +-
15829 +- v4l2_ctrl_handler_free(&state->hdl);
15830 +- kfree(state);
15831 +- return err;
15832 ++ err = state->hdl.error;
15833 ++ goto fail;
15834 + }
15835 +
15836 + state->std = V4L2_STD_NTSC;
15837 +@@ -600,7 +598,7 @@ fail:
15838 + i2c_unregister_device(audio);
15839 + v4l2_ctrl_handler_free(&state->hdl);
15840 + kfree(state);
15841 +- return -EIO;
15842 ++ return err;
15843 + }
15844 +
15845 + static int s2250_remove(struct i2c_client *client)
15846 +diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
15847 +index 563128d117317..60e57e0f19272 100644
15848 +--- a/drivers/media/usb/hdpvr/hdpvr-video.c
15849 ++++ b/drivers/media/usb/hdpvr/hdpvr-video.c
15850 +@@ -308,7 +308,6 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
15851 +
15852 + dev->status = STATUS_STREAMING;
15853 +
15854 +- INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
15855 + schedule_work(&dev->worker);
15856 +
15857 + v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
15858 +@@ -1165,6 +1164,9 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
15859 + bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP;
15860 + int res;
15861 +
15862 ++ // initialize dev->worker
15863 ++ INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
15864 ++
15865 + dev->cur_std = V4L2_STD_525_60;
15866 + dev->width = 720;
15867 + dev->height = 480;
15868 +diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
15869 +index 4e1698f788187..ce717502ea4c3 100644
15870 +--- a/drivers/media/usb/stk1160/stk1160-core.c
15871 ++++ b/drivers/media/usb/stk1160/stk1160-core.c
15872 +@@ -403,7 +403,7 @@ static void stk1160_disconnect(struct usb_interface *interface)
15873 + /* Here is the only place where isoc get released */
15874 + stk1160_uninit_isoc(dev);
15875 +
15876 +- stk1160_clear_queue(dev);
15877 ++ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
15878 +
15879 + video_unregister_device(&dev->vdev);
15880 + v4l2_device_disconnect(&dev->v4l2_dev);
15881 +diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
15882 +index 6a4eb616d5160..1aa953469402f 100644
15883 +--- a/drivers/media/usb/stk1160/stk1160-v4l.c
15884 ++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
15885 +@@ -258,7 +258,7 @@ out_uninit:
15886 + stk1160_uninit_isoc(dev);
15887 + out_stop_hw:
15888 + usb_set_interface(dev->udev, 0, 0);
15889 +- stk1160_clear_queue(dev);
15890 ++ stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED);
15891 +
15892 + mutex_unlock(&dev->v4l_lock);
15893 +
15894 +@@ -306,7 +306,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
15895 +
15896 + stk1160_stop_hw(dev);
15897 +
15898 +- stk1160_clear_queue(dev);
15899 ++ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
15900 +
15901 + stk1160_dbg("streaming stopped\n");
15902 +
15903 +@@ -745,7 +745,7 @@ static const struct video_device v4l_template = {
15904 + /********************************************************************/
15905 +
15906 + /* Must be called with both v4l_lock and vb_queue_lock hold */
15907 +-void stk1160_clear_queue(struct stk1160 *dev)
15908 ++void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state)
15909 + {
15910 + struct stk1160_buffer *buf;
15911 + unsigned long flags;
15912 +@@ -756,7 +756,7 @@ void stk1160_clear_queue(struct stk1160 *dev)
15913 + buf = list_first_entry(&dev->avail_bufs,
15914 + struct stk1160_buffer, list);
15915 + list_del(&buf->list);
15916 +- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
15917 ++ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
15918 + stk1160_dbg("buffer [%p/%d] aborted\n",
15919 + buf, buf->vb.vb2_buf.index);
15920 + }
15921 +@@ -766,7 +766,7 @@ void stk1160_clear_queue(struct stk1160 *dev)
15922 + buf = dev->isoc_ctl.buf;
15923 + dev->isoc_ctl.buf = NULL;
15924 +
15925 +- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
15926 ++ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
15927 + stk1160_dbg("buffer [%p/%d] aborted\n",
15928 + buf, buf->vb.vb2_buf.index);
15929 + }
15930 +diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
15931 +index a31ea1c80f255..a70963ce87533 100644
15932 +--- a/drivers/media/usb/stk1160/stk1160.h
15933 ++++ b/drivers/media/usb/stk1160/stk1160.h
15934 +@@ -166,7 +166,7 @@ struct regval {
15935 + int stk1160_vb2_setup(struct stk1160 *dev);
15936 + int stk1160_video_register(struct stk1160 *dev);
15937 + void stk1160_video_unregister(struct stk1160 *dev);
15938 +-void stk1160_clear_queue(struct stk1160 *dev);
15939 ++void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state);
15940 +
15941 + /* Provided by stk1160-video.c */
15942 + int stk1160_alloc_isoc(struct stk1160 *dev);
15943 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
15944 +index 54abe5245dcc4..df8cff47a7fb5 100644
15945 +--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
15946 ++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
15947 +@@ -112,7 +112,9 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
15948 + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
15949 + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
15950 + struct v4l2_ctrl_vp8_frame *p_vp8_frame;
15951 ++ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
15952 + struct v4l2_ctrl_fwht_params *p_fwht_params;
15953 ++ struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
15954 + void *p = ptr.p + idx * ctrl->elem_size;
15955 +
15956 + if (ctrl->p_def.p_const)
15957 +@@ -152,6 +154,13 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
15958 + p_vp8_frame = p;
15959 + p_vp8_frame->num_dct_parts = 1;
15960 + break;
15961 ++ case V4L2_CTRL_TYPE_VP9_FRAME:
15962 ++ p_vp9_frame = p;
15963 ++ p_vp9_frame->profile = 0;
15964 ++ p_vp9_frame->bit_depth = 8;
15965 ++ p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
15966 ++ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
15967 ++ break;
15968 + case V4L2_CTRL_TYPE_FWHT_PARAMS:
15969 + p_fwht_params = p;
15970 + p_fwht_params->version = V4L2_FWHT_VERSION;
15971 +@@ -160,6 +169,15 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
15972 + p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
15973 + (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
15974 + break;
15975 ++ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
15976 ++ p_h264_scaling_matrix = p;
15977 ++ /*
15978 ++ * The default (flat) H.264 scaling matrix when none are
15979 ++ * specified in the bitstream, this is according to formulas
15980 ++ * (7-8) and (7-9) of the specification.
15981 ++ */
15982 ++ memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix));
15983 ++ break;
15984 + }
15985 + }
15986 +
15987 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
15988 +index 9ac557b8e1467..642cb90f457c6 100644
15989 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
15990 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
15991 +@@ -279,8 +279,8 @@ static void v4l_print_format(const void *arg, bool write_only)
15992 + const struct v4l2_vbi_format *vbi;
15993 + const struct v4l2_sliced_vbi_format *sliced;
15994 + const struct v4l2_window *win;
15995 +- const struct v4l2_sdr_format *sdr;
15996 + const struct v4l2_meta_format *meta;
15997 ++ u32 pixelformat;
15998 + u32 planes;
15999 + unsigned i;
16000 +
16001 +@@ -299,8 +299,9 @@ static void v4l_print_format(const void *arg, bool write_only)
16002 + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
16003 + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
16004 + mp = &p->fmt.pix_mp;
16005 ++ pixelformat = mp->pixelformat;
16006 + pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
16007 +- mp->width, mp->height, &mp->pixelformat,
16008 ++ mp->width, mp->height, &pixelformat,
16009 + prt_names(mp->field, v4l2_field_names),
16010 + mp->colorspace, mp->num_planes, mp->flags,
16011 + mp->ycbcr_enc, mp->quantization, mp->xfer_func);
16012 +@@ -343,14 +344,15 @@ static void v4l_print_format(const void *arg, bool write_only)
16013 + break;
16014 + case V4L2_BUF_TYPE_SDR_CAPTURE:
16015 + case V4L2_BUF_TYPE_SDR_OUTPUT:
16016 +- sdr = &p->fmt.sdr;
16017 +- pr_cont(", pixelformat=%p4cc\n", &sdr->pixelformat);
16018 ++ pixelformat = p->fmt.sdr.pixelformat;
16019 ++ pr_cont(", pixelformat=%p4cc\n", &pixelformat);
16020 + break;
16021 + case V4L2_BUF_TYPE_META_CAPTURE:
16022 + case V4L2_BUF_TYPE_META_OUTPUT:
16023 + meta = &p->fmt.meta;
16024 ++ pixelformat = meta->dataformat;
16025 + pr_cont(", dataformat=%p4cc, buffersize=%u\n",
16026 +- &meta->dataformat, meta->buffersize);
16027 ++ &pixelformat, meta->buffersize);
16028 + break;
16029 + }
16030 + }
16031 +diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
16032 +index e2654b422334c..675e22895ebe6 100644
16033 +--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
16034 ++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
16035 +@@ -585,19 +585,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16036 + }
16037 + EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
16038 +
16039 +-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16040 +- struct v4l2_buffer *buf)
16041 ++static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
16042 ++ struct v4l2_buffer *buf)
16043 + {
16044 +- struct vb2_queue *vq;
16045 +- int ret = 0;
16046 +- unsigned int i;
16047 +-
16048 +- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
16049 +- ret = vb2_querybuf(vq, buf);
16050 +-
16051 + /* Adjust MMAP memory offsets for the CAPTURE queue */
16052 + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
16053 + if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
16054 ++ unsigned int i;
16055 ++
16056 + for (i = 0; i < buf->length; ++i)
16057 + buf->m.planes[i].m.mem_offset
16058 + += DST_QUEUE_OFF_BASE;
16059 +@@ -605,8 +600,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16060 + buf->m.offset += DST_QUEUE_OFF_BASE;
16061 + }
16062 + }
16063 ++}
16064 +
16065 +- return ret;
16066 ++int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16067 ++ struct v4l2_buffer *buf)
16068 ++{
16069 ++ struct vb2_queue *vq;
16070 ++ int ret;
16071 ++
16072 ++ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
16073 ++ ret = vb2_querybuf(vq, buf);
16074 ++ if (ret)
16075 ++ return ret;
16076 ++
16077 ++ /* Adjust MMAP memory offsets for the CAPTURE queue */
16078 ++ v4l2_m2m_adjust_mem_offset(vq, buf);
16079 ++
16080 ++ return 0;
16081 + }
16082 + EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
16083 +
16084 +@@ -763,6 +773,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16085 + if (ret)
16086 + return ret;
16087 +
16088 ++ /* Adjust MMAP memory offsets for the CAPTURE queue */
16089 ++ v4l2_m2m_adjust_mem_offset(vq, buf);
16090 ++
16091 + /*
16092 + * If the capture queue is streaming, but streaming hasn't started
16093 + * on the device, but was asked to stop, mark the previously queued
16094 +@@ -784,9 +797,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16095 + struct v4l2_buffer *buf)
16096 + {
16097 + struct vb2_queue *vq;
16098 ++ int ret;
16099 +
16100 + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
16101 +- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
16102 ++ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
16103 ++ if (ret)
16104 ++ return ret;
16105 ++
16106 ++ /* Adjust MMAP memory offsets for the CAPTURE queue */
16107 ++ v4l2_m2m_adjust_mem_offset(vq, buf);
16108 ++
16109 ++ return 0;
16110 + }
16111 + EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
16112 +
16113 +@@ -795,9 +816,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
16114 + {
16115 + struct video_device *vdev = video_devdata(file);
16116 + struct vb2_queue *vq;
16117 ++ int ret;
16118 +
16119 + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
16120 +- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
16121 ++ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
16122 ++ if (ret)
16123 ++ return ret;
16124 ++
16125 ++ /* Adjust MMAP memory offsets for the CAPTURE queue */
16126 ++ v4l2_m2m_adjust_mem_offset(vq, buf);
16127 ++
16128 ++ return 0;
16129 + }
16130 + EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
16131 +
16132 +diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
16133 +index 762d0c0f0716f..ecc78d6f89ed2 100644
16134 +--- a/drivers/memory/emif.c
16135 ++++ b/drivers/memory/emif.c
16136 +@@ -1025,7 +1025,7 @@ static struct emif_data *__init_or_module get_device_details(
16137 + temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
16138 + dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
16139 +
16140 +- if (!emif || !pd || !dev_info) {
16141 ++ if (!emif || !temp || !dev_info) {
16142 + dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
16143 + goto error;
16144 + }
16145 +@@ -1117,7 +1117,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
16146 + {
16147 + struct emif_data *emif;
16148 + struct resource *res;
16149 +- int irq;
16150 ++ int irq, ret;
16151 +
16152 + if (pdev->dev.of_node)
16153 + emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
16154 +@@ -1147,7 +1147,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
16155 + emif_onetime_settings(emif);
16156 + emif_debugfs_init(emif);
16157 + disable_and_clear_all_interrupts(emif);
16158 +- setup_interrupts(emif, irq);
16159 ++ ret = setup_interrupts(emif, irq);
16160 ++ if (ret)
16161 ++ goto error;
16162 +
16163 + /* One-time actions taken on probing the first device */
16164 + if (!emif1) {
16165 +diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
16166 +index 497b6edbf3ca1..25ba3c5e4ad6a 100644
16167 +--- a/drivers/memory/tegra/tegra20-emc.c
16168 ++++ b/drivers/memory/tegra/tegra20-emc.c
16169 +@@ -540,7 +540,7 @@ static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
16170 + unsigned int register_addr,
16171 + unsigned int *register_data)
16172 + {
16173 +- u32 memory_dev = emem_dev + 1;
16174 ++ u32 memory_dev = emem_dev ? 1 : 2;
16175 + u32 val, mr_mask = 0xff;
16176 + int err;
16177 +
16178 +diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
16179 +index c0450397b6735..7ea312f0840e0 100644
16180 +--- a/drivers/memstick/core/mspro_block.c
16181 ++++ b/drivers/memstick/core/mspro_block.c
16182 +@@ -186,13 +186,8 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
16183 +
16184 + mutex_lock(&mspro_block_disk_lock);
16185 +
16186 +- if (msb && msb->card) {
16187 ++ if (msb && msb->card)
16188 + msb->usage_count++;
16189 +- if ((mode & FMODE_WRITE) && msb->read_only)
16190 +- rc = -EROFS;
16191 +- else
16192 +- rc = 0;
16193 +- }
16194 +
16195 + mutex_unlock(&mspro_block_disk_lock);
16196 +
16197 +@@ -1239,6 +1234,9 @@ static int mspro_block_init_disk(struct memstick_dev *card)
16198 + set_capacity(msb->disk, capacity);
16199 + dev_dbg(&card->dev, "capacity set %ld\n", capacity);
16200 +
16201 ++ if (msb->read_only)
16202 ++ set_disk_ro(msb->disk, true);
16203 ++
16204 + rc = device_add_disk(&card->dev, msb->disk, NULL);
16205 + if (rc)
16206 + goto out_cleanup_disk;
16207 +diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
16208 +index 8d58c8df46cfb..56338f9dbd0ba 100644
16209 +--- a/drivers/mfd/asic3.c
16210 ++++ b/drivers/mfd/asic3.c
16211 +@@ -906,14 +906,14 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
16212 + ret = mfd_add_devices(&pdev->dev, pdev->id,
16213 + &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL);
16214 + if (ret < 0)
16215 +- goto out;
16216 ++ goto out_unmap;
16217 + }
16218 +
16219 + if (mem_sdio && (irq >= 0)) {
16220 + ret = mfd_add_devices(&pdev->dev, pdev->id,
16221 + &asic3_cell_mmc, 1, mem_sdio, irq, NULL);
16222 + if (ret < 0)
16223 +- goto out;
16224 ++ goto out_unmap;
16225 + }
16226 +
16227 + ret = 0;
16228 +@@ -927,8 +927,12 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
16229 + ret = mfd_add_devices(&pdev->dev, 0,
16230 + asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL);
16231 + }
16232 ++ return ret;
16233 +
16234 +- out:
16235 ++out_unmap:
16236 ++ if (asic->tmio_cnf)
16237 ++ iounmap(asic->tmio_cnf);
16238 ++out:
16239 + return ret;
16240 + }
16241 +
16242 +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
16243 +index 8a4f1d90dcfd1..1000572761a84 100644
16244 +--- a/drivers/mfd/mc13xxx-core.c
16245 ++++ b/drivers/mfd/mc13xxx-core.c
16246 +@@ -323,8 +323,10 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
16247 + adc1 |= MC13783_ADC1_ATOX;
16248 +
16249 + dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
16250 +- mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
16251 ++ ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
16252 + mc13xxx_handler_adcdone, __func__, &adcdone_data);
16253 ++ if (ret)
16254 ++ goto out;
16255 +
16256 + mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0);
16257 + mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1);
16258 +diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
16259 +index de6d44a158bba..3f514d77a843f 100644
16260 +--- a/drivers/misc/cardreader/alcor_pci.c
16261 ++++ b/drivers/misc/cardreader/alcor_pci.c
16262 +@@ -266,7 +266,7 @@ static int alcor_pci_probe(struct pci_dev *pdev,
16263 + if (!priv)
16264 + return -ENOMEM;
16265 +
16266 +- ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL);
16267 ++ ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL);
16268 + if (ret < 0)
16269 + return ret;
16270 + priv->id = ret;
16271 +@@ -280,7 +280,8 @@ static int alcor_pci_probe(struct pci_dev *pdev,
16272 + ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI);
16273 + if (ret) {
16274 + dev_err(&pdev->dev, "Cannot request region\n");
16275 +- return -ENOMEM;
16276 ++ ret = -ENOMEM;
16277 ++ goto error_free_ida;
16278 + }
16279 +
16280 + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
16281 +@@ -324,6 +325,8 @@ static int alcor_pci_probe(struct pci_dev *pdev,
16282 +
16283 + error_release_regions:
16284 + pci_release_regions(pdev);
16285 ++error_free_ida:
16286 ++ ida_free(&alcor_pci_idr, priv->id);
16287 + return ret;
16288 + }
16289 +
16290 +@@ -337,7 +340,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
16291 +
16292 + mfd_remove_devices(&pdev->dev);
16293 +
16294 +- ida_simple_remove(&alcor_pci_idr, priv->id);
16295 ++ ida_free(&alcor_pci_idr, priv->id);
16296 +
16297 + pci_release_regions(pdev);
16298 + pci_set_drvdata(pdev, NULL);
16299 +diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
16300 +index fc084ee5106ec..09001fd9db85f 100644
16301 +--- a/drivers/misc/habanalabs/common/debugfs.c
16302 ++++ b/drivers/misc/habanalabs/common/debugfs.c
16303 +@@ -890,6 +890,8 @@ static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
16304 + pci_set_power_state(hdev->pdev, PCI_D0);
16305 + pci_restore_state(hdev->pdev);
16306 + rc = pci_enable_device(hdev->pdev);
16307 ++ if (rc < 0)
16308 ++ return rc;
16309 + } else if (value == 2) {
16310 + pci_save_state(hdev->pdev);
16311 + pci_disable_device(hdev->pdev);
16312 +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
16313 +index 67c5b452dd356..88b91ad8e5413 100644
16314 +--- a/drivers/misc/kgdbts.c
16315 ++++ b/drivers/misc/kgdbts.c
16316 +@@ -1070,10 +1070,10 @@ static int kgdbts_option_setup(char *opt)
16317 + {
16318 + if (strlen(opt) >= MAX_CONFIG_LEN) {
16319 + printk(KERN_ERR "kgdbts: config string too long\n");
16320 +- return -ENOSPC;
16321 ++ return 1;
16322 + }
16323 + strcpy(config, opt);
16324 +- return 0;
16325 ++ return 1;
16326 + }
16327 +
16328 + __setup("kgdbts=", kgdbts_option_setup);
16329 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
16330 +index 67bb6a25fd0a0..64ce3f830262b 100644
16331 +--- a/drivers/misc/mei/hw-me-regs.h
16332 ++++ b/drivers/misc/mei/hw-me-regs.h
16333 +@@ -107,6 +107,7 @@
16334 + #define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
16335 + #define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
16336 + #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
16337 ++#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */
16338 +
16339 + /*
16340 + * MEI HW Section
16341 +@@ -120,6 +121,7 @@
16342 + #define PCI_CFG_HFS_2 0x48
16343 + #define PCI_CFG_HFS_3 0x60
16344 + # define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
16345 ++# define PCI_CFG_HFS_3_FW_SKU_IGN 0x00000000
16346 + # define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
16347 + #define PCI_CFG_HFS_4 0x64
16348 + #define PCI_CFG_HFS_5 0x68
16349 +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
16350 +index d3a6c07286451..fbc4c95818645 100644
16351 +--- a/drivers/misc/mei/hw-me.c
16352 ++++ b/drivers/misc/mei/hw-me.c
16353 +@@ -1405,16 +1405,16 @@ static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
16354 + .quirk_probe = mei_me_fw_type_sps_4
16355 +
16356 + /**
16357 +- * mei_me_fw_type_sps() - check for sps sku
16358 ++ * mei_me_fw_type_sps_ign() - check for sps or ign sku
16359 + *
16360 +- * Read ME FW Status register to check for SPS Firmware.
16361 +- * The SPS FW is only signaled in pci function 0
16362 ++ * Read ME FW Status register to check for SPS or IGN Firmware.
16363 ++ * The SPS/IGN FW is only signaled in pci function 0
16364 + *
16365 + * @pdev: pci device
16366 + *
16367 +- * Return: true in case of SPS firmware
16368 ++ * Return: true in case of SPS/IGN firmware
16369 + */
16370 +-static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
16371 ++static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
16372 + {
16373 + u32 reg;
16374 + u32 fw_type;
16375 +@@ -1427,14 +1427,15 @@ static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
16376 +
16377 + dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
16378 +
16379 +- return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
16380 ++ return fw_type == PCI_CFG_HFS_3_FW_SKU_IGN ||
16381 ++ fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
16382 + }
16383 +
16384 + #define MEI_CFG_KIND_ITOUCH \
16385 + .kind = "itouch"
16386 +
16387 +-#define MEI_CFG_FW_SPS \
16388 +- .quirk_probe = mei_me_fw_type_sps
16389 ++#define MEI_CFG_FW_SPS_IGN \
16390 ++ .quirk_probe = mei_me_fw_type_sps_ign
16391 +
16392 + #define MEI_CFG_FW_VER_SUPP \
16393 + .fw_ver_supported = 1
16394 +@@ -1535,7 +1536,7 @@ static const struct mei_cfg mei_me_pch12_sps_cfg = {
16395 + MEI_CFG_PCH8_HFS,
16396 + MEI_CFG_FW_VER_SUPP,
16397 + MEI_CFG_DMA_128,
16398 +- MEI_CFG_FW_SPS,
16399 ++ MEI_CFG_FW_SPS_IGN,
16400 + };
16401 +
16402 + /* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
16403 +@@ -1545,7 +1546,7 @@ static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
16404 + MEI_CFG_KIND_ITOUCH,
16405 + MEI_CFG_PCH8_HFS,
16406 + MEI_CFG_FW_VER_SUPP,
16407 +- MEI_CFG_FW_SPS,
16408 ++ MEI_CFG_FW_SPS_IGN,
16409 + };
16410 +
16411 + /* Tiger Lake and newer devices */
16412 +@@ -1562,7 +1563,7 @@ static const struct mei_cfg mei_me_pch15_sps_cfg = {
16413 + MEI_CFG_FW_VER_SUPP,
16414 + MEI_CFG_DMA_128,
16415 + MEI_CFG_TRC,
16416 +- MEI_CFG_FW_SPS,
16417 ++ MEI_CFG_FW_SPS_IGN,
16418 + };
16419 +
16420 + /*
16421 +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
16422 +index a67f4f2d33a93..0706322154cbe 100644
16423 +--- a/drivers/misc/mei/interrupt.c
16424 ++++ b/drivers/misc/mei/interrupt.c
16425 +@@ -424,31 +424,26 @@ int mei_irq_read_handler(struct mei_device *dev,
16426 + list_for_each_entry(cl, &dev->file_list, link) {
16427 + if (mei_cl_hbm_equal(cl, mei_hdr)) {
16428 + cl_dbg(dev, cl, "got a message\n");
16429 +- break;
16430 ++ ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
16431 ++ goto reset_slots;
16432 + }
16433 + }
16434 +
16435 + /* if no recipient cl was found we assume corrupted header */
16436 +- if (&cl->link == &dev->file_list) {
16437 +- /* A message for not connected fixed address clients
16438 +- * should be silently discarded
16439 +- * On power down client may be force cleaned,
16440 +- * silently discard such messages
16441 +- */
16442 +- if (hdr_is_fixed(mei_hdr) ||
16443 +- dev->dev_state == MEI_DEV_POWER_DOWN) {
16444 +- mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
16445 +- ret = 0;
16446 +- goto reset_slots;
16447 +- }
16448 +- dev_err(dev->dev, "no destination client found 0x%08X\n",
16449 +- dev->rd_msg_hdr[0]);
16450 +- ret = -EBADMSG;
16451 +- goto end;
16452 ++ /* A message for not connected fixed address clients
16453 ++ * should be silently discarded
16454 ++ * On power down client may be force cleaned,
16455 ++ * silently discard such messages
16456 ++ */
16457 ++ if (hdr_is_fixed(mei_hdr) ||
16458 ++ dev->dev_state == MEI_DEV_POWER_DOWN) {
16459 ++ mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
16460 ++ ret = 0;
16461 ++ goto reset_slots;
16462 + }
16463 +-
16464 +- ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
16465 +-
16466 ++ dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
16467 ++ ret = -EBADMSG;
16468 ++ goto end;
16469 +
16470 + reset_slots:
16471 + /* reset the number of slots and header */
16472 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
16473 +index 3a45aaf002ac8..a738253dbd056 100644
16474 +--- a/drivers/misc/mei/pci-me.c
16475 ++++ b/drivers/misc/mei/pci-me.c
16476 +@@ -113,6 +113,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
16477 + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
16478 + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
16479 + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
16480 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
16481 +
16482 + /* required last entry */
16483 + {0, }
16484 +diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
16485 +index 096ae624be9aa..58a60afa650b6 100644
16486 +--- a/drivers/mmc/core/bus.c
16487 ++++ b/drivers/mmc/core/bus.c
16488 +@@ -15,6 +15,7 @@
16489 + #include <linux/stat.h>
16490 + #include <linux/of.h>
16491 + #include <linux/pm_runtime.h>
16492 ++#include <linux/sysfs.h>
16493 +
16494 + #include <linux/mmc/card.h>
16495 + #include <linux/mmc/host.h>
16496 +@@ -34,13 +35,13 @@ static ssize_t type_show(struct device *dev,
16497 +
16498 + switch (card->type) {
16499 + case MMC_TYPE_MMC:
16500 +- return sprintf(buf, "MMC\n");
16501 ++ return sysfs_emit(buf, "MMC\n");
16502 + case MMC_TYPE_SD:
16503 +- return sprintf(buf, "SD\n");
16504 ++ return sysfs_emit(buf, "SD\n");
16505 + case MMC_TYPE_SDIO:
16506 +- return sprintf(buf, "SDIO\n");
16507 ++ return sysfs_emit(buf, "SDIO\n");
16508 + case MMC_TYPE_SD_COMBO:
16509 +- return sprintf(buf, "SDcombo\n");
16510 ++ return sysfs_emit(buf, "SDcombo\n");
16511 + default:
16512 + return -EFAULT;
16513 + }
16514 +diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
16515 +index 8105852c4b62f..3996b191b68d1 100644
16516 +--- a/drivers/mmc/core/bus.h
16517 ++++ b/drivers/mmc/core/bus.h
16518 +@@ -9,6 +9,7 @@
16519 + #define _MMC_CORE_BUS_H
16520 +
16521 + #include <linux/device.h>
16522 ++#include <linux/sysfs.h>
16523 +
16524 + struct mmc_host;
16525 + struct mmc_card;
16526 +@@ -17,7 +18,7 @@ struct mmc_card;
16527 + static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
16528 + { \
16529 + struct mmc_card *card = mmc_dev_to_card(dev); \
16530 +- return sprintf(buf, fmt, args); \
16531 ++ return sysfs_emit(buf, fmt, args); \
16532 + } \
16533 + static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
16534 +
16535 +diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
16536 +index cf140f4ec8643..d739e2b631fe8 100644
16537 +--- a/drivers/mmc/core/host.c
16538 ++++ b/drivers/mmc/core/host.c
16539 +@@ -588,6 +588,16 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
16540 +
16541 + EXPORT_SYMBOL(mmc_alloc_host);
16542 +
16543 ++static int mmc_validate_host_caps(struct mmc_host *host)
16544 ++{
16545 ++ if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
16546 ++ dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n");
16547 ++ return -EINVAL;
16548 ++ }
16549 ++
16550 ++ return 0;
16551 ++}
16552 ++
16553 + /**
16554 + * mmc_add_host - initialise host hardware
16555 + * @host: mmc host
16556 +@@ -600,8 +610,9 @@ int mmc_add_host(struct mmc_host *host)
16557 + {
16558 + int err;
16559 +
16560 +- WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
16561 +- !host->ops->enable_sdio_irq);
16562 ++ err = mmc_validate_host_caps(host);
16563 ++ if (err)
16564 ++ return err;
16565 +
16566 + err = device_add(&host->class_dev);
16567 + if (err)
16568 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
16569 +index 8421519c2a983..43d1b9b2fa499 100644
16570 +--- a/drivers/mmc/core/mmc.c
16571 ++++ b/drivers/mmc/core/mmc.c
16572 +@@ -12,6 +12,7 @@
16573 + #include <linux/slab.h>
16574 + #include <linux/stat.h>
16575 + #include <linux/pm_runtime.h>
16576 ++#include <linux/sysfs.h>
16577 +
16578 + #include <linux/mmc/host.h>
16579 + #include <linux/mmc/card.h>
16580 +@@ -812,12 +813,11 @@ static ssize_t mmc_fwrev_show(struct device *dev,
16581 + {
16582 + struct mmc_card *card = mmc_dev_to_card(dev);
16583 +
16584 +- if (card->ext_csd.rev < 7) {
16585 +- return sprintf(buf, "0x%x\n", card->cid.fwrev);
16586 +- } else {
16587 +- return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
16588 +- card->ext_csd.fwrev);
16589 +- }
16590 ++ if (card->ext_csd.rev < 7)
16591 ++ return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
16592 ++ else
16593 ++ return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
16594 ++ card->ext_csd.fwrev);
16595 + }
16596 +
16597 + static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
16598 +@@ -830,10 +830,10 @@ static ssize_t mmc_dsr_show(struct device *dev,
16599 + struct mmc_host *host = card->host;
16600 +
16601 + if (card->csd.dsr_imp && host->dsr_req)
16602 +- return sprintf(buf, "0x%x\n", host->dsr);
16603 ++ return sysfs_emit(buf, "0x%x\n", host->dsr);
16604 + else
16605 + /* return default DSR value */
16606 +- return sprintf(buf, "0x%x\n", 0x404);
16607 ++ return sysfs_emit(buf, "0x%x\n", 0x404);
16608 + }
16609 +
16610 + static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
16611 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
16612 +index bfbfed30dc4d8..68df6b2f49cc7 100644
16613 +--- a/drivers/mmc/core/sd.c
16614 ++++ b/drivers/mmc/core/sd.c
16615 +@@ -13,6 +13,7 @@
16616 + #include <linux/stat.h>
16617 + #include <linux/pm_runtime.h>
16618 + #include <linux/scatterlist.h>
16619 ++#include <linux/sysfs.h>
16620 +
16621 + #include <linux/mmc/host.h>
16622 + #include <linux/mmc/card.h>
16623 +@@ -708,18 +709,16 @@ MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
16624 + MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
16625 +
16626 +
16627 +-static ssize_t mmc_dsr_show(struct device *dev,
16628 +- struct device_attribute *attr,
16629 +- char *buf)
16630 ++static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
16631 ++ char *buf)
16632 + {
16633 +- struct mmc_card *card = mmc_dev_to_card(dev);
16634 +- struct mmc_host *host = card->host;
16635 +-
16636 +- if (card->csd.dsr_imp && host->dsr_req)
16637 +- return sprintf(buf, "0x%x\n", host->dsr);
16638 +- else
16639 +- /* return default DSR value */
16640 +- return sprintf(buf, "0x%x\n", 0x404);
16641 ++ struct mmc_card *card = mmc_dev_to_card(dev);
16642 ++ struct mmc_host *host = card->host;
16643 ++
16644 ++ if (card->csd.dsr_imp && host->dsr_req)
16645 ++ return sysfs_emit(buf, "0x%x\n", host->dsr);
16646 ++ /* return default DSR value */
16647 ++ return sysfs_emit(buf, "0x%x\n", 0x404);
16648 + }
16649 +
16650 + static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
16651 +@@ -735,9 +734,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
16652 + \
16653 + if (num > card->num_info) \
16654 + return -ENODATA; \
16655 +- if (!card->info[num-1][0]) \
16656 ++ if (!card->info[num - 1][0]) \
16657 + return 0; \
16658 +- return sprintf(buf, "%s\n", card->info[num-1]); \
16659 ++ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
16660 + } \
16661 + static DEVICE_ATTR_RO(info##num)
16662 +
16663 +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
16664 +index 41164748723d2..25799accf8a02 100644
16665 +--- a/drivers/mmc/core/sdio.c
16666 ++++ b/drivers/mmc/core/sdio.c
16667 +@@ -7,6 +7,7 @@
16668 +
16669 + #include <linux/err.h>
16670 + #include <linux/pm_runtime.h>
16671 ++#include <linux/sysfs.h>
16672 +
16673 + #include <linux/mmc/host.h>
16674 + #include <linux/mmc/card.h>
16675 +@@ -40,9 +41,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
16676 + \
16677 + if (num > card->num_info) \
16678 + return -ENODATA; \
16679 +- if (!card->info[num-1][0]) \
16680 ++ if (!card->info[num - 1][0]) \
16681 + return 0; \
16682 +- return sprintf(buf, "%s\n", card->info[num-1]); \
16683 ++ return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
16684 + } \
16685 + static DEVICE_ATTR_RO(info##num)
16686 +
16687 +diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
16688 +index fda03b35c14a5..c6268c38c69e5 100644
16689 +--- a/drivers/mmc/core/sdio_bus.c
16690 ++++ b/drivers/mmc/core/sdio_bus.c
16691 +@@ -14,6 +14,7 @@
16692 + #include <linux/pm_runtime.h>
16693 + #include <linux/pm_domain.h>
16694 + #include <linux/acpi.h>
16695 ++#include <linux/sysfs.h>
16696 +
16697 + #include <linux/mmc/card.h>
16698 + #include <linux/mmc/host.h>
16699 +@@ -35,7 +36,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
16700 + struct sdio_func *func; \
16701 + \
16702 + func = dev_to_sdio_func (dev); \
16703 +- return sprintf(buf, format_string, args); \
16704 ++ return sysfs_emit(buf, format_string, args); \
16705 + } \
16706 + static DEVICE_ATTR_RO(field)
16707 +
16708 +@@ -52,9 +53,9 @@ static ssize_t info##num##_show(struct device *dev, struct device_attribute *att
16709 + \
16710 + if (num > func->num_info) \
16711 + return -ENODATA; \
16712 +- if (!func->info[num-1][0]) \
16713 ++ if (!func->info[num - 1][0]) \
16714 + return 0; \
16715 +- return sprintf(buf, "%s\n", func->info[num-1]); \
16716 ++ return sysfs_emit(buf, "%s\n", func->info[num - 1]); \
16717 + } \
16718 + static DEVICE_ATTR_RO(info##num)
16719 +
16720 +diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
16721 +index 2a757c88f9d21..80de660027d89 100644
16722 +--- a/drivers/mmc/host/davinci_mmc.c
16723 ++++ b/drivers/mmc/host/davinci_mmc.c
16724 +@@ -1375,8 +1375,12 @@ static int davinci_mmcsd_suspend(struct device *dev)
16725 + static int davinci_mmcsd_resume(struct device *dev)
16726 + {
16727 + struct mmc_davinci_host *host = dev_get_drvdata(dev);
16728 ++ int ret;
16729 ++
16730 ++ ret = clk_enable(host->clk);
16731 ++ if (ret)
16732 ++ return ret;
16733 +
16734 +- clk_enable(host->clk);
16735 + mmc_davinci_reset_ctrl(host, 0);
16736 +
16737 + return 0;
16738 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
16739 +index 58cfaffa3c2d8..f7c384db89bf3 100644
16740 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
16741 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
16742 +@@ -1495,12 +1495,12 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
16743 +
16744 + realtek_init_host(host);
16745 +
16746 +- if (pcr->rtd3_en) {
16747 +- pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
16748 +- pm_runtime_use_autosuspend(&pdev->dev);
16749 +- pm_runtime_enable(&pdev->dev);
16750 +- }
16751 +-
16752 ++ pm_runtime_no_callbacks(&pdev->dev);
16753 ++ pm_runtime_set_active(&pdev->dev);
16754 ++ pm_runtime_enable(&pdev->dev);
16755 ++ pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
16756 ++ pm_runtime_mark_last_busy(&pdev->dev);
16757 ++ pm_runtime_use_autosuspend(&pdev->dev);
16758 +
16759 + mmc_add_host(mmc);
16760 +
16761 +@@ -1521,11 +1521,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
16762 + pcr->slots[RTSX_SD_CARD].card_event = NULL;
16763 + mmc = host->mmc;
16764 +
16765 +- if (pcr->rtd3_en) {
16766 +- pm_runtime_dont_use_autosuspend(&pdev->dev);
16767 +- pm_runtime_disable(&pdev->dev);
16768 +- }
16769 +-
16770 + cancel_work_sync(&host->work);
16771 +
16772 + mutex_lock(&host->host_mutex);
16773 +@@ -1548,6 +1543,9 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
16774 +
16775 + flush_work(&host->work);
16776 +
16777 ++ pm_runtime_dont_use_autosuspend(&pdev->dev);
16778 ++ pm_runtime_disable(&pdev->dev);
16779 ++
16780 + mmc_free_host(mmc);
16781 +
16782 + dev_dbg(&(pdev->dev),
16783 +diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
16784 +index f654afbe8e83c..b4891bb266485 100644
16785 +--- a/drivers/mmc/host/sdhci_am654.c
16786 ++++ b/drivers/mmc/host/sdhci_am654.c
16787 +@@ -514,26 +514,6 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
16788 + .flags = IOMUX_PRESENT,
16789 + };
16790 +
16791 +-static const struct sdhci_pltfm_data sdhci_am64_8bit_pdata = {
16792 +- .ops = &sdhci_j721e_8bit_ops,
16793 +- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
16794 +-};
16795 +-
16796 +-static const struct sdhci_am654_driver_data sdhci_am64_8bit_drvdata = {
16797 +- .pdata = &sdhci_am64_8bit_pdata,
16798 +- .flags = DLL_PRESENT | DLL_CALIB,
16799 +-};
16800 +-
16801 +-static const struct sdhci_pltfm_data sdhci_am64_4bit_pdata = {
16802 +- .ops = &sdhci_j721e_4bit_ops,
16803 +- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
16804 +-};
16805 +-
16806 +-static const struct sdhci_am654_driver_data sdhci_am64_4bit_drvdata = {
16807 +- .pdata = &sdhci_am64_4bit_pdata,
16808 +- .flags = IOMUX_PRESENT,
16809 +-};
16810 +-
16811 + static const struct soc_device_attribute sdhci_am654_devices[] = {
16812 + { .family = "AM65X",
16813 + .revision = "SR1.0",
16814 +@@ -759,11 +739,11 @@ static const struct of_device_id sdhci_am654_of_match[] = {
16815 + },
16816 + {
16817 + .compatible = "ti,am64-sdhci-8bit",
16818 +- .data = &sdhci_am64_8bit_drvdata,
16819 ++ .data = &sdhci_j721e_8bit_drvdata,
16820 + },
16821 + {
16822 + .compatible = "ti,am64-sdhci-4bit",
16823 +- .data = &sdhci_am64_4bit_drvdata,
16824 ++ .data = &sdhci_j721e_4bit_drvdata,
16825 + },
16826 + { /* sentinel */ }
16827 + };
16828 +diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
16829 +index a8b31bddf14b8..1a840db207b5a 100644
16830 +--- a/drivers/mtd/devices/mchp23k256.c
16831 ++++ b/drivers/mtd/devices/mchp23k256.c
16832 +@@ -231,6 +231,19 @@ static const struct of_device_id mchp23k256_of_table[] = {
16833 + };
16834 + MODULE_DEVICE_TABLE(of, mchp23k256_of_table);
16835 +
16836 ++static const struct spi_device_id mchp23k256_spi_ids[] = {
16837 ++ {
16838 ++ .name = "mchp23k256",
16839 ++ .driver_data = (kernel_ulong_t)&mchp23k256_caps,
16840 ++ },
16841 ++ {
16842 ++ .name = "mchp23lcv1024",
16843 ++ .driver_data = (kernel_ulong_t)&mchp23lcv1024_caps,
16844 ++ },
16845 ++ {}
16846 ++};
16847 ++MODULE_DEVICE_TABLE(spi, mchp23k256_spi_ids);
16848 ++
16849 + static struct spi_driver mchp23k256_driver = {
16850 + .driver = {
16851 + .name = "mchp23k256",
16852 +@@ -238,6 +251,7 @@ static struct spi_driver mchp23k256_driver = {
16853 + },
16854 + .probe = mchp23k256_probe,
16855 + .remove = mchp23k256_remove,
16856 ++ .id_table = mchp23k256_spi_ids,
16857 + };
16858 +
16859 + module_spi_driver(mchp23k256_driver);
16860 +diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
16861 +index 231a107901960..b9cf2b4415a54 100644
16862 +--- a/drivers/mtd/devices/mchp48l640.c
16863 ++++ b/drivers/mtd/devices/mchp48l640.c
16864 +@@ -359,6 +359,15 @@ static const struct of_device_id mchp48l640_of_table[] = {
16865 + };
16866 + MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
16867 +
16868 ++static const struct spi_device_id mchp48l640_spi_ids[] = {
16869 ++ {
16870 ++ .name = "48l640",
16871 ++ .driver_data = (kernel_ulong_t)&mchp48l640_caps,
16872 ++ },
16873 ++ {}
16874 ++};
16875 ++MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
16876 ++
16877 + static struct spi_driver mchp48l640_driver = {
16878 + .driver = {
16879 + .name = "mchp48l640",
16880 +@@ -366,6 +375,7 @@ static struct spi_driver mchp48l640_driver = {
16881 + },
16882 + .probe = mchp48l640_probe,
16883 + .remove = mchp48l640_remove,
16884 ++ .id_table = mchp48l640_spi_ids,
16885 + };
16886 +
16887 + module_spi_driver(mchp48l640_driver);
16888 +diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
16889 +index 8b6f4da5d7201..a4b8b65fe15f5 100644
16890 +--- a/drivers/mtd/nand/onenand/generic.c
16891 ++++ b/drivers/mtd/nand/onenand/generic.c
16892 +@@ -53,7 +53,12 @@ static int generic_onenand_probe(struct platform_device *pdev)
16893 + }
16894 +
16895 + info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
16896 +- info->onenand.irq = platform_get_irq(pdev, 0);
16897 ++
16898 ++ err = platform_get_irq(pdev, 0);
16899 ++ if (err < 0)
16900 ++ goto out_iounmap;
16901 ++
16902 ++ info->onenand.irq = err;
16903 +
16904 + info->mtd.dev.parent = &pdev->dev;
16905 + info->mtd.priv = &info->onenand;
16906 +diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
16907 +index f3276ee9e4fe7..ddd93bc38ea6c 100644
16908 +--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
16909 ++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
16910 +@@ -2060,13 +2060,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
16911 + nc->mck = of_clk_get(dev->parent->of_node, 0);
16912 + if (IS_ERR(nc->mck)) {
16913 + dev_err(dev, "Failed to retrieve MCK clk\n");
16914 +- return PTR_ERR(nc->mck);
16915 ++ ret = PTR_ERR(nc->mck);
16916 ++ goto out_release_dma;
16917 + }
16918 +
16919 + np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
16920 + if (!np) {
16921 + dev_err(dev, "Missing or invalid atmel,smc property\n");
16922 +- return -EINVAL;
16923 ++ ret = -EINVAL;
16924 ++ goto out_release_dma;
16925 + }
16926 +
16927 + nc->smc = syscon_node_to_regmap(np);
16928 +@@ -2074,10 +2076,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
16929 + if (IS_ERR(nc->smc)) {
16930 + ret = PTR_ERR(nc->smc);
16931 + dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
16932 +- return ret;
16933 ++ goto out_release_dma;
16934 + }
16935 +
16936 + return 0;
16937 ++
16938 ++out_release_dma:
16939 ++ if (nc->dmac)
16940 ++ dma_release_channel(nc->dmac);
16941 ++
16942 ++ return ret;
16943 + }
16944 +
16945 + static int
16946 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
16947 +index ded4df4739280..e50db25e5ddcb 100644
16948 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
16949 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
16950 +@@ -648,6 +648,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
16951 + const struct nand_sdr_timings *sdr)
16952 + {
16953 + struct gpmi_nfc_hardware_timing *hw = &this->hw;
16954 ++ struct resources *r = &this->resources;
16955 + unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
16956 + unsigned int period_ps, reference_period_ps;
16957 + unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
16958 +@@ -671,6 +672,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
16959 + wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
16960 + }
16961 +
16962 ++ hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
16963 ++
16964 + /* SDR core timings are given in picoseconds */
16965 + period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
16966 +
16967 +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
16968 +index e7b2ba016d8c6..8daaba96edb2c 100644
16969 +--- a/drivers/mtd/nand/raw/nand_base.c
16970 ++++ b/drivers/mtd/nand/raw/nand_base.c
16971 +@@ -338,16 +338,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
16972 + *
16973 + * Return: -EBUSY if the chip has been suspended, 0 otherwise
16974 + */
16975 +-static int nand_get_device(struct nand_chip *chip)
16976 ++static void nand_get_device(struct nand_chip *chip)
16977 + {
16978 +- mutex_lock(&chip->lock);
16979 +- if (chip->suspended) {
16980 ++ /* Wait until the device is resumed. */
16981 ++ while (1) {
16982 ++ mutex_lock(&chip->lock);
16983 ++ if (!chip->suspended) {
16984 ++ mutex_lock(&chip->controller->lock);
16985 ++ return;
16986 ++ }
16987 + mutex_unlock(&chip->lock);
16988 +- return -EBUSY;
16989 +- }
16990 +- mutex_lock(&chip->controller->lock);
16991 +
16992 +- return 0;
16993 ++ wait_event(chip->resume_wq, !chip->suspended);
16994 ++ }
16995 + }
16996 +
16997 + /**
16998 +@@ -576,9 +579,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
16999 + nand_erase_nand(chip, &einfo, 0);
17000 +
17001 + /* Write bad block marker to OOB */
17002 +- ret = nand_get_device(chip);
17003 +- if (ret)
17004 +- return ret;
17005 ++ nand_get_device(chip);
17006 +
17007 + ret = nand_markbad_bbm(chip, ofs);
17008 + nand_release_device(chip);
17009 +@@ -3826,9 +3827,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
17010 + ops->mode != MTD_OPS_RAW)
17011 + return -ENOTSUPP;
17012 +
17013 +- ret = nand_get_device(chip);
17014 +- if (ret)
17015 +- return ret;
17016 ++ nand_get_device(chip);
17017 +
17018 + if (!ops->datbuf)
17019 + ret = nand_do_read_oob(chip, from, ops);
17020 +@@ -4415,13 +4414,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
17021 + struct mtd_oob_ops *ops)
17022 + {
17023 + struct nand_chip *chip = mtd_to_nand(mtd);
17024 +- int ret;
17025 ++ int ret = 0;
17026 +
17027 + ops->retlen = 0;
17028 +
17029 +- ret = nand_get_device(chip);
17030 +- if (ret)
17031 +- return ret;
17032 ++ nand_get_device(chip);
17033 +
17034 + switch (ops->mode) {
17035 + case MTD_OPS_PLACE_OOB:
17036 +@@ -4481,9 +4478,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
17037 + return -EIO;
17038 +
17039 + /* Grab the lock and see if the device is available */
17040 +- ret = nand_get_device(chip);
17041 +- if (ret)
17042 +- return ret;
17043 ++ nand_get_device(chip);
17044 +
17045 + /* Shift to get first page */
17046 + page = (int)(instr->addr >> chip->page_shift);
17047 +@@ -4570,7 +4565,7 @@ static void nand_sync(struct mtd_info *mtd)
17048 + pr_debug("%s: called\n", __func__);
17049 +
17050 + /* Grab the lock and see if the device is available */
17051 +- WARN_ON(nand_get_device(chip));
17052 ++ nand_get_device(chip);
17053 + /* Release it and go back */
17054 + nand_release_device(chip);
17055 + }
17056 +@@ -4587,9 +4582,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
17057 + int ret;
17058 +
17059 + /* Select the NAND device */
17060 +- ret = nand_get_device(chip);
17061 +- if (ret)
17062 +- return ret;
17063 ++ nand_get_device(chip);
17064 +
17065 + nand_select_target(chip, chipnr);
17066 +
17067 +@@ -4660,6 +4653,8 @@ static void nand_resume(struct mtd_info *mtd)
17068 + __func__);
17069 + }
17070 + mutex_unlock(&chip->lock);
17071 ++
17072 ++ wake_up_all(&chip->resume_wq);
17073 + }
17074 +
17075 + /**
17076 +@@ -5437,6 +5432,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
17077 + chip->cur_cs = -1;
17078 +
17079 + mutex_init(&chip->lock);
17080 ++ init_waitqueue_head(&chip->resume_wq);
17081 +
17082 + /* Enforce the right timings for reset/detection */
17083 + chip->current_interface_config = nand_get_reset_interface_config();
17084 +diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
17085 +index 8a91e069ee2e9..3c6f6aff649f8 100644
17086 +--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
17087 ++++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
17088 +@@ -1062,7 +1062,7 @@ static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
17089 + chip->controller = &nfc->controller;
17090 + mtd = nand_to_mtd(chip);
17091 + mtd->dev.parent = nfc->dev;
17092 +- nand_set_flash_node(chip, nfc->dev->of_node);
17093 ++ nand_set_flash_node(chip, np);
17094 + if (!mtd->name) {
17095 + mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
17096 + "%s", PL35X_NANDC_DRIVER_NAME);
17097 +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
17098 +index 04ea180118e33..cc155f6c6c68c 100644
17099 +--- a/drivers/mtd/spi-nor/core.c
17100 ++++ b/drivers/mtd/spi-nor/core.c
17101 +@@ -3181,10 +3181,11 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
17102 + mtd->flags = MTD_CAP_NORFLASH;
17103 + if (nor->info->flags & SPI_NOR_NO_ERASE)
17104 + mtd->flags |= MTD_NO_ERASE;
17105 ++ else
17106 ++ mtd->_erase = spi_nor_erase;
17107 + mtd->writesize = nor->params->writesize;
17108 + mtd->writebufsize = nor->params->page_size;
17109 + mtd->size = nor->params->size;
17110 +- mtd->_erase = spi_nor_erase;
17111 + mtd->_read = spi_nor_read;
17112 + /* Might be already set by some SST flashes. */
17113 + if (!mtd->_write)
17114 +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
17115 +index a7e3eb9befb62..a32050fecabf3 100644
17116 +--- a/drivers/mtd/ubi/build.c
17117 ++++ b/drivers/mtd/ubi/build.c
17118 +@@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev,
17119 + * we still can use 'ubi->ubi_num'.
17120 + */
17121 + ubi = container_of(dev, struct ubi_device, dev);
17122 +- ubi = ubi_get_device(ubi->ubi_num);
17123 +- if (!ubi)
17124 +- return -ENODEV;
17125 +
17126 + if (attr == &dev_eraseblock_size)
17127 + ret = sprintf(buf, "%d\n", ubi->leb_size);
17128 +@@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev,
17129 + else
17130 + ret = -EINVAL;
17131 +
17132 +- ubi_put_device(ubi);
17133 + return ret;
17134 + }
17135 +
17136 +@@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
17137 + goto out_detach;
17138 + }
17139 +
17140 +- /* Make device "available" before it becomes accessible via sysfs */
17141 +- ubi_devices[ubi_num] = ubi;
17142 +-
17143 + err = uif_init(ubi);
17144 + if (err)
17145 + goto out_detach;
17146 +@@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
17147 + wake_up_process(ubi->bgt_thread);
17148 + spin_unlock(&ubi->wl_lock);
17149 +
17150 ++ ubi_devices[ubi_num] = ubi;
17151 + ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
17152 + return ubi_num;
17153 +
17154 +@@ -1034,7 +1028,6 @@ out_debugfs:
17155 + out_uif:
17156 + uif_close(ubi);
17157 + out_detach:
17158 +- ubi_devices[ubi_num] = NULL;
17159 + ubi_wl_close(ubi);
17160 + ubi_free_all_volumes(ubi);
17161 + vfree(ubi->vtbl);
17162 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
17163 +index 022af59906aa9..6b5f1ffd961b9 100644
17164 +--- a/drivers/mtd/ubi/fastmap.c
17165 ++++ b/drivers/mtd/ubi/fastmap.c
17166 +@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
17167 + if (err == UBI_IO_FF_BITFLIPS)
17168 + scrub = 1;
17169 +
17170 +- add_aeb(ai, free, pnum, ec, scrub);
17171 ++ ret = add_aeb(ai, free, pnum, ec, scrub);
17172 ++ if (ret)
17173 ++ goto out;
17174 + continue;
17175 + } else if (err == 0 || err == UBI_IO_BITFLIPS) {
17176 + dbg_bld("Found non empty PEB:%i in pool", pnum);
17177 +@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
17178 + if (fm_pos >= fm_size)
17179 + goto fail_bad;
17180 +
17181 +- add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
17182 +- be32_to_cpu(fmec->ec), 0);
17183 ++ ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
17184 ++ be32_to_cpu(fmec->ec), 0);
17185 ++ if (ret)
17186 ++ goto fail;
17187 + }
17188 +
17189 + /* read EC values from used list */
17190 +@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
17191 + if (fm_pos >= fm_size)
17192 + goto fail_bad;
17193 +
17194 +- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
17195 +- be32_to_cpu(fmec->ec), 0);
17196 ++ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
17197 ++ be32_to_cpu(fmec->ec), 0);
17198 ++ if (ret)
17199 ++ goto fail;
17200 + }
17201 +
17202 + /* read EC values from scrub list */
17203 +@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
17204 + if (fm_pos >= fm_size)
17205 + goto fail_bad;
17206 +
17207 +- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
17208 +- be32_to_cpu(fmec->ec), 1);
17209 ++ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
17210 ++ be32_to_cpu(fmec->ec), 1);
17211 ++ if (ret)
17212 ++ goto fail;
17213 + }
17214 +
17215 + /* read EC values from erase list */
17216 +@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
17217 + if (fm_pos >= fm_size)
17218 + goto fail_bad;
17219 +
17220 +- add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
17221 +- be32_to_cpu(fmec->ec), 1);
17222 ++ ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
17223 ++ be32_to_cpu(fmec->ec), 1);
17224 ++ if (ret)
17225 ++ goto fail;
17226 + }
17227 +
17228 + ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
17229 +diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
17230 +index 139ee132bfbcf..1bc7b3a056046 100644
17231 +--- a/drivers/mtd/ubi/vmt.c
17232 ++++ b/drivers/mtd/ubi/vmt.c
17233 +@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
17234 + {
17235 + int ret;
17236 + struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
17237 +- struct ubi_device *ubi;
17238 +-
17239 +- ubi = ubi_get_device(vol->ubi->ubi_num);
17240 +- if (!ubi)
17241 +- return -ENODEV;
17242 ++ struct ubi_device *ubi = vol->ubi;
17243 +
17244 + spin_lock(&ubi->volumes_lock);
17245 + if (!ubi->volumes[vol->vol_id]) {
17246 + spin_unlock(&ubi->volumes_lock);
17247 +- ubi_put_device(ubi);
17248 + return -ENODEV;
17249 + }
17250 + /* Take a reference to prevent volume removal */
17251 +@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
17252 + vol->ref_count -= 1;
17253 + ubi_assert(vol->ref_count >= 0);
17254 + spin_unlock(&ubi->volumes_lock);
17255 +- ubi_put_device(ubi);
17256 + return ret;
17257 + }
17258 +
17259 +diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
17260 +index ba587e5fc24fc..683203f87ae2b 100644
17261 +--- a/drivers/net/bareudp.c
17262 ++++ b/drivers/net/bareudp.c
17263 +@@ -148,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
17264 + skb_reset_network_header(skb);
17265 + skb_reset_mac_header(skb);
17266 +
17267 +- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
17268 ++ if (!ipv6_mod_enabled() || family == AF_INET)
17269 + err = IP_ECN_decapsulate(oiph, skb);
17270 + else
17271 + err = IP6_ECN_decapsulate(oiph, skb);
17272 +
17273 + if (unlikely(err)) {
17274 + if (log_ecn_error) {
17275 +- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
17276 ++ if (!ipv6_mod_enabled() || family == AF_INET)
17277 + net_info_ratelimited("non-ECT from %pI4 "
17278 + "with TOS=%#x\n",
17279 + &((struct iphdr *)oiph)->saddr,
17280 +@@ -221,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
17281 + int err;
17282 +
17283 + memset(&udp_conf, 0, sizeof(udp_conf));
17284 +-#if IS_ENABLED(CONFIG_IPV6)
17285 +- udp_conf.family = AF_INET6;
17286 +-#else
17287 +- udp_conf.family = AF_INET;
17288 +-#endif
17289 ++
17290 ++ if (ipv6_mod_enabled())
17291 ++ udp_conf.family = AF_INET6;
17292 ++ else
17293 ++ udp_conf.family = AF_INET;
17294 ++
17295 + udp_conf.local_udp_port = port;
17296 + /* Open UDP socket */
17297 + err = udp_sock_create(net, &udp_conf, &sock);
17298 +@@ -448,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
17299 + }
17300 +
17301 + rcu_read_lock();
17302 +- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
17303 ++ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
17304 + err = bareudp6_xmit_skb(skb, dev, bareudp, info);
17305 + else
17306 + err = bareudp_xmit_skb(skb, dev, bareudp, info);
17307 +@@ -478,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
17308 +
17309 + use_cache = ip_tunnel_dst_cache_usable(skb, info);
17310 +
17311 +- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
17312 ++ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
17313 + struct rtable *rt;
17314 + __be32 saddr;
17315 +
17316 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
17317 +index 1a4b56f6fa8c6..b3b5bc1c803b3 100644
17318 +--- a/drivers/net/can/m_can/m_can.c
17319 ++++ b/drivers/net/can/m_can/m_can.c
17320 +@@ -1637,8 +1637,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
17321 + if (err)
17322 + goto out_fail;
17323 +
17324 +- can_put_echo_skb(skb, dev, 0, 0);
17325 +-
17326 + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
17327 + cccr = m_can_read(cdev, M_CAN_CCCR);
17328 + cccr &= ~CCCR_CMR_MASK;
17329 +@@ -1655,6 +1653,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
17330 + m_can_write(cdev, M_CAN_CCCR, cccr);
17331 + }
17332 + m_can_write(cdev, M_CAN_TXBTIE, 0x1);
17333 ++
17334 ++ can_put_echo_skb(skb, dev, 0, 0);
17335 ++
17336 + m_can_write(cdev, M_CAN_TXBAR, 0x1);
17337 + /* End of xmit function for version 3.0.x */
17338 + } else {
17339 +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
17340 +index b5986df6eca0b..1c192554209a3 100644
17341 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
17342 ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
17343 +@@ -1657,7 +1657,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
17344 + out_kfree_buf_rx:
17345 + kfree(buf_rx);
17346 +
17347 +- return 0;
17348 ++ return err;
17349 + }
17350 +
17351 + #define MCP251XFD_QUIRK_ACTIVE(quirk) \
17352 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
17353 +index 7bedceffdfa36..bbec3311d8934 100644
17354 +--- a/drivers/net/can/usb/ems_usb.c
17355 ++++ b/drivers/net/can/usb/ems_usb.c
17356 +@@ -819,7 +819,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
17357 +
17358 + usb_unanchor_urb(urb);
17359 + usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
17360 +- dev_kfree_skb(skb);
17361 +
17362 + atomic_dec(&dev->active_tx_urbs);
17363 +
17364 +diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
17365 +index 77bddff86252b..c45a814e1de2f 100644
17366 +--- a/drivers/net/can/usb/mcba_usb.c
17367 ++++ b/drivers/net/can/usb/mcba_usb.c
17368 +@@ -33,10 +33,6 @@
17369 + #define MCBA_USB_RX_BUFF_SIZE 64
17370 + #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
17371 +
17372 +-/* MCBA endpoint numbers */
17373 +-#define MCBA_USB_EP_IN 1
17374 +-#define MCBA_USB_EP_OUT 1
17375 +-
17376 + /* Microchip command id */
17377 + #define MBCA_CMD_RECEIVE_MESSAGE 0xE3
17378 + #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
17379 +@@ -83,6 +79,8 @@ struct mcba_priv {
17380 + atomic_t free_ctx_cnt;
17381 + void *rxbuf[MCBA_MAX_RX_URBS];
17382 + dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
17383 ++ int rx_pipe;
17384 ++ int tx_pipe;
17385 + };
17386 +
17387 + /* CAN frame */
17388 +@@ -268,10 +266,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
17389 +
17390 + memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
17391 +
17392 +- usb_fill_bulk_urb(urb, priv->udev,
17393 +- usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
17394 +- MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
17395 +- ctx);
17396 ++ usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
17397 ++ mcba_usb_write_bulk_callback, ctx);
17398 +
17399 + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
17400 + usb_anchor_urb(urb, &priv->tx_submitted);
17401 +@@ -364,7 +360,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
17402 + xmit_failed:
17403 + can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
17404 + mcba_usb_free_ctx(ctx);
17405 +- dev_kfree_skb(skb);
17406 + stats->tx_dropped++;
17407 +
17408 + return NETDEV_TX_OK;
17409 +@@ -608,7 +603,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
17410 + resubmit_urb:
17411 +
17412 + usb_fill_bulk_urb(urb, priv->udev,
17413 +- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
17414 ++ priv->rx_pipe,
17415 + urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
17416 + mcba_usb_read_bulk_callback, priv);
17417 +
17418 +@@ -653,7 +648,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
17419 + urb->transfer_dma = buf_dma;
17420 +
17421 + usb_fill_bulk_urb(urb, priv->udev,
17422 +- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
17423 ++ priv->rx_pipe,
17424 + buf, MCBA_USB_RX_BUFF_SIZE,
17425 + mcba_usb_read_bulk_callback, priv);
17426 + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
17427 +@@ -807,6 +802,13 @@ static int mcba_usb_probe(struct usb_interface *intf,
17428 + struct mcba_priv *priv;
17429 + int err;
17430 + struct usb_device *usbdev = interface_to_usbdev(intf);
17431 ++ struct usb_endpoint_descriptor *in, *out;
17432 ++
17433 ++ err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
17434 ++ if (err) {
17435 ++ dev_err(&intf->dev, "Can't find endpoints\n");
17436 ++ return err;
17437 ++ }
17438 +
17439 + netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
17440 + if (!netdev) {
17441 +@@ -852,6 +854,9 @@ static int mcba_usb_probe(struct usb_interface *intf,
17442 + goto cleanup_free_candev;
17443 + }
17444 +
17445 ++ priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
17446 ++ priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
17447 ++
17448 + devm_can_led_init(netdev);
17449 +
17450 + /* Start USB dev only if we have successfully registered CAN device */
17451 +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
17452 +index 431af1ec1e3ca..b638604bf1eef 100644
17453 +--- a/drivers/net/can/usb/usb_8dev.c
17454 ++++ b/drivers/net/can/usb/usb_8dev.c
17455 +@@ -663,9 +663,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
17456 + atomic_inc(&priv->active_tx_urbs);
17457 +
17458 + err = usb_submit_urb(urb, GFP_ATOMIC);
17459 +- if (unlikely(err))
17460 +- goto failed;
17461 +- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
17462 ++ if (unlikely(err)) {
17463 ++ can_free_echo_skb(netdev, context->echo_index, NULL);
17464 ++
17465 ++ usb_unanchor_urb(urb);
17466 ++ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
17467 ++
17468 ++ atomic_dec(&priv->active_tx_urbs);
17469 ++
17470 ++ if (err == -ENODEV)
17471 ++ netif_device_detach(netdev);
17472 ++ else
17473 ++ netdev_warn(netdev, "failed tx_urb %d\n", err);
17474 ++ stats->tx_dropped++;
17475 ++ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
17476 + /* Slow down tx path */
17477 + netif_stop_queue(netdev);
17478 +
17479 +@@ -684,19 +695,6 @@ nofreecontext:
17480 +
17481 + return NETDEV_TX_BUSY;
17482 +
17483 +-failed:
17484 +- can_free_echo_skb(netdev, context->echo_index, NULL);
17485 +-
17486 +- usb_unanchor_urb(urb);
17487 +- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
17488 +-
17489 +- atomic_dec(&priv->active_tx_urbs);
17490 +-
17491 +- if (err == -ENODEV)
17492 +- netif_device_detach(netdev);
17493 +- else
17494 +- netdev_warn(netdev, "failed tx_urb %d\n", err);
17495 +-
17496 + nomembuf:
17497 + usb_free_urb(urb);
17498 +
17499 +diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
17500 +index 47ccc15a3486b..191ffa7776e8d 100644
17501 +--- a/drivers/net/can/vxcan.c
17502 ++++ b/drivers/net/can/vxcan.c
17503 +@@ -148,7 +148,7 @@ static void vxcan_setup(struct net_device *dev)
17504 + dev->hard_header_len = 0;
17505 + dev->addr_len = 0;
17506 + dev->tx_queue_len = 0;
17507 +- dev->flags = (IFF_NOARP|IFF_ECHO);
17508 ++ dev->flags = IFF_NOARP;
17509 + dev->netdev_ops = &vxcan_netdev_ops;
17510 + dev->needs_free_netdev = true;
17511 +
17512 +diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
17513 +index 0029d279616fd..37a3dabdce313 100644
17514 +--- a/drivers/net/dsa/Kconfig
17515 ++++ b/drivers/net/dsa/Kconfig
17516 +@@ -68,17 +68,7 @@ config NET_DSA_QCA8K
17517 + This enables support for the Qualcomm Atheros QCA8K Ethernet
17518 + switch chips.
17519 +
17520 +-config NET_DSA_REALTEK_SMI
17521 +- tristate "Realtek SMI Ethernet switch family support"
17522 +- select NET_DSA_TAG_RTL4_A
17523 +- select NET_DSA_TAG_RTL8_4
17524 +- select FIXED_PHY
17525 +- select IRQ_DOMAIN
17526 +- select REALTEK_PHY
17527 +- select REGMAP
17528 +- help
17529 +- This enables support for the Realtek SMI-based switch
17530 +- chips, currently only RTL8366RB.
17531 ++source "drivers/net/dsa/realtek/Kconfig"
17532 +
17533 + config NET_DSA_SMSC_LAN9303
17534 + tristate
17535 +diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
17536 +index 8da1569a34e6e..e73838c122560 100644
17537 +--- a/drivers/net/dsa/Makefile
17538 ++++ b/drivers/net/dsa/Makefile
17539 +@@ -9,8 +9,6 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
17540 + obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
17541 + obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
17542 + obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
17543 +-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
17544 +-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
17545 + obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
17546 + obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
17547 + obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
17548 +@@ -23,5 +21,6 @@ obj-y += microchip/
17549 + obj-y += mv88e6xxx/
17550 + obj-y += ocelot/
17551 + obj-y += qca/
17552 ++obj-y += realtek/
17553 + obj-y += sja1105/
17554 + obj-y += xrs700x/
17555 +diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
17556 +index a7e2fcf2df2c9..edbe5e7f1cb6b 100644
17557 +--- a/drivers/net/dsa/bcm_sf2_cfp.c
17558 ++++ b/drivers/net/dsa/bcm_sf2_cfp.c
17559 +@@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
17560 + static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
17561 + int port, u32 location)
17562 + {
17563 +- struct cfp_rule *rule = NULL;
17564 ++ struct cfp_rule *rule;
17565 +
17566 + list_for_each_entry(rule, &priv->cfp.rules_list, next) {
17567 + if (rule->port == port && rule->fs.location == location)
17568 +- break;
17569 ++ return rule;
17570 + }
17571 +
17572 +- return rule;
17573 ++ return NULL;
17574 + }
17575 +
17576 + static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
17577 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
17578 +index ab1676553714c..cf7754dddad78 100644
17579 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
17580 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
17581 +@@ -3639,6 +3639,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
17582 + .port_sync_link = mv88e6185_port_sync_link,
17583 + .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
17584 + .port_tag_remap = mv88e6095_port_tag_remap,
17585 ++ .port_set_policy = mv88e6352_port_set_policy,
17586 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
17587 + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
17588 + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
17589 +diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
17590 +deleted file mode 100644
17591 +index aae46ada8d839..0000000000000
17592 +--- a/drivers/net/dsa/realtek-smi-core.c
17593 ++++ /dev/null
17594 +@@ -1,523 +0,0 @@
17595 +-// SPDX-License-Identifier: GPL-2.0+
17596 +-/* Realtek Simple Management Interface (SMI) driver
17597 +- * It can be discussed how "simple" this interface is.
17598 +- *
17599 +- * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
17600 +- * but the protocol is not MDIO at all. Instead it is a Realtek
17601 +- * pecularity that need to bit-bang the lines in a special way to
17602 +- * communicate with the switch.
17603 +- *
17604 +- * ASICs we intend to support with this driver:
17605 +- *
17606 +- * RTL8366 - The original version, apparently
17607 +- * RTL8369 - Similar enough to have the same datsheet as RTL8366
17608 +- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
17609 +- * different register layout from the other two
17610 +- * RTL8366S - Is this "RTL8366 super"?
17611 +- * RTL8367 - Has an OpenWRT driver as well
17612 +- * RTL8368S - Seems to be an alternative name for RTL8366RB
17613 +- * RTL8370 - Also uses SMI
17614 +- *
17615 +- * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
17616 +- * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
17617 +- * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
17618 +- * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
17619 +- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
17620 +- */
17621 +-
17622 +-#include <linux/kernel.h>
17623 +-#include <linux/module.h>
17624 +-#include <linux/device.h>
17625 +-#include <linux/spinlock.h>
17626 +-#include <linux/skbuff.h>
17627 +-#include <linux/of.h>
17628 +-#include <linux/of_device.h>
17629 +-#include <linux/of_mdio.h>
17630 +-#include <linux/delay.h>
17631 +-#include <linux/gpio/consumer.h>
17632 +-#include <linux/platform_device.h>
17633 +-#include <linux/regmap.h>
17634 +-#include <linux/bitops.h>
17635 +-#include <linux/if_bridge.h>
17636 +-
17637 +-#include "realtek-smi-core.h"
17638 +-
17639 +-#define REALTEK_SMI_ACK_RETRY_COUNT 5
17640 +-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
17641 +-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
17642 +-
17643 +-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
17644 +-{
17645 +- ndelay(smi->clk_delay);
17646 +-}
17647 +-
17648 +-static void realtek_smi_start(struct realtek_smi *smi)
17649 +-{
17650 +- /* Set GPIO pins to output mode, with initial state:
17651 +- * SCK = 0, SDA = 1
17652 +- */
17653 +- gpiod_direction_output(smi->mdc, 0);
17654 +- gpiod_direction_output(smi->mdio, 1);
17655 +- realtek_smi_clk_delay(smi);
17656 +-
17657 +- /* CLK 1: 0 -> 1, 1 -> 0 */
17658 +- gpiod_set_value(smi->mdc, 1);
17659 +- realtek_smi_clk_delay(smi);
17660 +- gpiod_set_value(smi->mdc, 0);
17661 +- realtek_smi_clk_delay(smi);
17662 +-
17663 +- /* CLK 2: */
17664 +- gpiod_set_value(smi->mdc, 1);
17665 +- realtek_smi_clk_delay(smi);
17666 +- gpiod_set_value(smi->mdio, 0);
17667 +- realtek_smi_clk_delay(smi);
17668 +- gpiod_set_value(smi->mdc, 0);
17669 +- realtek_smi_clk_delay(smi);
17670 +- gpiod_set_value(smi->mdio, 1);
17671 +-}
17672 +-
17673 +-static void realtek_smi_stop(struct realtek_smi *smi)
17674 +-{
17675 +- realtek_smi_clk_delay(smi);
17676 +- gpiod_set_value(smi->mdio, 0);
17677 +- gpiod_set_value(smi->mdc, 1);
17678 +- realtek_smi_clk_delay(smi);
17679 +- gpiod_set_value(smi->mdio, 1);
17680 +- realtek_smi_clk_delay(smi);
17681 +- gpiod_set_value(smi->mdc, 1);
17682 +- realtek_smi_clk_delay(smi);
17683 +- gpiod_set_value(smi->mdc, 0);
17684 +- realtek_smi_clk_delay(smi);
17685 +- gpiod_set_value(smi->mdc, 1);
17686 +-
17687 +- /* Add a click */
17688 +- realtek_smi_clk_delay(smi);
17689 +- gpiod_set_value(smi->mdc, 0);
17690 +- realtek_smi_clk_delay(smi);
17691 +- gpiod_set_value(smi->mdc, 1);
17692 +-
17693 +- /* Set GPIO pins to input mode */
17694 +- gpiod_direction_input(smi->mdio);
17695 +- gpiod_direction_input(smi->mdc);
17696 +-}
17697 +-
17698 +-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
17699 +-{
17700 +- for (; len > 0; len--) {
17701 +- realtek_smi_clk_delay(smi);
17702 +-
17703 +- /* Prepare data */
17704 +- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
17705 +- realtek_smi_clk_delay(smi);
17706 +-
17707 +- /* Clocking */
17708 +- gpiod_set_value(smi->mdc, 1);
17709 +- realtek_smi_clk_delay(smi);
17710 +- gpiod_set_value(smi->mdc, 0);
17711 +- }
17712 +-}
17713 +-
17714 +-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
17715 +-{
17716 +- gpiod_direction_input(smi->mdio);
17717 +-
17718 +- for (*data = 0; len > 0; len--) {
17719 +- u32 u;
17720 +-
17721 +- realtek_smi_clk_delay(smi);
17722 +-
17723 +- /* Clocking */
17724 +- gpiod_set_value(smi->mdc, 1);
17725 +- realtek_smi_clk_delay(smi);
17726 +- u = !!gpiod_get_value(smi->mdio);
17727 +- gpiod_set_value(smi->mdc, 0);
17728 +-
17729 +- *data |= (u << (len - 1));
17730 +- }
17731 +-
17732 +- gpiod_direction_output(smi->mdio, 0);
17733 +-}
17734 +-
17735 +-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
17736 +-{
17737 +- int retry_cnt;
17738 +-
17739 +- retry_cnt = 0;
17740 +- do {
17741 +- u32 ack;
17742 +-
17743 +- realtek_smi_read_bits(smi, 1, &ack);
17744 +- if (ack == 0)
17745 +- break;
17746 +-
17747 +- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
17748 +- dev_err(smi->dev, "ACK timeout\n");
17749 +- return -ETIMEDOUT;
17750 +- }
17751 +- } while (1);
17752 +-
17753 +- return 0;
17754 +-}
17755 +-
17756 +-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
17757 +-{
17758 +- realtek_smi_write_bits(smi, data, 8);
17759 +- return realtek_smi_wait_for_ack(smi);
17760 +-}
17761 +-
17762 +-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
17763 +-{
17764 +- realtek_smi_write_bits(smi, data, 8);
17765 +- return 0;
17766 +-}
17767 +-
17768 +-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
17769 +-{
17770 +- u32 t;
17771 +-
17772 +- /* Read data */
17773 +- realtek_smi_read_bits(smi, 8, &t);
17774 +- *data = (t & 0xff);
17775 +-
17776 +- /* Send an ACK */
17777 +- realtek_smi_write_bits(smi, 0x00, 1);
17778 +-
17779 +- return 0;
17780 +-}
17781 +-
17782 +-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
17783 +-{
17784 +- u32 t;
17785 +-
17786 +- /* Read data */
17787 +- realtek_smi_read_bits(smi, 8, &t);
17788 +- *data = (t & 0xff);
17789 +-
17790 +- /* Send an ACK */
17791 +- realtek_smi_write_bits(smi, 0x01, 1);
17792 +-
17793 +- return 0;
17794 +-}
17795 +-
17796 +-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
17797 +-{
17798 +- unsigned long flags;
17799 +- u8 lo = 0;
17800 +- u8 hi = 0;
17801 +- int ret;
17802 +-
17803 +- spin_lock_irqsave(&smi->lock, flags);
17804 +-
17805 +- realtek_smi_start(smi);
17806 +-
17807 +- /* Send READ command */
17808 +- ret = realtek_smi_write_byte(smi, smi->cmd_read);
17809 +- if (ret)
17810 +- goto out;
17811 +-
17812 +- /* Set ADDR[7:0] */
17813 +- ret = realtek_smi_write_byte(smi, addr & 0xff);
17814 +- if (ret)
17815 +- goto out;
17816 +-
17817 +- /* Set ADDR[15:8] */
17818 +- ret = realtek_smi_write_byte(smi, addr >> 8);
17819 +- if (ret)
17820 +- goto out;
17821 +-
17822 +- /* Read DATA[7:0] */
17823 +- realtek_smi_read_byte0(smi, &lo);
17824 +- /* Read DATA[15:8] */
17825 +- realtek_smi_read_byte1(smi, &hi);
17826 +-
17827 +- *data = ((u32)lo) | (((u32)hi) << 8);
17828 +-
17829 +- ret = 0;
17830 +-
17831 +- out:
17832 +- realtek_smi_stop(smi);
17833 +- spin_unlock_irqrestore(&smi->lock, flags);
17834 +-
17835 +- return ret;
17836 +-}
17837 +-
17838 +-static int realtek_smi_write_reg(struct realtek_smi *smi,
17839 +- u32 addr, u32 data, bool ack)
17840 +-{
17841 +- unsigned long flags;
17842 +- int ret;
17843 +-
17844 +- spin_lock_irqsave(&smi->lock, flags);
17845 +-
17846 +- realtek_smi_start(smi);
17847 +-
17848 +- /* Send WRITE command */
17849 +- ret = realtek_smi_write_byte(smi, smi->cmd_write);
17850 +- if (ret)
17851 +- goto out;
17852 +-
17853 +- /* Set ADDR[7:0] */
17854 +- ret = realtek_smi_write_byte(smi, addr & 0xff);
17855 +- if (ret)
17856 +- goto out;
17857 +-
17858 +- /* Set ADDR[15:8] */
17859 +- ret = realtek_smi_write_byte(smi, addr >> 8);
17860 +- if (ret)
17861 +- goto out;
17862 +-
17863 +- /* Write DATA[7:0] */
17864 +- ret = realtek_smi_write_byte(smi, data & 0xff);
17865 +- if (ret)
17866 +- goto out;
17867 +-
17868 +- /* Write DATA[15:8] */
17869 +- if (ack)
17870 +- ret = realtek_smi_write_byte(smi, data >> 8);
17871 +- else
17872 +- ret = realtek_smi_write_byte_noack(smi, data >> 8);
17873 +- if (ret)
17874 +- goto out;
17875 +-
17876 +- ret = 0;
17877 +-
17878 +- out:
17879 +- realtek_smi_stop(smi);
17880 +- spin_unlock_irqrestore(&smi->lock, flags);
17881 +-
17882 +- return ret;
17883 +-}
17884 +-
17885 +-/* There is one single case when we need to use this accessor and that
17886 +- * is when issueing soft reset. Since the device reset as soon as we write
17887 +- * that bit, no ACK will come back for natural reasons.
17888 +- */
17889 +-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
17890 +- u32 data)
17891 +-{
17892 +- return realtek_smi_write_reg(smi, addr, data, false);
17893 +-}
17894 +-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
17895 +-
17896 +-/* Regmap accessors */
17897 +-
17898 +-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
17899 +-{
17900 +- struct realtek_smi *smi = ctx;
17901 +-
17902 +- return realtek_smi_write_reg(smi, reg, val, true);
17903 +-}
17904 +-
17905 +-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
17906 +-{
17907 +- struct realtek_smi *smi = ctx;
17908 +-
17909 +- return realtek_smi_read_reg(smi, reg, val);
17910 +-}
17911 +-
17912 +-static const struct regmap_config realtek_smi_mdio_regmap_config = {
17913 +- .reg_bits = 10, /* A4..A0 R4..R0 */
17914 +- .val_bits = 16,
17915 +- .reg_stride = 1,
17916 +- /* PHY regs are at 0x8000 */
17917 +- .max_register = 0xffff,
17918 +- .reg_format_endian = REGMAP_ENDIAN_BIG,
17919 +- .reg_read = realtek_smi_read,
17920 +- .reg_write = realtek_smi_write,
17921 +- .cache_type = REGCACHE_NONE,
17922 +-};
17923 +-
17924 +-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
17925 +-{
17926 +- struct realtek_smi *smi = bus->priv;
17927 +-
17928 +- return smi->ops->phy_read(smi, addr, regnum);
17929 +-}
17930 +-
17931 +-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
17932 +- u16 val)
17933 +-{
17934 +- struct realtek_smi *smi = bus->priv;
17935 +-
17936 +- return smi->ops->phy_write(smi, addr, regnum, val);
17937 +-}
17938 +-
17939 +-int realtek_smi_setup_mdio(struct realtek_smi *smi)
17940 +-{
17941 +- struct device_node *mdio_np;
17942 +- int ret;
17943 +-
17944 +- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
17945 +- if (!mdio_np) {
17946 +- dev_err(smi->dev, "no MDIO bus node\n");
17947 +- return -ENODEV;
17948 +- }
17949 +-
17950 +- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
17951 +- if (!smi->slave_mii_bus) {
17952 +- ret = -ENOMEM;
17953 +- goto err_put_node;
17954 +- }
17955 +- smi->slave_mii_bus->priv = smi;
17956 +- smi->slave_mii_bus->name = "SMI slave MII";
17957 +- smi->slave_mii_bus->read = realtek_smi_mdio_read;
17958 +- smi->slave_mii_bus->write = realtek_smi_mdio_write;
17959 +- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
17960 +- smi->ds->index);
17961 +- smi->slave_mii_bus->dev.of_node = mdio_np;
17962 +- smi->slave_mii_bus->parent = smi->dev;
17963 +- smi->ds->slave_mii_bus = smi->slave_mii_bus;
17964 +-
17965 +- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
17966 +- if (ret) {
17967 +- dev_err(smi->dev, "unable to register MDIO bus %s\n",
17968 +- smi->slave_mii_bus->id);
17969 +- goto err_put_node;
17970 +- }
17971 +-
17972 +- return 0;
17973 +-
17974 +-err_put_node:
17975 +- of_node_put(mdio_np);
17976 +-
17977 +- return ret;
17978 +-}
17979 +-
17980 +-static int realtek_smi_probe(struct platform_device *pdev)
17981 +-{
17982 +- const struct realtek_smi_variant *var;
17983 +- struct device *dev = &pdev->dev;
17984 +- struct realtek_smi *smi;
17985 +- struct device_node *np;
17986 +- int ret;
17987 +-
17988 +- var = of_device_get_match_data(dev);
17989 +- np = dev->of_node;
17990 +-
17991 +- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
17992 +- if (!smi)
17993 +- return -ENOMEM;
17994 +- smi->chip_data = (void *)smi + sizeof(*smi);
17995 +- smi->map = devm_regmap_init(dev, NULL, smi,
17996 +- &realtek_smi_mdio_regmap_config);
17997 +- if (IS_ERR(smi->map)) {
17998 +- ret = PTR_ERR(smi->map);
17999 +- dev_err(dev, "regmap init failed: %d\n", ret);
18000 +- return ret;
18001 +- }
18002 +-
18003 +- /* Link forward and backward */
18004 +- smi->dev = dev;
18005 +- smi->clk_delay = var->clk_delay;
18006 +- smi->cmd_read = var->cmd_read;
18007 +- smi->cmd_write = var->cmd_write;
18008 +- smi->ops = var->ops;
18009 +-
18010 +- dev_set_drvdata(dev, smi);
18011 +- spin_lock_init(&smi->lock);
18012 +-
18013 +- /* TODO: if power is software controlled, set up any regulators here */
18014 +-
18015 +- /* Assert then deassert RESET */
18016 +- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
18017 +- if (IS_ERR(smi->reset)) {
18018 +- dev_err(dev, "failed to get RESET GPIO\n");
18019 +- return PTR_ERR(smi->reset);
18020 +- }
18021 +- msleep(REALTEK_SMI_HW_STOP_DELAY);
18022 +- gpiod_set_value(smi->reset, 0);
18023 +- msleep(REALTEK_SMI_HW_START_DELAY);
18024 +- dev_info(dev, "deasserted RESET\n");
18025 +-
18026 +- /* Fetch MDIO pins */
18027 +- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
18028 +- if (IS_ERR(smi->mdc))
18029 +- return PTR_ERR(smi->mdc);
18030 +- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
18031 +- if (IS_ERR(smi->mdio))
18032 +- return PTR_ERR(smi->mdio);
18033 +-
18034 +- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
18035 +-
18036 +- ret = smi->ops->detect(smi);
18037 +- if (ret) {
18038 +- dev_err(dev, "unable to detect switch\n");
18039 +- return ret;
18040 +- }
18041 +-
18042 +- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
18043 +- if (!smi->ds)
18044 +- return -ENOMEM;
18045 +-
18046 +- smi->ds->dev = dev;
18047 +- smi->ds->num_ports = smi->num_ports;
18048 +- smi->ds->priv = smi;
18049 +-
18050 +- smi->ds->ops = var->ds_ops;
18051 +- ret = dsa_register_switch(smi->ds);
18052 +- if (ret) {
18053 +- dev_err_probe(dev, ret, "unable to register switch\n");
18054 +- return ret;
18055 +- }
18056 +- return 0;
18057 +-}
18058 +-
18059 +-static int realtek_smi_remove(struct platform_device *pdev)
18060 +-{
18061 +- struct realtek_smi *smi = platform_get_drvdata(pdev);
18062 +-
18063 +- if (!smi)
18064 +- return 0;
18065 +-
18066 +- dsa_unregister_switch(smi->ds);
18067 +- if (smi->slave_mii_bus)
18068 +- of_node_put(smi->slave_mii_bus->dev.of_node);
18069 +- gpiod_set_value(smi->reset, 1);
18070 +-
18071 +- platform_set_drvdata(pdev, NULL);
18072 +-
18073 +- return 0;
18074 +-}
18075 +-
18076 +-static void realtek_smi_shutdown(struct platform_device *pdev)
18077 +-{
18078 +- struct realtek_smi *smi = platform_get_drvdata(pdev);
18079 +-
18080 +- if (!smi)
18081 +- return;
18082 +-
18083 +- dsa_switch_shutdown(smi->ds);
18084 +-
18085 +- platform_set_drvdata(pdev, NULL);
18086 +-}
18087 +-
18088 +-static const struct of_device_id realtek_smi_of_match[] = {
18089 +- {
18090 +- .compatible = "realtek,rtl8366rb",
18091 +- .data = &rtl8366rb_variant,
18092 +- },
18093 +- {
18094 +- /* FIXME: add support for RTL8366S and more */
18095 +- .compatible = "realtek,rtl8366s",
18096 +- .data = NULL,
18097 +- },
18098 +- {
18099 +- .compatible = "realtek,rtl8365mb",
18100 +- .data = &rtl8365mb_variant,
18101 +- },
18102 +- { /* sentinel */ },
18103 +-};
18104 +-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
18105 +-
18106 +-static struct platform_driver realtek_smi_driver = {
18107 +- .driver = {
18108 +- .name = "realtek-smi",
18109 +- .of_match_table = of_match_ptr(realtek_smi_of_match),
18110 +- },
18111 +- .probe = realtek_smi_probe,
18112 +- .remove = realtek_smi_remove,
18113 +- .shutdown = realtek_smi_shutdown,
18114 +-};
18115 +-module_platform_driver(realtek_smi_driver);
18116 +-
18117 +-MODULE_LICENSE("GPL");
18118 +diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
18119 +deleted file mode 100644
18120 +index 5bfa53e2480ae..0000000000000
18121 +--- a/drivers/net/dsa/realtek-smi-core.h
18122 ++++ /dev/null
18123 +@@ -1,145 +0,0 @@
18124 +-/* SPDX-License-Identifier: GPL-2.0+ */
18125 +-/* Realtek SMI interface driver defines
18126 +- *
18127 +- * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
18128 +- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
18129 +- */
18130 +-
18131 +-#ifndef _REALTEK_SMI_H
18132 +-#define _REALTEK_SMI_H
18133 +-
18134 +-#include <linux/phy.h>
18135 +-#include <linux/platform_device.h>
18136 +-#include <linux/gpio/consumer.h>
18137 +-#include <net/dsa.h>
18138 +-
18139 +-struct realtek_smi_ops;
18140 +-struct dentry;
18141 +-struct inode;
18142 +-struct file;
18143 +-
18144 +-struct rtl8366_mib_counter {
18145 +- unsigned int base;
18146 +- unsigned int offset;
18147 +- unsigned int length;
18148 +- const char *name;
18149 +-};
18150 +-
18151 +-/**
18152 +- * struct rtl8366_vlan_mc - Virtual LAN member configuration
18153 +- */
18154 +-struct rtl8366_vlan_mc {
18155 +- u16 vid;
18156 +- u16 untag;
18157 +- u16 member;
18158 +- u8 fid;
18159 +- u8 priority;
18160 +-};
18161 +-
18162 +-struct rtl8366_vlan_4k {
18163 +- u16 vid;
18164 +- u16 untag;
18165 +- u16 member;
18166 +- u8 fid;
18167 +-};
18168 +-
18169 +-struct realtek_smi {
18170 +- struct device *dev;
18171 +- struct gpio_desc *reset;
18172 +- struct gpio_desc *mdc;
18173 +- struct gpio_desc *mdio;
18174 +- struct regmap *map;
18175 +- struct mii_bus *slave_mii_bus;
18176 +-
18177 +- unsigned int clk_delay;
18178 +- u8 cmd_read;
18179 +- u8 cmd_write;
18180 +- spinlock_t lock; /* Locks around command writes */
18181 +- struct dsa_switch *ds;
18182 +- struct irq_domain *irqdomain;
18183 +- bool leds_disabled;
18184 +-
18185 +- unsigned int cpu_port;
18186 +- unsigned int num_ports;
18187 +- unsigned int num_vlan_mc;
18188 +- unsigned int num_mib_counters;
18189 +- struct rtl8366_mib_counter *mib_counters;
18190 +-
18191 +- const struct realtek_smi_ops *ops;
18192 +-
18193 +- int vlan_enabled;
18194 +- int vlan4k_enabled;
18195 +-
18196 +- char buf[4096];
18197 +- void *chip_data; /* Per-chip extra variant data */
18198 +-};
18199 +-
18200 +-/**
18201 +- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
18202 +- * @detect: detects the chiptype
18203 +- */
18204 +-struct realtek_smi_ops {
18205 +- int (*detect)(struct realtek_smi *smi);
18206 +- int (*reset_chip)(struct realtek_smi *smi);
18207 +- int (*setup)(struct realtek_smi *smi);
18208 +- void (*cleanup)(struct realtek_smi *smi);
18209 +- int (*get_mib_counter)(struct realtek_smi *smi,
18210 +- int port,
18211 +- struct rtl8366_mib_counter *mib,
18212 +- u64 *mibvalue);
18213 +- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
18214 +- struct rtl8366_vlan_mc *vlanmc);
18215 +- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
18216 +- const struct rtl8366_vlan_mc *vlanmc);
18217 +- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
18218 +- struct rtl8366_vlan_4k *vlan4k);
18219 +- int (*set_vlan_4k)(struct realtek_smi *smi,
18220 +- const struct rtl8366_vlan_4k *vlan4k);
18221 +- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
18222 +- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
18223 +- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
18224 +- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
18225 +- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
18226 +- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
18227 +- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
18228 +- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
18229 +- u16 val);
18230 +-};
18231 +-
18232 +-struct realtek_smi_variant {
18233 +- const struct dsa_switch_ops *ds_ops;
18234 +- const struct realtek_smi_ops *ops;
18235 +- unsigned int clk_delay;
18236 +- u8 cmd_read;
18237 +- u8 cmd_write;
18238 +- size_t chip_data_sz;
18239 +-};
18240 +-
18241 +-/* SMI core calls */
18242 +-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
18243 +- u32 data);
18244 +-int realtek_smi_setup_mdio(struct realtek_smi *smi);
18245 +-
18246 +-/* RTL8366 library helpers */
18247 +-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
18248 +-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
18249 +- u32 untag, u32 fid);
18250 +-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
18251 +- unsigned int vid);
18252 +-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
18253 +-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
18254 +-int rtl8366_reset_vlan(struct realtek_smi *smi);
18255 +-int rtl8366_vlan_add(struct dsa_switch *ds, int port,
18256 +- const struct switchdev_obj_port_vlan *vlan,
18257 +- struct netlink_ext_ack *extack);
18258 +-int rtl8366_vlan_del(struct dsa_switch *ds, int port,
18259 +- const struct switchdev_obj_port_vlan *vlan);
18260 +-void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
18261 +- uint8_t *data);
18262 +-int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
18263 +-void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
18264 +-
18265 +-extern const struct realtek_smi_variant rtl8366rb_variant;
18266 +-extern const struct realtek_smi_variant rtl8365mb_variant;
18267 +-
18268 +-#endif /* _REALTEK_SMI_H */
18269 +diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
18270 +new file mode 100644
18271 +index 0000000000000..1c62212fb0ecb
18272 +--- /dev/null
18273 ++++ b/drivers/net/dsa/realtek/Kconfig
18274 +@@ -0,0 +1,20 @@
18275 ++# SPDX-License-Identifier: GPL-2.0-only
18276 ++menuconfig NET_DSA_REALTEK
18277 ++ tristate "Realtek Ethernet switch family support"
18278 ++ depends on NET_DSA
18279 ++ select NET_DSA_TAG_RTL4_A
18280 ++ select NET_DSA_TAG_RTL8_4
18281 ++ select FIXED_PHY
18282 ++ select IRQ_DOMAIN
18283 ++ select REALTEK_PHY
18284 ++ select REGMAP
18285 ++ help
18286 ++ Select to enable support for Realtek Ethernet switch chips.
18287 ++
18288 ++config NET_DSA_REALTEK_SMI
18289 ++ tristate "Realtek SMI connected switch driver"
18290 ++ depends on NET_DSA_REALTEK
18291 ++ default y
18292 ++ help
18293 ++ Select to enable support for registering switches connected
18294 ++ through SMI.
18295 +diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
18296 +new file mode 100644
18297 +index 0000000000000..323b921bfce0f
18298 +--- /dev/null
18299 ++++ b/drivers/net/dsa/realtek/Makefile
18300 +@@ -0,0 +1,3 @@
18301 ++# SPDX-License-Identifier: GPL-2.0
18302 ++obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
18303 ++realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
18304 +diff --git a/drivers/net/dsa/realtek/realtek-smi-core.c b/drivers/net/dsa/realtek/realtek-smi-core.c
18305 +new file mode 100644
18306 +index 0000000000000..aae46ada8d839
18307 +--- /dev/null
18308 ++++ b/drivers/net/dsa/realtek/realtek-smi-core.c
18309 +@@ -0,0 +1,523 @@
18310 ++// SPDX-License-Identifier: GPL-2.0+
18311 ++/* Realtek Simple Management Interface (SMI) driver
18312 ++ * It can be discussed how "simple" this interface is.
18313 ++ *
18314 ++ * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
18315 ++ * but the protocol is not MDIO at all. Instead it is a Realtek
18316 ++ * pecularity that need to bit-bang the lines in a special way to
18317 ++ * communicate with the switch.
18318 ++ *
18319 ++ * ASICs we intend to support with this driver:
18320 ++ *
18321 ++ * RTL8366 - The original version, apparently
18322 ++ * RTL8369 - Similar enough to have the same datsheet as RTL8366
18323 ++ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
18324 ++ * different register layout from the other two
18325 ++ * RTL8366S - Is this "RTL8366 super"?
18326 ++ * RTL8367 - Has an OpenWRT driver as well
18327 ++ * RTL8368S - Seems to be an alternative name for RTL8366RB
18328 ++ * RTL8370 - Also uses SMI
18329 ++ *
18330 ++ * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
18331 ++ * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
18332 ++ * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
18333 ++ * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
18334 ++ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
18335 ++ */
18336 ++
18337 ++#include <linux/kernel.h>
18338 ++#include <linux/module.h>
18339 ++#include <linux/device.h>
18340 ++#include <linux/spinlock.h>
18341 ++#include <linux/skbuff.h>
18342 ++#include <linux/of.h>
18343 ++#include <linux/of_device.h>
18344 ++#include <linux/of_mdio.h>
18345 ++#include <linux/delay.h>
18346 ++#include <linux/gpio/consumer.h>
18347 ++#include <linux/platform_device.h>
18348 ++#include <linux/regmap.h>
18349 ++#include <linux/bitops.h>
18350 ++#include <linux/if_bridge.h>
18351 ++
18352 ++#include "realtek-smi-core.h"
18353 ++
18354 ++#define REALTEK_SMI_ACK_RETRY_COUNT 5
18355 ++#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
18356 ++#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
18357 ++
18358 ++static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
18359 ++{
18360 ++ ndelay(smi->clk_delay);
18361 ++}
18362 ++
18363 ++static void realtek_smi_start(struct realtek_smi *smi)
18364 ++{
18365 ++ /* Set GPIO pins to output mode, with initial state:
18366 ++ * SCK = 0, SDA = 1
18367 ++ */
18368 ++ gpiod_direction_output(smi->mdc, 0);
18369 ++ gpiod_direction_output(smi->mdio, 1);
18370 ++ realtek_smi_clk_delay(smi);
18371 ++
18372 ++ /* CLK 1: 0 -> 1, 1 -> 0 */
18373 ++ gpiod_set_value(smi->mdc, 1);
18374 ++ realtek_smi_clk_delay(smi);
18375 ++ gpiod_set_value(smi->mdc, 0);
18376 ++ realtek_smi_clk_delay(smi);
18377 ++
18378 ++ /* CLK 2: */
18379 ++ gpiod_set_value(smi->mdc, 1);
18380 ++ realtek_smi_clk_delay(smi);
18381 ++ gpiod_set_value(smi->mdio, 0);
18382 ++ realtek_smi_clk_delay(smi);
18383 ++ gpiod_set_value(smi->mdc, 0);
18384 ++ realtek_smi_clk_delay(smi);
18385 ++ gpiod_set_value(smi->mdio, 1);
18386 ++}
18387 ++
18388 ++static void realtek_smi_stop(struct realtek_smi *smi)
18389 ++{
18390 ++ realtek_smi_clk_delay(smi);
18391 ++ gpiod_set_value(smi->mdio, 0);
18392 ++ gpiod_set_value(smi->mdc, 1);
18393 ++ realtek_smi_clk_delay(smi);
18394 ++ gpiod_set_value(smi->mdio, 1);
18395 ++ realtek_smi_clk_delay(smi);
18396 ++ gpiod_set_value(smi->mdc, 1);
18397 ++ realtek_smi_clk_delay(smi);
18398 ++ gpiod_set_value(smi->mdc, 0);
18399 ++ realtek_smi_clk_delay(smi);
18400 ++ gpiod_set_value(smi->mdc, 1);
18401 ++
18402 ++ /* Add a click */
18403 ++ realtek_smi_clk_delay(smi);
18404 ++ gpiod_set_value(smi->mdc, 0);
18405 ++ realtek_smi_clk_delay(smi);
18406 ++ gpiod_set_value(smi->mdc, 1);
18407 ++
18408 ++ /* Set GPIO pins to input mode */
18409 ++ gpiod_direction_input(smi->mdio);
18410 ++ gpiod_direction_input(smi->mdc);
18411 ++}
18412 ++
18413 ++static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
18414 ++{
18415 ++ for (; len > 0; len--) {
18416 ++ realtek_smi_clk_delay(smi);
18417 ++
18418 ++ /* Prepare data */
18419 ++ gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
18420 ++ realtek_smi_clk_delay(smi);
18421 ++
18422 ++ /* Clocking */
18423 ++ gpiod_set_value(smi->mdc, 1);
18424 ++ realtek_smi_clk_delay(smi);
18425 ++ gpiod_set_value(smi->mdc, 0);
18426 ++ }
18427 ++}
18428 ++
18429 ++static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
18430 ++{
18431 ++ gpiod_direction_input(smi->mdio);
18432 ++
18433 ++ for (*data = 0; len > 0; len--) {
18434 ++ u32 u;
18435 ++
18436 ++ realtek_smi_clk_delay(smi);
18437 ++
18438 ++ /* Clocking */
18439 ++ gpiod_set_value(smi->mdc, 1);
18440 ++ realtek_smi_clk_delay(smi);
18441 ++ u = !!gpiod_get_value(smi->mdio);
18442 ++ gpiod_set_value(smi->mdc, 0);
18443 ++
18444 ++ *data |= (u << (len - 1));
18445 ++ }
18446 ++
18447 ++ gpiod_direction_output(smi->mdio, 0);
18448 ++}
18449 ++
18450 ++static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
18451 ++{
18452 ++ int retry_cnt;
18453 ++
18454 ++ retry_cnt = 0;
18455 ++ do {
18456 ++ u32 ack;
18457 ++
18458 ++ realtek_smi_read_bits(smi, 1, &ack);
18459 ++ if (ack == 0)
18460 ++ break;
18461 ++
18462 ++ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
18463 ++ dev_err(smi->dev, "ACK timeout\n");
18464 ++ return -ETIMEDOUT;
18465 ++ }
18466 ++ } while (1);
18467 ++
18468 ++ return 0;
18469 ++}
18470 ++
18471 ++static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
18472 ++{
18473 ++ realtek_smi_write_bits(smi, data, 8);
18474 ++ return realtek_smi_wait_for_ack(smi);
18475 ++}
18476 ++
18477 ++static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
18478 ++{
18479 ++ realtek_smi_write_bits(smi, data, 8);
18480 ++ return 0;
18481 ++}
18482 ++
18483 ++static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
18484 ++{
18485 ++ u32 t;
18486 ++
18487 ++ /* Read data */
18488 ++ realtek_smi_read_bits(smi, 8, &t);
18489 ++ *data = (t & 0xff);
18490 ++
18491 ++ /* Send an ACK */
18492 ++ realtek_smi_write_bits(smi, 0x00, 1);
18493 ++
18494 ++ return 0;
18495 ++}
18496 ++
18497 ++static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
18498 ++{
18499 ++ u32 t;
18500 ++
18501 ++ /* Read data */
18502 ++ realtek_smi_read_bits(smi, 8, &t);
18503 ++ *data = (t & 0xff);
18504 ++
18505 ++ /* Send an ACK */
18506 ++ realtek_smi_write_bits(smi, 0x01, 1);
18507 ++
18508 ++ return 0;
18509 ++}
18510 ++
18511 ++static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
18512 ++{
18513 ++ unsigned long flags;
18514 ++ u8 lo = 0;
18515 ++ u8 hi = 0;
18516 ++ int ret;
18517 ++
18518 ++ spin_lock_irqsave(&smi->lock, flags);
18519 ++
18520 ++ realtek_smi_start(smi);
18521 ++
18522 ++ /* Send READ command */
18523 ++ ret = realtek_smi_write_byte(smi, smi->cmd_read);
18524 ++ if (ret)
18525 ++ goto out;
18526 ++
18527 ++ /* Set ADDR[7:0] */
18528 ++ ret = realtek_smi_write_byte(smi, addr & 0xff);
18529 ++ if (ret)
18530 ++ goto out;
18531 ++
18532 ++ /* Set ADDR[15:8] */
18533 ++ ret = realtek_smi_write_byte(smi, addr >> 8);
18534 ++ if (ret)
18535 ++ goto out;
18536 ++
18537 ++ /* Read DATA[7:0] */
18538 ++ realtek_smi_read_byte0(smi, &lo);
18539 ++ /* Read DATA[15:8] */
18540 ++ realtek_smi_read_byte1(smi, &hi);
18541 ++
18542 ++ *data = ((u32)lo) | (((u32)hi) << 8);
18543 ++
18544 ++ ret = 0;
18545 ++
18546 ++ out:
18547 ++ realtek_smi_stop(smi);
18548 ++ spin_unlock_irqrestore(&smi->lock, flags);
18549 ++
18550 ++ return ret;
18551 ++}
18552 ++
18553 ++static int realtek_smi_write_reg(struct realtek_smi *smi,
18554 ++ u32 addr, u32 data, bool ack)
18555 ++{
18556 ++ unsigned long flags;
18557 ++ int ret;
18558 ++
18559 ++ spin_lock_irqsave(&smi->lock, flags);
18560 ++
18561 ++ realtek_smi_start(smi);
18562 ++
18563 ++ /* Send WRITE command */
18564 ++ ret = realtek_smi_write_byte(smi, smi->cmd_write);
18565 ++ if (ret)
18566 ++ goto out;
18567 ++
18568 ++ /* Set ADDR[7:0] */
18569 ++ ret = realtek_smi_write_byte(smi, addr & 0xff);
18570 ++ if (ret)
18571 ++ goto out;
18572 ++
18573 ++ /* Set ADDR[15:8] */
18574 ++ ret = realtek_smi_write_byte(smi, addr >> 8);
18575 ++ if (ret)
18576 ++ goto out;
18577 ++
18578 ++ /* Write DATA[7:0] */
18579 ++ ret = realtek_smi_write_byte(smi, data & 0xff);
18580 ++ if (ret)
18581 ++ goto out;
18582 ++
18583 ++ /* Write DATA[15:8] */
18584 ++ if (ack)
18585 ++ ret = realtek_smi_write_byte(smi, data >> 8);
18586 ++ else
18587 ++ ret = realtek_smi_write_byte_noack(smi, data >> 8);
18588 ++ if (ret)
18589 ++ goto out;
18590 ++
18591 ++ ret = 0;
18592 ++
18593 ++ out:
18594 ++ realtek_smi_stop(smi);
18595 ++ spin_unlock_irqrestore(&smi->lock, flags);
18596 ++
18597 ++ return ret;
18598 ++}
18599 ++
18600 ++/* There is one single case when we need to use this accessor and that
18601 ++ * is when issueing soft reset. Since the device reset as soon as we write
18602 ++ * that bit, no ACK will come back for natural reasons.
18603 ++ */
18604 ++int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
18605 ++ u32 data)
18606 ++{
18607 ++ return realtek_smi_write_reg(smi, addr, data, false);
18608 ++}
18609 ++EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
18610 ++
18611 ++/* Regmap accessors */
18612 ++
18613 ++static int realtek_smi_write(void *ctx, u32 reg, u32 val)
18614 ++{
18615 ++ struct realtek_smi *smi = ctx;
18616 ++
18617 ++ return realtek_smi_write_reg(smi, reg, val, true);
18618 ++}
18619 ++
18620 ++static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
18621 ++{
18622 ++ struct realtek_smi *smi = ctx;
18623 ++
18624 ++ return realtek_smi_read_reg(smi, reg, val);
18625 ++}
18626 ++
18627 ++static const struct regmap_config realtek_smi_mdio_regmap_config = {
18628 ++ .reg_bits = 10, /* A4..A0 R4..R0 */
18629 ++ .val_bits = 16,
18630 ++ .reg_stride = 1,
18631 ++ /* PHY regs are at 0x8000 */
18632 ++ .max_register = 0xffff,
18633 ++ .reg_format_endian = REGMAP_ENDIAN_BIG,
18634 ++ .reg_read = realtek_smi_read,
18635 ++ .reg_write = realtek_smi_write,
18636 ++ .cache_type = REGCACHE_NONE,
18637 ++};
18638 ++
18639 ++static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
18640 ++{
18641 ++ struct realtek_smi *smi = bus->priv;
18642 ++
18643 ++ return smi->ops->phy_read(smi, addr, regnum);
18644 ++}
18645 ++
18646 ++static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
18647 ++ u16 val)
18648 ++{
18649 ++ struct realtek_smi *smi = bus->priv;
18650 ++
18651 ++ return smi->ops->phy_write(smi, addr, regnum, val);
18652 ++}
18653 ++
18654 ++int realtek_smi_setup_mdio(struct realtek_smi *smi)
18655 ++{
18656 ++ struct device_node *mdio_np;
18657 ++ int ret;
18658 ++
18659 ++ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
18660 ++ if (!mdio_np) {
18661 ++ dev_err(smi->dev, "no MDIO bus node\n");
18662 ++ return -ENODEV;
18663 ++ }
18664 ++
18665 ++ smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
18666 ++ if (!smi->slave_mii_bus) {
18667 ++ ret = -ENOMEM;
18668 ++ goto err_put_node;
18669 ++ }
18670 ++ smi->slave_mii_bus->priv = smi;
18671 ++ smi->slave_mii_bus->name = "SMI slave MII";
18672 ++ smi->slave_mii_bus->read = realtek_smi_mdio_read;
18673 ++ smi->slave_mii_bus->write = realtek_smi_mdio_write;
18674 ++ snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
18675 ++ smi->ds->index);
18676 ++ smi->slave_mii_bus->dev.of_node = mdio_np;
18677 ++ smi->slave_mii_bus->parent = smi->dev;
18678 ++ smi->ds->slave_mii_bus = smi->slave_mii_bus;
18679 ++
18680 ++ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
18681 ++ if (ret) {
18682 ++ dev_err(smi->dev, "unable to register MDIO bus %s\n",
18683 ++ smi->slave_mii_bus->id);
18684 ++ goto err_put_node;
18685 ++ }
18686 ++
18687 ++ return 0;
18688 ++
18689 ++err_put_node:
18690 ++ of_node_put(mdio_np);
18691 ++
18692 ++ return ret;
18693 ++}
18694 ++
18695 ++static int realtek_smi_probe(struct platform_device *pdev)
18696 ++{
18697 ++ const struct realtek_smi_variant *var;
18698 ++ struct device *dev = &pdev->dev;
18699 ++ struct realtek_smi *smi;
18700 ++ struct device_node *np;
18701 ++ int ret;
18702 ++
18703 ++ var = of_device_get_match_data(dev);
18704 ++ np = dev->of_node;
18705 ++
18706 ++ smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
18707 ++ if (!smi)
18708 ++ return -ENOMEM;
18709 ++ smi->chip_data = (void *)smi + sizeof(*smi);
18710 ++ smi->map = devm_regmap_init(dev, NULL, smi,
18711 ++ &realtek_smi_mdio_regmap_config);
18712 ++ if (IS_ERR(smi->map)) {
18713 ++ ret = PTR_ERR(smi->map);
18714 ++ dev_err(dev, "regmap init failed: %d\n", ret);
18715 ++ return ret;
18716 ++ }
18717 ++
18718 ++ /* Link forward and backward */
18719 ++ smi->dev = dev;
18720 ++ smi->clk_delay = var->clk_delay;
18721 ++ smi->cmd_read = var->cmd_read;
18722 ++ smi->cmd_write = var->cmd_write;
18723 ++ smi->ops = var->ops;
18724 ++
18725 ++ dev_set_drvdata(dev, smi);
18726 ++ spin_lock_init(&smi->lock);
18727 ++
18728 ++ /* TODO: if power is software controlled, set up any regulators here */
18729 ++
18730 ++ /* Assert then deassert RESET */
18731 ++ smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
18732 ++ if (IS_ERR(smi->reset)) {
18733 ++ dev_err(dev, "failed to get RESET GPIO\n");
18734 ++ return PTR_ERR(smi->reset);
18735 ++ }
18736 ++ msleep(REALTEK_SMI_HW_STOP_DELAY);
18737 ++ gpiod_set_value(smi->reset, 0);
18738 ++ msleep(REALTEK_SMI_HW_START_DELAY);
18739 ++ dev_info(dev, "deasserted RESET\n");
18740 ++
18741 ++ /* Fetch MDIO pins */
18742 ++ smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
18743 ++ if (IS_ERR(smi->mdc))
18744 ++ return PTR_ERR(smi->mdc);
18745 ++ smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
18746 ++ if (IS_ERR(smi->mdio))
18747 ++ return PTR_ERR(smi->mdio);
18748 ++
18749 ++ smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
18750 ++
18751 ++ ret = smi->ops->detect(smi);
18752 ++ if (ret) {
18753 ++ dev_err(dev, "unable to detect switch\n");
18754 ++ return ret;
18755 ++ }
18756 ++
18757 ++ smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
18758 ++ if (!smi->ds)
18759 ++ return -ENOMEM;
18760 ++
18761 ++ smi->ds->dev = dev;
18762 ++ smi->ds->num_ports = smi->num_ports;
18763 ++ smi->ds->priv = smi;
18764 ++
18765 ++ smi->ds->ops = var->ds_ops;
18766 ++ ret = dsa_register_switch(smi->ds);
18767 ++ if (ret) {
18768 ++ dev_err_probe(dev, ret, "unable to register switch\n");
18769 ++ return ret;
18770 ++ }
18771 ++ return 0;
18772 ++}
18773 ++
18774 ++static int realtek_smi_remove(struct platform_device *pdev)
18775 ++{
18776 ++ struct realtek_smi *smi = platform_get_drvdata(pdev);
18777 ++
18778 ++ if (!smi)
18779 ++ return 0;
18780 ++
18781 ++ dsa_unregister_switch(smi->ds);
18782 ++ if (smi->slave_mii_bus)
18783 ++ of_node_put(smi->slave_mii_bus->dev.of_node);
18784 ++ gpiod_set_value(smi->reset, 1);
18785 ++
18786 ++ platform_set_drvdata(pdev, NULL);
18787 ++
18788 ++ return 0;
18789 ++}
18790 ++
18791 ++static void realtek_smi_shutdown(struct platform_device *pdev)
18792 ++{
18793 ++ struct realtek_smi *smi = platform_get_drvdata(pdev);
18794 ++
18795 ++ if (!smi)
18796 ++ return;
18797 ++
18798 ++ dsa_switch_shutdown(smi->ds);
18799 ++
18800 ++ platform_set_drvdata(pdev, NULL);
18801 ++}
18802 ++
18803 ++static const struct of_device_id realtek_smi_of_match[] = {
18804 ++ {
18805 ++ .compatible = "realtek,rtl8366rb",
18806 ++ .data = &rtl8366rb_variant,
18807 ++ },
18808 ++ {
18809 ++ /* FIXME: add support for RTL8366S and more */
18810 ++ .compatible = "realtek,rtl8366s",
18811 ++ .data = NULL,
18812 ++ },
18813 ++ {
18814 ++ .compatible = "realtek,rtl8365mb",
18815 ++ .data = &rtl8365mb_variant,
18816 ++ },
18817 ++ { /* sentinel */ },
18818 ++};
18819 ++MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
18820 ++
18821 ++static struct platform_driver realtek_smi_driver = {
18822 ++ .driver = {
18823 ++ .name = "realtek-smi",
18824 ++ .of_match_table = of_match_ptr(realtek_smi_of_match),
18825 ++ },
18826 ++ .probe = realtek_smi_probe,
18827 ++ .remove = realtek_smi_remove,
18828 ++ .shutdown = realtek_smi_shutdown,
18829 ++};
18830 ++module_platform_driver(realtek_smi_driver);
18831 ++
18832 ++MODULE_LICENSE("GPL");
18833 +diff --git a/drivers/net/dsa/realtek/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek-smi-core.h
18834 +new file mode 100644
18835 +index 0000000000000..faed387d8db38
18836 +--- /dev/null
18837 ++++ b/drivers/net/dsa/realtek/realtek-smi-core.h
18838 +@@ -0,0 +1,145 @@
18839 ++/* SPDX-License-Identifier: GPL-2.0+ */
18840 ++/* Realtek SMI interface driver defines
18841 ++ *
18842 ++ * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
18843 ++ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
18844 ++ */
18845 ++
18846 ++#ifndef _REALTEK_SMI_H
18847 ++#define _REALTEK_SMI_H
18848 ++
18849 ++#include <linux/phy.h>
18850 ++#include <linux/platform_device.h>
18851 ++#include <linux/gpio/consumer.h>
18852 ++#include <net/dsa.h>
18853 ++
18854 ++struct realtek_smi_ops;
18855 ++struct dentry;
18856 ++struct inode;
18857 ++struct file;
18858 ++
18859 ++struct rtl8366_mib_counter {
18860 ++ unsigned int base;
18861 ++ unsigned int offset;
18862 ++ unsigned int length;
18863 ++ const char *name;
18864 ++};
18865 ++
18866 ++/*
18867 ++ * struct rtl8366_vlan_mc - Virtual LAN member configuration
18868 ++ */
18869 ++struct rtl8366_vlan_mc {
18870 ++ u16 vid;
18871 ++ u16 untag;
18872 ++ u16 member;
18873 ++ u8 fid;
18874 ++ u8 priority;
18875 ++};
18876 ++
18877 ++struct rtl8366_vlan_4k {
18878 ++ u16 vid;
18879 ++ u16 untag;
18880 ++ u16 member;
18881 ++ u8 fid;
18882 ++};
18883 ++
18884 ++struct realtek_smi {
18885 ++ struct device *dev;
18886 ++ struct gpio_desc *reset;
18887 ++ struct gpio_desc *mdc;
18888 ++ struct gpio_desc *mdio;
18889 ++ struct regmap *map;
18890 ++ struct mii_bus *slave_mii_bus;
18891 ++
18892 ++ unsigned int clk_delay;
18893 ++ u8 cmd_read;
18894 ++ u8 cmd_write;
18895 ++ spinlock_t lock; /* Locks around command writes */
18896 ++ struct dsa_switch *ds;
18897 ++ struct irq_domain *irqdomain;
18898 ++ bool leds_disabled;
18899 ++
18900 ++ unsigned int cpu_port;
18901 ++ unsigned int num_ports;
18902 ++ unsigned int num_vlan_mc;
18903 ++ unsigned int num_mib_counters;
18904 ++ struct rtl8366_mib_counter *mib_counters;
18905 ++
18906 ++ const struct realtek_smi_ops *ops;
18907 ++
18908 ++ int vlan_enabled;
18909 ++ int vlan4k_enabled;
18910 ++
18911 ++ char buf[4096];
18912 ++ void *chip_data; /* Per-chip extra variant data */
18913 ++};
18914 ++
18915 ++/*
18916 ++ * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
18917 ++ * @detect: detects the chiptype
18918 ++ */
18919 ++struct realtek_smi_ops {
18920 ++ int (*detect)(struct realtek_smi *smi);
18921 ++ int (*reset_chip)(struct realtek_smi *smi);
18922 ++ int (*setup)(struct realtek_smi *smi);
18923 ++ void (*cleanup)(struct realtek_smi *smi);
18924 ++ int (*get_mib_counter)(struct realtek_smi *smi,
18925 ++ int port,
18926 ++ struct rtl8366_mib_counter *mib,
18927 ++ u64 *mibvalue);
18928 ++ int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
18929 ++ struct rtl8366_vlan_mc *vlanmc);
18930 ++ int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
18931 ++ const struct rtl8366_vlan_mc *vlanmc);
18932 ++ int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
18933 ++ struct rtl8366_vlan_4k *vlan4k);
18934 ++ int (*set_vlan_4k)(struct realtek_smi *smi,
18935 ++ const struct rtl8366_vlan_4k *vlan4k);
18936 ++ int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
18937 ++ int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
18938 ++ bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
18939 ++ int (*enable_vlan)(struct realtek_smi *smi, bool enable);
18940 ++ int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
18941 ++ int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
18942 ++ int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
18943 ++ int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
18944 ++ u16 val);
18945 ++};
18946 ++
18947 ++struct realtek_smi_variant {
18948 ++ const struct dsa_switch_ops *ds_ops;
18949 ++ const struct realtek_smi_ops *ops;
18950 ++ unsigned int clk_delay;
18951 ++ u8 cmd_read;
18952 ++ u8 cmd_write;
18953 ++ size_t chip_data_sz;
18954 ++};
18955 ++
18956 ++/* SMI core calls */
18957 ++int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
18958 ++ u32 data);
18959 ++int realtek_smi_setup_mdio(struct realtek_smi *smi);
18960 ++
18961 ++/* RTL8366 library helpers */
18962 ++int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
18963 ++int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
18964 ++ u32 untag, u32 fid);
18965 ++int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
18966 ++ unsigned int vid);
18967 ++int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
18968 ++int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
18969 ++int rtl8366_reset_vlan(struct realtek_smi *smi);
18970 ++int rtl8366_vlan_add(struct dsa_switch *ds, int port,
18971 ++ const struct switchdev_obj_port_vlan *vlan,
18972 ++ struct netlink_ext_ack *extack);
18973 ++int rtl8366_vlan_del(struct dsa_switch *ds, int port,
18974 ++ const struct switchdev_obj_port_vlan *vlan);
18975 ++void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
18976 ++ uint8_t *data);
18977 ++int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
18978 ++void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
18979 ++
18980 ++extern const struct realtek_smi_variant rtl8366rb_variant;
18981 ++extern const struct realtek_smi_variant rtl8365mb_variant;
18982 ++
18983 ++#endif /* _REALTEK_SMI_H */
18984 +diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
18985 +new file mode 100644
18986 +index 0000000000000..3b729544798b1
18987 +--- /dev/null
18988 ++++ b/drivers/net/dsa/realtek/rtl8365mb.c
18989 +@@ -0,0 +1,1987 @@
18990 ++// SPDX-License-Identifier: GPL-2.0
18991 ++/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
18992 ++ *
18993 ++ * Copyright (C) 2021 Alvin Šipraga <alsi@××××××××××××.dk>
18994 ++ * Copyright (C) 2021 Michael Rasmussen <mir@××××××××××××.dk>
18995 ++ *
18996 ++ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
18997 ++ * integrated PHYs for the user facing ports, and an extension interface which
18998 ++ * can be connected to the CPU - or another PHY - via either MII, RMII, or
18999 ++ * RGMII. The switch is configured via the Realtek Simple Management Interface
19000 ++ * (SMI), which uses the MDIO/MDC lines.
19001 ++ *
19002 ++ * Below is a simplified block diagram of the chip and its relevant interfaces.
19003 ++ *
19004 ++ * .-----------------------------------.
19005 ++ * | |
19006 ++ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
19007 ++ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
19008 ++ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
19009 ++ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
19010 ++ * | |
19011 ++ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
19012 ++ * | interface 1 GMAC 1 |
19013 ++ * | |
19014 ++ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
19015 ++ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
19016 ++ * | ~RTL8365MB ~~~ |
19017 ++ * | ~GXXXC TAIWAN~ |
19018 ++ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
19019 ++ * | |
19020 ++ * Interrupt <----------> Link UP/DOWN events |
19021 ++ * controller | |
19022 ++ * '-----------------------------------'
19023 ++ *
19024 ++ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
19025 ++ * kernel. Netdevices are created for the user ports, as are PHY devices for
19026 ++ * their integrated PHYs. The device tree firmware should also specify the link
19027 ++ * partner of the extension port - either via a fixed-link or other phy-handle.
19028 ++ * See the device tree bindings for more detailed information. Note that the
19029 ++ * driver has only been tested with a fixed-link, but in principle it should not
19030 ++ * matter.
19031 ++ *
19032 ++ * NOTE: Currently, only the RGMII interface is implemented in this driver.
19033 ++ *
19034 ++ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
19035 ++ * custom irqchip to handle this interrupt and demultiplex the events by reading
19036 ++ * the status registers via SMI. Interrupts are then propagated to the relevant
19037 ++ * PHY device.
19038 ++ *
19039 ++ * The EEPROM contains initial register values which the chip will read over I2C
19040 ++ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
19041 ++ * the driver will manually reprogram some registers using jam tables to reach
19042 ++ * an initial state defined by the vendor driver.
19043 ++ *
19044 ++ * This Linux driver is written based on an OS-agnostic vendor driver from
19045 ++ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
19046 ++ * source tree under the name rtl8367c. The vendor driver claims to support a
19047 ++ * number of similar switch controllers from Realtek, but the only hardware we
19048 ++ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
19049 ++ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
19050 ++ * common hardware revision, there exist examples of chips with the suffix -VC
19051 ++ * which are explicitly not supported by the rtl8367c driver and which instead
19052 ++ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
19053 ++ * been modestly named rtl8365mb. Future implementors may wish to rename things
19054 ++ * accordingly.
19055 ++ *
19056 ++ * In the same family of chips, some carry up to 8 user ports and up to 2
19057 ++ * extension ports. Where possible this driver tries to make things generic, but
19058 ++ * more work must be done to support these configurations. According to
19059 ++ * documentation from Realtek, the family should include the following chips:
19060 ++ *
19061 ++ * - RTL8363NB
19062 ++ * - RTL8363NB-VB
19063 ++ * - RTL8363SC
19064 ++ * - RTL8363SC-VB
19065 ++ * - RTL8364NB
19066 ++ * - RTL8364NB-VB
19067 ++ * - RTL8365MB-VC
19068 ++ * - RTL8366SC
19069 ++ * - RTL8367RB-VB
19070 ++ * - RTL8367SB
19071 ++ * - RTL8367S
19072 ++ * - RTL8370MB
19073 ++ * - RTL8310SR
19074 ++ *
19075 ++ * Some of the register logic for these additional chips has been skipped over
19076 ++ * while implementing this driver. It is therefore not possible to assume that
19077 ++ * things will work out-of-the-box for other chips, and a careful review of the
19078 ++ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
19079 ++ * one of the simpler chips.
19080 ++ */
19081 ++
19082 ++#include <linux/bitfield.h>
19083 ++#include <linux/bitops.h>
19084 ++#include <linux/interrupt.h>
19085 ++#include <linux/irqdomain.h>
19086 ++#include <linux/mutex.h>
19087 ++#include <linux/of_irq.h>
19088 ++#include <linux/regmap.h>
19089 ++#include <linux/if_bridge.h>
19090 ++
19091 ++#include "realtek-smi-core.h"
19092 ++
19093 ++/* Chip-specific data and limits */
19094 ++#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
19095 ++#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
19096 ++#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
19097 ++
19098 ++/* Family-specific data and limits */
19099 ++#define RTL8365MB_PHYADDRMAX 7
19100 ++#define RTL8365MB_NUM_PHYREGS 32
19101 ++#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
19102 ++#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
19103 ++
19104 ++/* Chip identification registers */
19105 ++#define RTL8365MB_CHIP_ID_REG 0x1300
19106 ++
19107 ++#define RTL8365MB_CHIP_VER_REG 0x1301
19108 ++
19109 ++#define RTL8365MB_MAGIC_REG 0x13C2
19110 ++#define RTL8365MB_MAGIC_VALUE 0x0249
19111 ++
19112 ++/* Chip reset register */
19113 ++#define RTL8365MB_CHIP_RESET_REG 0x1322
19114 ++#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
19115 ++#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
19116 ++
19117 ++/* Interrupt polarity register */
19118 ++#define RTL8365MB_INTR_POLARITY_REG 0x1100
19119 ++#define RTL8365MB_INTR_POLARITY_MASK 0x0001
19120 ++#define RTL8365MB_INTR_POLARITY_HIGH 0
19121 ++#define RTL8365MB_INTR_POLARITY_LOW 1
19122 ++
19123 ++/* Interrupt control/status register - enable/check specific interrupt types */
19124 ++#define RTL8365MB_INTR_CTRL_REG 0x1101
19125 ++#define RTL8365MB_INTR_STATUS_REG 0x1102
19126 ++#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
19127 ++#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
19128 ++#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
19129 ++#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
19130 ++#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
19131 ++#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
19132 ++#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
19133 ++#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
19134 ++#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
19135 ++#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
19136 ++#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
19137 ++#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
19138 ++#define RTL8365MB_INTR_ALL_MASK \
19139 ++ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
19140 ++ RTL8365MB_INTR_SLIENT_START_MASK | \
19141 ++ RTL8365MB_INTR_ACL_ACTION_MASK | \
19142 ++ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
19143 ++ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
19144 ++ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
19145 ++ RTL8365MB_INTR_GREEN_TIMER_MASK | \
19146 ++ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
19147 ++ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
19148 ++ RTL8365MB_INTR_LEARN_OVER_MASK | \
19149 ++ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
19150 ++ RTL8365MB_INTR_LINK_CHANGE_MASK)
19151 ++
19152 ++/* Per-port interrupt type status registers */
19153 ++#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
19154 ++#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
19155 ++
19156 ++#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
19157 ++#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
19158 ++
19159 ++/* PHY indirect access registers */
19160 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
19161 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
19162 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
19163 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
19164 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
19165 ++#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
19166 ++#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
19167 ++#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
19168 ++#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
19169 ++#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
19170 ++#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
19171 ++#define RTL8365MB_PHY_BASE 0x2000
19172 ++#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
19173 ++#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
19174 ++
19175 ++/* PHY OCP address prefix register */
19176 ++#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
19177 ++#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
19178 ++#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
19179 ++
19180 ++/* The PHY OCP addresses of PHY registers 0~31 start here */
19181 ++#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
19182 ++
19183 ++/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
19184 ++#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
19185 ++#define RTL8365MB_EXT_PORT_MODE_RGMII 1
19186 ++#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
19187 ++#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
19188 ++#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
19189 ++#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
19190 ++#define RTL8365MB_EXT_PORT_MODE_GMII 6
19191 ++#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
19192 ++#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
19193 ++#define RTL8365MB_EXT_PORT_MODE_SGMII 9
19194 ++#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
19195 ++#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
19196 ++#define RTL8365MB_EXT_PORT_MODE_1000X 12
19197 ++#define RTL8365MB_EXT_PORT_MODE_100FX 13
19198 ++
19199 ++/* EXT port interface mode configuration registers 0~1 */
19200 ++#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
19201 ++#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
19202 ++#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
19203 ++ (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
19204 ++ ((_extport) >> 1) * (0x13C3 - 0x1305))
19205 ++#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
19206 ++ (0xF << (((_extport) % 2)))
19207 ++#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
19208 ++ (((_extport) % 2) * 4)
19209 ++
19210 ++/* EXT port RGMII TX/RX delay configuration registers 1~2 */
19211 ++#define RTL8365MB_EXT_RGMXF_REG1 0x1307
19212 ++#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
19213 ++#define RTL8365MB_EXT_RGMXF_REG(_extport) \
19214 ++ (RTL8365MB_EXT_RGMXF_REG1 + \
19215 ++ (((_extport) >> 1) * (0x13C5 - 0x1307)))
19216 ++#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
19217 ++#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
19218 ++
19219 ++/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
19220 ++#define RTL8365MB_PORT_SPEED_10M 0
19221 ++#define RTL8365MB_PORT_SPEED_100M 1
19222 ++#define RTL8365MB_PORT_SPEED_1000M 2
19223 ++
19224 ++/* EXT port force configuration registers 0~2 */
19225 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
19226 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
19227 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
19228 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
19229 ++ (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
19230 ++ ((_extport) & 0x1) + \
19231 ++ ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
19232 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
19233 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
19234 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
19235 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
19236 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
19237 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
19238 ++#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
19239 ++
19240 ++/* CPU port mask register - controls which ports are treated as CPU ports */
19241 ++#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
19242 ++#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
19243 ++
19244 ++/* CPU control register */
19245 ++#define RTL8365MB_CPU_CTRL_REG 0x121A
19246 ++#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
19247 ++#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
19248 ++#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
19249 ++#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
19250 ++#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
19251 ++#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
19252 ++#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
19253 ++
19254 ++/* Maximum packet length register */
19255 ++#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
19256 ++#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
19257 ++
19258 ++/* Port learning limit registers */
19259 ++#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
19260 ++#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
19261 ++ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
19262 ++
19263 ++/* Port isolation (forwarding mask) registers */
19264 ++#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
19265 ++#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
19266 ++ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
19267 ++#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
19268 ++
19269 ++/* MSTP port state registers - indexed by tree instance */
19270 ++#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
19271 ++#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
19272 ++ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
19273 ++#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
19274 ++#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
19275 ++ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
19276 ++
19277 ++/* MIB counter value registers */
19278 ++#define RTL8365MB_MIB_COUNTER_BASE 0x1000
19279 ++#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
19280 ++
19281 ++/* MIB counter address register */
19282 ++#define RTL8365MB_MIB_ADDRESS_REG 0x1004
19283 ++#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
19284 ++#define RTL8365MB_MIB_ADDRESS(_p, _x) \
19285 ++ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
19286 ++
19287 ++#define RTL8365MB_MIB_CTRL0_REG 0x1005
19288 ++#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
19289 ++#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
19290 ++
19291 ++/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
19292 ++ * to block. On the other hand, accessing MIB counters absolutely requires us to
19293 ++ * block. The solution is thus to schedule work which polls the MIB counters
19294 ++ * asynchronously and updates some private data, which the callback can then
19295 ++ * fetch atomically. Three seconds should be a good enough polling interval.
19296 ++ */
19297 ++#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
19298 ++
19299 ++enum rtl8365mb_mib_counter_index {
19300 ++ RTL8365MB_MIB_ifInOctets,
19301 ++ RTL8365MB_MIB_dot3StatsFCSErrors,
19302 ++ RTL8365MB_MIB_dot3StatsSymbolErrors,
19303 ++ RTL8365MB_MIB_dot3InPauseFrames,
19304 ++ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
19305 ++ RTL8365MB_MIB_etherStatsFragments,
19306 ++ RTL8365MB_MIB_etherStatsJabbers,
19307 ++ RTL8365MB_MIB_ifInUcastPkts,
19308 ++ RTL8365MB_MIB_etherStatsDropEvents,
19309 ++ RTL8365MB_MIB_ifInMulticastPkts,
19310 ++ RTL8365MB_MIB_ifInBroadcastPkts,
19311 ++ RTL8365MB_MIB_inMldChecksumError,
19312 ++ RTL8365MB_MIB_inIgmpChecksumError,
19313 ++ RTL8365MB_MIB_inMldSpecificQuery,
19314 ++ RTL8365MB_MIB_inMldGeneralQuery,
19315 ++ RTL8365MB_MIB_inIgmpSpecificQuery,
19316 ++ RTL8365MB_MIB_inIgmpGeneralQuery,
19317 ++ RTL8365MB_MIB_inMldLeaves,
19318 ++ RTL8365MB_MIB_inIgmpLeaves,
19319 ++ RTL8365MB_MIB_etherStatsOctets,
19320 ++ RTL8365MB_MIB_etherStatsUnderSizePkts,
19321 ++ RTL8365MB_MIB_etherOversizeStats,
19322 ++ RTL8365MB_MIB_etherStatsPkts64Octets,
19323 ++ RTL8365MB_MIB_etherStatsPkts65to127Octets,
19324 ++ RTL8365MB_MIB_etherStatsPkts128to255Octets,
19325 ++ RTL8365MB_MIB_etherStatsPkts256to511Octets,
19326 ++ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
19327 ++ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
19328 ++ RTL8365MB_MIB_ifOutOctets,
19329 ++ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
19330 ++ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
19331 ++ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
19332 ++ RTL8365MB_MIB_dot3StatsLateCollisions,
19333 ++ RTL8365MB_MIB_etherStatsCollisions,
19334 ++ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
19335 ++ RTL8365MB_MIB_dot3OutPauseFrames,
19336 ++ RTL8365MB_MIB_ifOutDiscards,
19337 ++ RTL8365MB_MIB_dot1dTpPortInDiscards,
19338 ++ RTL8365MB_MIB_ifOutUcastPkts,
19339 ++ RTL8365MB_MIB_ifOutMulticastPkts,
19340 ++ RTL8365MB_MIB_ifOutBroadcastPkts,
19341 ++ RTL8365MB_MIB_outOampduPkts,
19342 ++ RTL8365MB_MIB_inOampduPkts,
19343 ++ RTL8365MB_MIB_inIgmpJoinsSuccess,
19344 ++ RTL8365MB_MIB_inIgmpJoinsFail,
19345 ++ RTL8365MB_MIB_inMldJoinsSuccess,
19346 ++ RTL8365MB_MIB_inMldJoinsFail,
19347 ++ RTL8365MB_MIB_inReportSuppressionDrop,
19348 ++ RTL8365MB_MIB_inLeaveSuppressionDrop,
19349 ++ RTL8365MB_MIB_outIgmpReports,
19350 ++ RTL8365MB_MIB_outIgmpLeaves,
19351 ++ RTL8365MB_MIB_outIgmpGeneralQuery,
19352 ++ RTL8365MB_MIB_outIgmpSpecificQuery,
19353 ++ RTL8365MB_MIB_outMldReports,
19354 ++ RTL8365MB_MIB_outMldLeaves,
19355 ++ RTL8365MB_MIB_outMldGeneralQuery,
19356 ++ RTL8365MB_MIB_outMldSpecificQuery,
19357 ++ RTL8365MB_MIB_inKnownMulticastPkts,
19358 ++ RTL8365MB_MIB_END,
19359 ++};
19360 ++
19361 ++struct rtl8365mb_mib_counter {
19362 ++ u32 offset;
19363 ++ u32 length;
19364 ++ const char *name;
19365 ++};
19366 ++
19367 ++#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
19368 ++ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
19369 ++
19370 ++static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
19371 ++ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
19372 ++ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
19373 ++ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
19374 ++ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
19375 ++ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
19376 ++ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
19377 ++ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
19378 ++ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
19379 ++ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
19380 ++ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
19381 ++ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
19382 ++ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
19383 ++ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
19384 ++ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
19385 ++ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
19386 ++ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
19387 ++ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
19388 ++ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
19389 ++ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
19390 ++ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
19391 ++ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
19392 ++ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
19393 ++ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
19394 ++ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
19395 ++ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
19396 ++ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
19397 ++ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
19398 ++ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
19399 ++ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
19400 ++ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
19401 ++ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
19402 ++ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
19403 ++ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
19404 ++ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
19405 ++ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
19406 ++ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
19407 ++ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
19408 ++ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
19409 ++ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
19410 ++ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
19411 ++ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
19412 ++ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
19413 ++ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
19414 ++ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
19415 ++ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
19416 ++ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
19417 ++ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
19418 ++ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
19419 ++ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
19420 ++ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
19421 ++ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
19422 ++ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
19423 ++ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
19424 ++ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
19425 ++ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
19426 ++ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
19427 ++ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
19428 ++ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
19429 ++};
19430 ++
19431 ++static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
19432 ++
19433 ++struct rtl8365mb_jam_tbl_entry {
19434 ++ u16 reg;
19435 ++ u16 val;
19436 ++};
19437 ++
19438 ++/* Lifted from the vendor driver sources */
19439 ++static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
19440 ++ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
19441 ++ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
19442 ++ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
19443 ++ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
19444 ++ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
19445 ++ { 0x13F0, 0x0000 },
19446 ++};
19447 ++
19448 ++static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
19449 ++ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
19450 ++ { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
19451 ++ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
19452 ++ { 0x1D32, 0x0002 },
19453 ++};
19454 ++
19455 ++enum rtl8365mb_stp_state {
19456 ++ RTL8365MB_STP_STATE_DISABLED = 0,
19457 ++ RTL8365MB_STP_STATE_BLOCKING = 1,
19458 ++ RTL8365MB_STP_STATE_LEARNING = 2,
19459 ++ RTL8365MB_STP_STATE_FORWARDING = 3,
19460 ++};
19461 ++
19462 ++enum rtl8365mb_cpu_insert {
19463 ++ RTL8365MB_CPU_INSERT_TO_ALL = 0,
19464 ++ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
19465 ++ RTL8365MB_CPU_INSERT_TO_NONE = 2,
19466 ++};
19467 ++
19468 ++enum rtl8365mb_cpu_position {
19469 ++ RTL8365MB_CPU_POS_AFTER_SA = 0,
19470 ++ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
19471 ++};
19472 ++
19473 ++enum rtl8365mb_cpu_format {
19474 ++ RTL8365MB_CPU_FORMAT_8BYTES = 0,
19475 ++ RTL8365MB_CPU_FORMAT_4BYTES = 1,
19476 ++};
19477 ++
19478 ++enum rtl8365mb_cpu_rxlen {
19479 ++ RTL8365MB_CPU_RXLEN_72BYTES = 0,
19480 ++ RTL8365MB_CPU_RXLEN_64BYTES = 1,
19481 ++};
19482 ++
19483 ++/**
19484 ++ * struct rtl8365mb_cpu - CPU port configuration
19485 ++ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
19486 ++ * @mask: port mask of ports that parse should parse CPU tags
19487 ++ * @trap_port: forward trapped frames to this port
19488 ++ * @insert: CPU tag insertion mode in switch->CPU frames
19489 ++ * @position: position of CPU tag in frame
19490 ++ * @rx_length: minimum CPU RX length
19491 ++ * @format: CPU tag format
19492 ++ *
19493 ++ * Represents the CPU tagging and CPU port configuration of the switch. These
19494 ++ * settings are configurable at runtime.
19495 ++ */
19496 ++struct rtl8365mb_cpu {
19497 ++ bool enable;
19498 ++ u32 mask;
19499 ++ u32 trap_port;
19500 ++ enum rtl8365mb_cpu_insert insert;
19501 ++ enum rtl8365mb_cpu_position position;
19502 ++ enum rtl8365mb_cpu_rxlen rx_length;
19503 ++ enum rtl8365mb_cpu_format format;
19504 ++};
19505 ++
19506 ++/**
19507 ++ * struct rtl8365mb_port - private per-port data
19508 ++ * @smi: pointer to parent realtek_smi data
19509 ++ * @index: DSA port index, same as dsa_port::index
19510 ++ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
19511 ++ * access via rtl8365mb_get_stats64
19512 ++ * @stats_lock: protect the stats structure during read/update
19513 ++ * @mib_work: delayed work for polling MIB counters
19514 ++ */
19515 ++struct rtl8365mb_port {
19516 ++ struct realtek_smi *smi;
19517 ++ unsigned int index;
19518 ++ struct rtnl_link_stats64 stats;
19519 ++ spinlock_t stats_lock;
19520 ++ struct delayed_work mib_work;
19521 ++};
19522 ++
19523 ++/**
19524 ++ * struct rtl8365mb - private chip-specific driver data
19525 ++ * @smi: pointer to parent realtek_smi data
19526 ++ * @irq: registered IRQ or zero
19527 ++ * @chip_id: chip identifier
19528 ++ * @chip_ver: chip silicon revision
19529 ++ * @port_mask: mask of all ports
19530 ++ * @learn_limit_max: maximum number of L2 addresses the chip can learn
19531 ++ * @cpu: CPU tagging and CPU port configuration for this chip
19532 ++ * @mib_lock: prevent concurrent reads of MIB counters
19533 ++ * @ports: per-port data
19534 ++ * @jam_table: chip-specific initialization jam table
19535 ++ * @jam_size: size of the chip's jam table
19536 ++ *
19537 ++ * Private data for this driver.
19538 ++ */
19539 ++struct rtl8365mb {
19540 ++ struct realtek_smi *smi;
19541 ++ int irq;
19542 ++ u32 chip_id;
19543 ++ u32 chip_ver;
19544 ++ u32 port_mask;
19545 ++ u32 learn_limit_max;
19546 ++ struct rtl8365mb_cpu cpu;
19547 ++ struct mutex mib_lock;
19548 ++ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
19549 ++ const struct rtl8365mb_jam_tbl_entry *jam_table;
19550 ++ size_t jam_size;
19551 ++};
19552 ++
19553 ++static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
19554 ++{
19555 ++ u32 val;
19556 ++
19557 ++ return regmap_read_poll_timeout(smi->map,
19558 ++ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
19559 ++ val, !val, 10, 100);
19560 ++}
19561 ++
19562 ++static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
19563 ++ u32 ocp_addr)
19564 ++{
19565 ++ u32 val;
19566 ++ int ret;
19567 ++
19568 ++ /* Set OCP prefix */
19569 ++ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
19570 ++ ret = regmap_update_bits(
19571 ++ smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
19572 ++ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
19573 ++ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
19574 ++ if (ret)
19575 ++ return ret;
19576 ++
19577 ++ /* Set PHY register address */
19578 ++ val = RTL8365MB_PHY_BASE;
19579 ++ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
19580 ++ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
19581 ++ ocp_addr >> 1);
19582 ++ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
19583 ++ ocp_addr >> 6);
19584 ++ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
19585 ++ val);
19586 ++ if (ret)
19587 ++ return ret;
19588 ++
19589 ++ return 0;
19590 ++}
19591 ++
19592 ++static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
19593 ++ u32 ocp_addr, u16 *data)
19594 ++{
19595 ++ u32 val;
19596 ++ int ret;
19597 ++
19598 ++ ret = rtl8365mb_phy_poll_busy(smi);
19599 ++ if (ret)
19600 ++ return ret;
19601 ++
19602 ++ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
19603 ++ if (ret)
19604 ++ return ret;
19605 ++
19606 ++ /* Execute read operation */
19607 ++ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
19608 ++ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
19609 ++ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
19610 ++ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
19611 ++ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
19612 ++ if (ret)
19613 ++ return ret;
19614 ++
19615 ++ ret = rtl8365mb_phy_poll_busy(smi);
19616 ++ if (ret)
19617 ++ return ret;
19618 ++
19619 ++ /* Get PHY register data */
19620 ++ ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
19621 ++ &val);
19622 ++ if (ret)
19623 ++ return ret;
19624 ++
19625 ++ *data = val & 0xFFFF;
19626 ++
19627 ++ return 0;
19628 ++}
19629 ++
19630 ++static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
19631 ++ u32 ocp_addr, u16 data)
19632 ++{
19633 ++ u32 val;
19634 ++ int ret;
19635 ++
19636 ++ ret = rtl8365mb_phy_poll_busy(smi);
19637 ++ if (ret)
19638 ++ return ret;
19639 ++
19640 ++ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
19641 ++ if (ret)
19642 ++ return ret;
19643 ++
19644 ++ /* Set PHY register data */
19645 ++ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
19646 ++ data);
19647 ++ if (ret)
19648 ++ return ret;
19649 ++
19650 ++ /* Execute write operation */
19651 ++ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
19652 ++ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
19653 ++ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
19654 ++ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
19655 ++ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
19656 ++ if (ret)
19657 ++ return ret;
19658 ++
19659 ++ ret = rtl8365mb_phy_poll_busy(smi);
19660 ++ if (ret)
19661 ++ return ret;
19662 ++
19663 ++ return 0;
19664 ++}
19665 ++
19666 ++static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
19667 ++{
19668 ++ u32 ocp_addr;
19669 ++ u16 val;
19670 ++ int ret;
19671 ++
19672 ++ if (phy > RTL8365MB_PHYADDRMAX)
19673 ++ return -EINVAL;
19674 ++
19675 ++ if (regnum > RTL8365MB_PHYREGMAX)
19676 ++ return -EINVAL;
19677 ++
19678 ++ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
19679 ++
19680 ++ ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
19681 ++ if (ret) {
19682 ++ dev_err(smi->dev,
19683 ++ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
19684 ++ regnum, ocp_addr, ret);
19685 ++ return ret;
19686 ++ }
19687 ++
19688 ++ dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
19689 ++ phy, regnum, ocp_addr, val);
19690 ++
19691 ++ return val;
19692 ++}
19693 ++
19694 ++static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
19695 ++ u16 val)
19696 ++{
19697 ++ u32 ocp_addr;
19698 ++ int ret;
19699 ++
19700 ++ if (phy > RTL8365MB_PHYADDRMAX)
19701 ++ return -EINVAL;
19702 ++
19703 ++ if (regnum > RTL8365MB_PHYREGMAX)
19704 ++ return -EINVAL;
19705 ++
19706 ++ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
19707 ++
19708 ++ ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
19709 ++ if (ret) {
19710 ++ dev_err(smi->dev,
19711 ++ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
19712 ++ regnum, ocp_addr, ret);
19713 ++ return ret;
19714 ++ }
19715 ++
19716 ++ dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
19717 ++ phy, regnum, ocp_addr, val);
19718 ++
19719 ++ return 0;
19720 ++}
19721 ++
19722 ++static enum dsa_tag_protocol
19723 ++rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
19724 ++ enum dsa_tag_protocol mp)
19725 ++{
19726 ++ return DSA_TAG_PROTO_RTL8_4;
19727 ++}
19728 ++
19729 ++static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
19730 ++ phy_interface_t interface)
19731 ++{
19732 ++ struct device_node *dn;
19733 ++ struct dsa_port *dp;
19734 ++ int tx_delay = 0;
19735 ++ int rx_delay = 0;
19736 ++ int ext_port;
19737 ++ u32 val;
19738 ++ int ret;
19739 ++
19740 ++ if (port == smi->cpu_port) {
19741 ++ ext_port = 1;
19742 ++ } else {
19743 ++ dev_err(smi->dev, "only one EXT port is currently supported\n");
19744 ++ return -EINVAL;
19745 ++ }
19746 ++
19747 ++ dp = dsa_to_port(smi->ds, port);
19748 ++ dn = dp->dn;
19749 ++
19750 ++ /* Set the RGMII TX/RX delay
19751 ++ *
19752 ++ * The Realtek vendor driver indicates the following possible
19753 ++ * configuration settings:
19754 ++ *
19755 ++ * TX delay:
19756 ++ * 0 = no delay, 1 = 2 ns delay
19757 ++ * RX delay:
19758 ++ * 0 = no delay, 7 = maximum delay
19759 ++ * Each step is approximately 0.3 ns, so the maximum delay is about
19760 ++ * 2.1 ns.
19761 ++ *
19762 ++ * The vendor driver also states that this must be configured *before*
19763 ++ * forcing the external interface into a particular mode, which is done
19764 ++ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
19765 ++ *
19766 ++ * Only configure an RGMII TX (resp. RX) delay if the
19767 ++ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
19768 ++ * specified. We ignore the detail of the RGMII interface mode
19769 ++ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
19770 ++ * property.
19771 ++ */
19772 ++ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
19773 ++ val = val / 1000; /* convert to ns */
19774 ++
19775 ++ if (val == 0 || val == 2)
19776 ++ tx_delay = val / 2;
19777 ++ else
19778 ++ dev_warn(smi->dev,
19779 ++ "EXT port TX delay must be 0 or 2 ns\n");
19780 ++ }
19781 ++
19782 ++ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
19783 ++ val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
19784 ++
19785 ++ if (val <= 7)
19786 ++ rx_delay = val;
19787 ++ else
19788 ++ dev_warn(smi->dev,
19789 ++ "EXT port RX delay must be 0 to 2.1 ns\n");
19790 ++ }
19791 ++
19792 ++ ret = regmap_update_bits(
19793 ++ smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
19794 ++ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
19795 ++ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
19796 ++ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
19797 ++ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
19798 ++ if (ret)
19799 ++ return ret;
19800 ++
19801 ++ ret = regmap_update_bits(
19802 ++ smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
19803 ++ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
19804 ++ RTL8365MB_EXT_PORT_MODE_RGMII
19805 ++ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
19806 ++ ext_port));
19807 ++ if (ret)
19808 ++ return ret;
19809 ++
19810 ++ return 0;
19811 ++}
19812 ++
19813 ++static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
19814 ++ bool link, int speed, int duplex,
19815 ++ bool tx_pause, bool rx_pause)
19816 ++{
19817 ++ u32 r_tx_pause;
19818 ++ u32 r_rx_pause;
19819 ++ u32 r_duplex;
19820 ++ u32 r_speed;
19821 ++ u32 r_link;
19822 ++ int ext_port;
19823 ++ int val;
19824 ++ int ret;
19825 ++
19826 ++ if (port == smi->cpu_port) {
19827 ++ ext_port = 1;
19828 ++ } else {
19829 ++ dev_err(smi->dev, "only one EXT port is currently supported\n");
19830 ++ return -EINVAL;
19831 ++ }
19832 ++
19833 ++ if (link) {
19834 ++ /* Force the link up with the desired configuration */
19835 ++ r_link = 1;
19836 ++ r_rx_pause = rx_pause ? 1 : 0;
19837 ++ r_tx_pause = tx_pause ? 1 : 0;
19838 ++
19839 ++ if (speed == SPEED_1000) {
19840 ++ r_speed = RTL8365MB_PORT_SPEED_1000M;
19841 ++ } else if (speed == SPEED_100) {
19842 ++ r_speed = RTL8365MB_PORT_SPEED_100M;
19843 ++ } else if (speed == SPEED_10) {
19844 ++ r_speed = RTL8365MB_PORT_SPEED_10M;
19845 ++ } else {
19846 ++ dev_err(smi->dev, "unsupported port speed %s\n",
19847 ++ phy_speed_to_str(speed));
19848 ++ return -EINVAL;
19849 ++ }
19850 ++
19851 ++ if (duplex == DUPLEX_FULL) {
19852 ++ r_duplex = 1;
19853 ++ } else if (duplex == DUPLEX_HALF) {
19854 ++ r_duplex = 0;
19855 ++ } else {
19856 ++ dev_err(smi->dev, "unsupported duplex %s\n",
19857 ++ phy_duplex_to_str(duplex));
19858 ++ return -EINVAL;
19859 ++ }
19860 ++ } else {
19861 ++ /* Force the link down and reset any programmed configuration */
19862 ++ r_link = 0;
19863 ++ r_tx_pause = 0;
19864 ++ r_rx_pause = 0;
19865 ++ r_speed = 0;
19866 ++ r_duplex = 0;
19867 ++ }
19868 ++
19869 ++ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
19870 ++ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
19871 ++ r_tx_pause) |
19872 ++ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
19873 ++ r_rx_pause) |
19874 ++ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
19875 ++ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
19876 ++ r_duplex) |
19877 ++ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
19878 ++ ret = regmap_write(smi->map,
19879 ++ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
19880 ++ val);
19881 ++ if (ret)
19882 ++ return ret;
19883 ++
19884 ++ return 0;
19885 ++}
19886 ++
19887 ++static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
19888 ++ phy_interface_t interface)
19889 ++{
19890 ++ if (dsa_is_user_port(ds, port) &&
19891 ++ (interface == PHY_INTERFACE_MODE_NA ||
19892 ++ interface == PHY_INTERFACE_MODE_INTERNAL ||
19893 ++ interface == PHY_INTERFACE_MODE_GMII))
19894 ++ /* Internal PHY */
19895 ++ return true;
19896 ++ else if (dsa_is_cpu_port(ds, port) &&
19897 ++ phy_interface_mode_is_rgmii(interface))
19898 ++ /* Extension MAC */
19899 ++ return true;
19900 ++
19901 ++ return false;
19902 ++}
19903 ++
19904 ++static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
19905 ++ unsigned long *supported,
19906 ++ struct phylink_link_state *state)
19907 ++{
19908 ++ struct realtek_smi *smi = ds->priv;
19909 ++ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
19910 ++
19911 ++ /* include/linux/phylink.h says:
19912 ++ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
19913 ++ * expects the MAC driver to return all supported link modes.
19914 ++ */
19915 ++ if (state->interface != PHY_INTERFACE_MODE_NA &&
19916 ++ !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
19917 ++ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
19918 ++ phy_modes(state->interface), port);
19919 ++ linkmode_zero(supported);
19920 ++ return;
19921 ++ }
19922 ++
19923 ++ phylink_set_port_modes(mask);
19924 ++
19925 ++ phylink_set(mask, Autoneg);
19926 ++ phylink_set(mask, Pause);
19927 ++ phylink_set(mask, Asym_Pause);
19928 ++
19929 ++ phylink_set(mask, 10baseT_Half);
19930 ++ phylink_set(mask, 10baseT_Full);
19931 ++ phylink_set(mask, 100baseT_Half);
19932 ++ phylink_set(mask, 100baseT_Full);
19933 ++ phylink_set(mask, 1000baseT_Full);
19934 ++
19935 ++ linkmode_and(supported, supported, mask);
19936 ++ linkmode_and(state->advertising, state->advertising, mask);
19937 ++}
19938 ++
19939 ++static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
19940 ++ unsigned int mode,
19941 ++ const struct phylink_link_state *state)
19942 ++{
19943 ++ struct realtek_smi *smi = ds->priv;
19944 ++ int ret;
19945 ++
19946 ++ if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
19947 ++ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
19948 ++ phy_modes(state->interface), port);
19949 ++ return;
19950 ++ }
19951 ++
19952 ++ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
19953 ++ dev_err(smi->dev,
19954 ++ "port %d supports only conventional PHY or fixed-link\n",
19955 ++ port);
19956 ++ return;
19957 ++ }
19958 ++
19959 ++ if (phy_interface_mode_is_rgmii(state->interface)) {
19960 ++ ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
19961 ++ if (ret)
19962 ++ dev_err(smi->dev,
19963 ++ "failed to configure RGMII mode on port %d: %d\n",
19964 ++ port, ret);
19965 ++ return;
19966 ++ }
19967 ++
19968 ++ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
19969 ++ * supports
19970 ++ */
19971 ++}
19972 ++
19973 ++static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
19974 ++ unsigned int mode,
19975 ++ phy_interface_t interface)
19976 ++{
19977 ++ struct realtek_smi *smi = ds->priv;
19978 ++ struct rtl8365mb_port *p;
19979 ++ struct rtl8365mb *mb;
19980 ++ int ret;
19981 ++
19982 ++ mb = smi->chip_data;
19983 ++ p = &mb->ports[port];
19984 ++ cancel_delayed_work_sync(&p->mib_work);
19985 ++
19986 ++ if (phy_interface_mode_is_rgmii(interface)) {
19987 ++ ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
19988 ++ false, false);
19989 ++ if (ret)
19990 ++ dev_err(smi->dev,
19991 ++ "failed to reset forced mode on port %d: %d\n",
19992 ++ port, ret);
19993 ++
19994 ++ return;
19995 ++ }
19996 ++}
19997 ++
19998 ++static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
19999 ++ unsigned int mode,
20000 ++ phy_interface_t interface,
20001 ++ struct phy_device *phydev, int speed,
20002 ++ int duplex, bool tx_pause,
20003 ++ bool rx_pause)
20004 ++{
20005 ++ struct realtek_smi *smi = ds->priv;
20006 ++ struct rtl8365mb_port *p;
20007 ++ struct rtl8365mb *mb;
20008 ++ int ret;
20009 ++
20010 ++ mb = smi->chip_data;
20011 ++ p = &mb->ports[port];
20012 ++ schedule_delayed_work(&p->mib_work, 0);
20013 ++
20014 ++ if (phy_interface_mode_is_rgmii(interface)) {
20015 ++ ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
20016 ++ duplex, tx_pause,
20017 ++ rx_pause);
20018 ++ if (ret)
20019 ++ dev_err(smi->dev,
20020 ++ "failed to force mode on port %d: %d\n", port,
20021 ++ ret);
20022 ++
20023 ++ return;
20024 ++ }
20025 ++}
20026 ++
20027 ++static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
20028 ++ u8 state)
20029 ++{
20030 ++ struct realtek_smi *smi = ds->priv;
20031 ++ enum rtl8365mb_stp_state val;
20032 ++ int msti = 0;
20033 ++
20034 ++ switch (state) {
20035 ++ case BR_STATE_DISABLED:
20036 ++ val = RTL8365MB_STP_STATE_DISABLED;
20037 ++ break;
20038 ++ case BR_STATE_BLOCKING:
20039 ++ case BR_STATE_LISTENING:
20040 ++ val = RTL8365MB_STP_STATE_BLOCKING;
20041 ++ break;
20042 ++ case BR_STATE_LEARNING:
20043 ++ val = RTL8365MB_STP_STATE_LEARNING;
20044 ++ break;
20045 ++ case BR_STATE_FORWARDING:
20046 ++ val = RTL8365MB_STP_STATE_FORWARDING;
20047 ++ break;
20048 ++ default:
20049 ++ dev_err(smi->dev, "invalid STP state: %u\n", state);
20050 ++ return;
20051 ++ }
20052 ++
20053 ++ regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
20054 ++ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
20055 ++ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
20056 ++}
20057 ++
20058 ++static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
20059 ++ bool enable)
20060 ++{
20061 ++ struct rtl8365mb *mb = smi->chip_data;
20062 ++
20063 ++ /* Enable/disable learning by limiting the number of L2 addresses the
20064 ++ * port can learn. Realtek documentation states that a limit of zero
20065 ++ * disables learning. When enabling learning, set it to the chip's
20066 ++ * maximum.
20067 ++ */
20068 ++ return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
20069 ++ enable ? mb->learn_limit_max : 0);
20070 ++}
20071 ++
20072 ++static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
20073 ++ u32 mask)
20074 ++{
20075 ++ return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
20076 ++}
20077 ++
20078 ++static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
20079 ++ u32 offset, u32 length, u64 *mibvalue)
20080 ++{
20081 ++ u64 tmpvalue = 0;
20082 ++ u32 val;
20083 ++ int ret;
20084 ++ int i;
20085 ++
20086 ++ /* The MIB address is an SRAM address. We request a particular address
20087 ++ * and then poll the control register before reading the value from some
20088 ++ * counter registers.
20089 ++ */
20090 ++ ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
20091 ++ RTL8365MB_MIB_ADDRESS(port, offset));
20092 ++ if (ret)
20093 ++ return ret;
20094 ++
20095 ++ /* Poll for completion */
20096 ++ ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
20097 ++ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
20098 ++ 10, 100);
20099 ++ if (ret)
20100 ++ return ret;
20101 ++
20102 ++ /* Presumably this indicates a MIB counter read failure */
20103 ++ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
20104 ++ return -EIO;
20105 ++
20106 ++ /* There are four MIB counter registers each holding a 16 bit word of a
20107 ++ * MIB counter. Depending on the offset, we should read from the upper
20108 ++ * two or lower two registers. In case the MIB counter is 4 words, we
20109 ++ * read from all four registers.
20110 ++ */
20111 ++ if (length == 4)
20112 ++ offset = 3;
20113 ++ else
20114 ++ offset = (offset + 1) % 4;
20115 ++
20116 ++ /* Read the MIB counter 16 bits at a time */
20117 ++ for (i = 0; i < length; i++) {
20118 ++ ret = regmap_read(smi->map,
20119 ++ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
20120 ++ if (ret)
20121 ++ return ret;
20122 ++
20123 ++ tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
20124 ++ }
20125 ++
20126 ++ /* Only commit the result if no error occurred */
20127 ++ *mibvalue = tmpvalue;
20128 ++
20129 ++ return 0;
20130 ++}
20131 ++
20132 ++static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
20133 ++{
20134 ++ struct realtek_smi *smi = ds->priv;
20135 ++ struct rtl8365mb *mb;
20136 ++ int ret;
20137 ++ int i;
20138 ++
20139 ++ mb = smi->chip_data;
20140 ++
20141 ++ mutex_lock(&mb->mib_lock);
20142 ++ for (i = 0; i < RTL8365MB_MIB_END; i++) {
20143 ++ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
20144 ++
20145 ++ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
20146 ++ mib->length, &data[i]);
20147 ++ if (ret) {
20148 ++ dev_err(smi->dev,
20149 ++ "failed to read port %d counters: %d\n", port,
20150 ++ ret);
20151 ++ break;
20152 ++ }
20153 ++ }
20154 ++ mutex_unlock(&mb->mib_lock);
20155 ++}
20156 ++
20157 ++static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
20158 ++{
20159 ++ int i;
20160 ++
20161 ++ if (stringset != ETH_SS_STATS)
20162 ++ return;
20163 ++
20164 ++ for (i = 0; i < RTL8365MB_MIB_END; i++) {
20165 ++ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
20166 ++
20167 ++ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
20168 ++ }
20169 ++}
20170 ++
20171 ++static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
20172 ++{
20173 ++ if (sset != ETH_SS_STATS)
20174 ++ return -EOPNOTSUPP;
20175 ++
20176 ++ return RTL8365MB_MIB_END;
20177 ++}
20178 ++
20179 ++static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
20180 ++ struct ethtool_eth_phy_stats *phy_stats)
20181 ++{
20182 ++ struct realtek_smi *smi = ds->priv;
20183 ++ struct rtl8365mb_mib_counter *mib;
20184 ++ struct rtl8365mb *mb;
20185 ++
20186 ++ mb = smi->chip_data;
20187 ++ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
20188 ++
20189 ++ mutex_lock(&mb->mib_lock);
20190 ++ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
20191 ++ &phy_stats->SymbolErrorDuringCarrier);
20192 ++ mutex_unlock(&mb->mib_lock);
20193 ++}
20194 ++
20195 ++static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
20196 ++ struct ethtool_eth_mac_stats *mac_stats)
20197 ++{
20198 ++ u64 cnt[RTL8365MB_MIB_END] = {
20199 ++ [RTL8365MB_MIB_ifOutOctets] = 1,
20200 ++ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
20201 ++ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
20202 ++ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
20203 ++ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
20204 ++ [RTL8365MB_MIB_ifOutDiscards] = 1,
20205 ++ [RTL8365MB_MIB_ifInOctets] = 1,
20206 ++ [RTL8365MB_MIB_ifInUcastPkts] = 1,
20207 ++ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
20208 ++ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
20209 ++ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
20210 ++ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
20211 ++ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
20212 ++ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
20213 ++ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
20214 ++ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
20215 ++ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
20216 ++
20217 ++ };
20218 ++ struct realtek_smi *smi = ds->priv;
20219 ++ struct rtl8365mb *mb;
20220 ++ int ret;
20221 ++ int i;
20222 ++
20223 ++ mb = smi->chip_data;
20224 ++
20225 ++ mutex_lock(&mb->mib_lock);
20226 ++ for (i = 0; i < RTL8365MB_MIB_END; i++) {
20227 ++ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
20228 ++
20229 ++ /* Only fetch required MIB counters (marked = 1 above) */
20230 ++ if (!cnt[i])
20231 ++ continue;
20232 ++
20233 ++ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
20234 ++ mib->length, &cnt[i]);
20235 ++ if (ret)
20236 ++ break;
20237 ++ }
20238 ++ mutex_unlock(&mb->mib_lock);
20239 ++
20240 ++ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
20241 ++ * IEEE 802.3 Managed Objects. This is not always completely faithful,
20242 ++ * but we try out best. See RFC 3635 for a detailed treatment of the
20243 ++ * subject.
20244 ++ */
20245 ++
20246 ++ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
20247 ++ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
20248 ++ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
20249 ++ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
20250 ++ cnt[RTL8365MB_MIB_ifOutDiscards];
20251 ++ mac_stats->SingleCollisionFrames =
20252 ++ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
20253 ++ mac_stats->MultipleCollisionFrames =
20254 ++ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
20255 ++ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
20256 ++ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
20257 ++ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
20258 ++ cnt[RTL8365MB_MIB_dot3InPauseFrames];
20259 ++ mac_stats->FrameCheckSequenceErrors =
20260 ++ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
20261 ++ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
20262 ++ 18 * mac_stats->FramesTransmittedOK;
20263 ++ mac_stats->FramesWithDeferredXmissions =
20264 ++ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
20265 ++ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
20266 ++ mac_stats->FramesAbortedDueToXSColls =
20267 ++ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
20268 ++ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
20269 ++ 18 * mac_stats->FramesReceivedOK;
20270 ++ mac_stats->MulticastFramesXmittedOK =
20271 ++ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
20272 ++ mac_stats->BroadcastFramesXmittedOK =
20273 ++ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
20274 ++ mac_stats->MulticastFramesReceivedOK =
20275 ++ cnt[RTL8365MB_MIB_ifInMulticastPkts];
20276 ++ mac_stats->BroadcastFramesReceivedOK =
20277 ++ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
20278 ++}
20279 ++
20280 ++static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
20281 ++ struct ethtool_eth_ctrl_stats *ctrl_stats)
20282 ++{
20283 ++ struct realtek_smi *smi = ds->priv;
20284 ++ struct rtl8365mb_mib_counter *mib;
20285 ++ struct rtl8365mb *mb;
20286 ++
20287 ++ mb = smi->chip_data;
20288 ++ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
20289 ++
20290 ++ mutex_lock(&mb->mib_lock);
20291 ++ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
20292 ++ &ctrl_stats->UnsupportedOpcodesReceived);
20293 ++ mutex_unlock(&mb->mib_lock);
20294 ++}
20295 ++
20296 ++static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
20297 ++{
20298 ++ u64 cnt[RTL8365MB_MIB_END] = {
20299 ++ [RTL8365MB_MIB_ifOutOctets] = 1,
20300 ++ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
20301 ++ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
20302 ++ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
20303 ++ [RTL8365MB_MIB_ifOutDiscards] = 1,
20304 ++ [RTL8365MB_MIB_ifInOctets] = 1,
20305 ++ [RTL8365MB_MIB_ifInUcastPkts] = 1,
20306 ++ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
20307 ++ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
20308 ++ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
20309 ++ [RTL8365MB_MIB_etherStatsCollisions] = 1,
20310 ++ [RTL8365MB_MIB_etherStatsFragments] = 1,
20311 ++ [RTL8365MB_MIB_etherStatsJabbers] = 1,
20312 ++ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
20313 ++ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
20314 ++ };
20315 ++ struct rtl8365mb *mb = smi->chip_data;
20316 ++ struct rtnl_link_stats64 *stats;
20317 ++ int ret;
20318 ++ int i;
20319 ++
20320 ++ stats = &mb->ports[port].stats;
20321 ++
20322 ++ mutex_lock(&mb->mib_lock);
20323 ++ for (i = 0; i < RTL8365MB_MIB_END; i++) {
20324 ++ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
20325 ++
20326 ++ /* Only fetch required MIB counters (marked = 1 above) */
20327 ++ if (!cnt[i])
20328 ++ continue;
20329 ++
20330 ++ ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
20331 ++ c->length, &cnt[i]);
20332 ++ if (ret)
20333 ++ break;
20334 ++ }
20335 ++ mutex_unlock(&mb->mib_lock);
20336 ++
20337 ++ /* Don't update statistics if there was an error reading the counters */
20338 ++ if (ret)
20339 ++ return;
20340 ++
20341 ++ spin_lock(&mb->ports[port].stats_lock);
20342 ++
20343 ++ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
20344 ++ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
20345 ++ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
20346 ++ cnt[RTL8365MB_MIB_ifOutDiscards];
20347 ++
20348 ++ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
20349 ++ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
20350 ++ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
20351 ++
20352 ++ /* if{In,Out}Octets includes FCS - remove it */
20353 ++ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
20354 ++ stats->tx_bytes =
20355 ++ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
20356 ++
20357 ++ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
20358 ++ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
20359 ++
20360 ++ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
20361 ++ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
20362 ++
20363 ++ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
20364 ++ cnt[RTL8365MB_MIB_etherStatsJabbers];
20365 ++ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
20366 ++ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
20367 ++
20368 ++ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
20369 ++ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
20370 ++ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
20371 ++
20372 ++ spin_unlock(&mb->ports[port].stats_lock);
20373 ++}
20374 ++
20375 ++static void rtl8365mb_stats_poll(struct work_struct *work)
20376 ++{
20377 ++ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
20378 ++ struct rtl8365mb_port,
20379 ++ mib_work);
20380 ++ struct realtek_smi *smi = p->smi;
20381 ++
20382 ++ rtl8365mb_stats_update(smi, p->index);
20383 ++
20384 ++ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
20385 ++}
20386 ++
20387 ++static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
20388 ++ struct rtnl_link_stats64 *s)
20389 ++{
20390 ++ struct realtek_smi *smi = ds->priv;
20391 ++ struct rtl8365mb_port *p;
20392 ++ struct rtl8365mb *mb;
20393 ++
20394 ++ mb = smi->chip_data;
20395 ++ p = &mb->ports[port];
20396 ++
20397 ++ spin_lock(&p->stats_lock);
20398 ++ memcpy(s, &p->stats, sizeof(*s));
20399 ++ spin_unlock(&p->stats_lock);
20400 ++}
20401 ++
20402 ++static void rtl8365mb_stats_setup(struct realtek_smi *smi)
20403 ++{
20404 ++ struct rtl8365mb *mb = smi->chip_data;
20405 ++ int i;
20406 ++
20407 ++ /* Per-chip global mutex to protect MIB counter access, since doing
20408 ++ * so requires accessing a series of registers in a particular order.
20409 ++ */
20410 ++ mutex_init(&mb->mib_lock);
20411 ++
20412 ++ for (i = 0; i < smi->num_ports; i++) {
20413 ++ struct rtl8365mb_port *p = &mb->ports[i];
20414 ++
20415 ++ if (dsa_is_unused_port(smi->ds, i))
20416 ++ continue;
20417 ++
20418 ++ /* Per-port spinlock to protect the stats64 data */
20419 ++ spin_lock_init(&p->stats_lock);
20420 ++
20421 ++ /* This work polls the MIB counters and keeps the stats64 data
20422 ++ * up-to-date.
20423 ++ */
20424 ++ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
20425 ++ }
20426 ++}
20427 ++
20428 ++static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
20429 ++{
20430 ++ struct rtl8365mb *mb = smi->chip_data;
20431 ++ int i;
20432 ++
20433 ++ for (i = 0; i < smi->num_ports; i++) {
20434 ++ struct rtl8365mb_port *p = &mb->ports[i];
20435 ++
20436 ++ if (dsa_is_unused_port(smi->ds, i))
20437 ++ continue;
20438 ++
20439 ++ cancel_delayed_work_sync(&p->mib_work);
20440 ++ }
20441 ++}
20442 ++
20443 ++static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
20444 ++ u32 *val)
20445 ++{
20446 ++ int ret;
20447 ++
20448 ++ ret = regmap_read(smi->map, reg, val);
20449 ++ if (ret)
20450 ++ return ret;
20451 ++
20452 ++ return regmap_write(smi->map, reg, *val);
20453 ++}
20454 ++
20455 ++static irqreturn_t rtl8365mb_irq(int irq, void *data)
20456 ++{
20457 ++ struct realtek_smi *smi = data;
20458 ++ unsigned long line_changes = 0;
20459 ++ struct rtl8365mb *mb;
20460 ++ u32 stat;
20461 ++ int line;
20462 ++ int ret;
20463 ++
20464 ++ mb = smi->chip_data;
20465 ++
20466 ++ ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
20467 ++ &stat);
20468 ++ if (ret)
20469 ++ goto out_error;
20470 ++
20471 ++ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
20472 ++ u32 linkdown_ind;
20473 ++ u32 linkup_ind;
20474 ++ u32 val;
20475 ++
20476 ++ ret = rtl8365mb_get_and_clear_status_reg(
20477 ++ smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
20478 ++ if (ret)
20479 ++ goto out_error;
20480 ++
20481 ++ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
20482 ++
20483 ++ ret = rtl8365mb_get_and_clear_status_reg(
20484 ++ smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
20485 ++ if (ret)
20486 ++ goto out_error;
20487 ++
20488 ++ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
20489 ++
20490 ++ line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
20491 ++ }
20492 ++
20493 ++ if (!line_changes)
20494 ++ goto out_none;
20495 ++
20496 ++ for_each_set_bit(line, &line_changes, smi->num_ports) {
20497 ++ int child_irq = irq_find_mapping(smi->irqdomain, line);
20498 ++
20499 ++ handle_nested_irq(child_irq);
20500 ++ }
20501 ++
20502 ++ return IRQ_HANDLED;
20503 ++
20504 ++out_error:
20505 ++ dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
20506 ++
20507 ++out_none:
20508 ++ return IRQ_NONE;
20509 ++}
20510 ++
20511 ++static struct irq_chip rtl8365mb_irq_chip = {
20512 ++ .name = "rtl8365mb",
20513 ++ /* The hardware doesn't support masking IRQs on a per-port basis */
20514 ++};
20515 ++
20516 ++static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
20517 ++ irq_hw_number_t hwirq)
20518 ++{
20519 ++ irq_set_chip_data(irq, domain->host_data);
20520 ++ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
20521 ++ irq_set_nested_thread(irq, 1);
20522 ++ irq_set_noprobe(irq);
20523 ++
20524 ++ return 0;
20525 ++}
20526 ++
20527 ++static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
20528 ++{
20529 ++ irq_set_nested_thread(irq, 0);
20530 ++ irq_set_chip_and_handler(irq, NULL, NULL);
20531 ++ irq_set_chip_data(irq, NULL);
20532 ++}
20533 ++
20534 ++static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
20535 ++ .map = rtl8365mb_irq_map,
20536 ++ .unmap = rtl8365mb_irq_unmap,
20537 ++ .xlate = irq_domain_xlate_onecell,
20538 ++};
20539 ++
20540 ++static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
20541 ++{
20542 ++ return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
20543 ++ RTL8365MB_INTR_LINK_CHANGE_MASK,
20544 ++ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
20545 ++ enable ? 1 : 0));
20546 ++}
20547 ++
20548 ++static int rtl8365mb_irq_enable(struct realtek_smi *smi)
20549 ++{
20550 ++ return rtl8365mb_set_irq_enable(smi, true);
20551 ++}
20552 ++
20553 ++static int rtl8365mb_irq_disable(struct realtek_smi *smi)
20554 ++{
20555 ++ return rtl8365mb_set_irq_enable(smi, false);
20556 ++}
20557 ++
20558 ++static int rtl8365mb_irq_setup(struct realtek_smi *smi)
20559 ++{
20560 ++ struct rtl8365mb *mb = smi->chip_data;
20561 ++ struct device_node *intc;
20562 ++ u32 irq_trig;
20563 ++ int virq;
20564 ++ int irq;
20565 ++ u32 val;
20566 ++ int ret;
20567 ++ int i;
20568 ++
20569 ++ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
20570 ++ if (!intc) {
20571 ++ dev_err(smi->dev, "missing child interrupt-controller node\n");
20572 ++ return -EINVAL;
20573 ++ }
20574 ++
20575 ++ /* rtl8365mb IRQs cascade off this one */
20576 ++ irq = of_irq_get(intc, 0);
20577 ++ if (irq <= 0) {
20578 ++ if (irq != -EPROBE_DEFER)
20579 ++ dev_err(smi->dev, "failed to get parent irq: %d\n",
20580 ++ irq);
20581 ++ ret = irq ? irq : -EINVAL;
20582 ++ goto out_put_node;
20583 ++ }
20584 ++
20585 ++ smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
20586 ++ &rtl8365mb_irqdomain_ops, smi);
20587 ++ if (!smi->irqdomain) {
20588 ++ dev_err(smi->dev, "failed to add irq domain\n");
20589 ++ ret = -ENOMEM;
20590 ++ goto out_put_node;
20591 ++ }
20592 ++
20593 ++ for (i = 0; i < smi->num_ports; i++) {
20594 ++ virq = irq_create_mapping(smi->irqdomain, i);
20595 ++ if (!virq) {
20596 ++ dev_err(smi->dev,
20597 ++ "failed to create irq domain mapping\n");
20598 ++ ret = -EINVAL;
20599 ++ goto out_remove_irqdomain;
20600 ++ }
20601 ++
20602 ++ irq_set_parent(virq, irq);
20603 ++ }
20604 ++
20605 ++ /* Configure chip interrupt signal polarity */
20606 ++ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
20607 ++ switch (irq_trig) {
20608 ++ case IRQF_TRIGGER_RISING:
20609 ++ case IRQF_TRIGGER_HIGH:
20610 ++ val = RTL8365MB_INTR_POLARITY_HIGH;
20611 ++ break;
20612 ++ case IRQF_TRIGGER_FALLING:
20613 ++ case IRQF_TRIGGER_LOW:
20614 ++ val = RTL8365MB_INTR_POLARITY_LOW;
20615 ++ break;
20616 ++ default:
20617 ++ dev_err(smi->dev, "unsupported irq trigger type %u\n",
20618 ++ irq_trig);
20619 ++ ret = -EINVAL;
20620 ++ goto out_remove_irqdomain;
20621 ++ }
20622 ++
20623 ++ ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
20624 ++ RTL8365MB_INTR_POLARITY_MASK,
20625 ++ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
20626 ++ if (ret)
20627 ++ goto out_remove_irqdomain;
20628 ++
20629 ++ /* Disable the interrupt in case the chip has it enabled on reset */
20630 ++ ret = rtl8365mb_irq_disable(smi);
20631 ++ if (ret)
20632 ++ goto out_remove_irqdomain;
20633 ++
20634 ++ /* Clear the interrupt status register */
20635 ++ ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
20636 ++ RTL8365MB_INTR_ALL_MASK);
20637 ++ if (ret)
20638 ++ goto out_remove_irqdomain;
20639 ++
20640 ++ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
20641 ++ "rtl8365mb", smi);
20642 ++ if (ret) {
20643 ++ dev_err(smi->dev, "failed to request irq: %d\n", ret);
20644 ++ goto out_remove_irqdomain;
20645 ++ }
20646 ++
20647 ++ /* Store the irq so that we know to free it during teardown */
20648 ++ mb->irq = irq;
20649 ++
20650 ++ ret = rtl8365mb_irq_enable(smi);
20651 ++ if (ret)
20652 ++ goto out_free_irq;
20653 ++
20654 ++ of_node_put(intc);
20655 ++
20656 ++ return 0;
20657 ++
20658 ++out_free_irq:
20659 ++ free_irq(mb->irq, smi);
20660 ++ mb->irq = 0;
20661 ++
20662 ++out_remove_irqdomain:
20663 ++ for (i = 0; i < smi->num_ports; i++) {
20664 ++ virq = irq_find_mapping(smi->irqdomain, i);
20665 ++ irq_dispose_mapping(virq);
20666 ++ }
20667 ++
20668 ++ irq_domain_remove(smi->irqdomain);
20669 ++ smi->irqdomain = NULL;
20670 ++
20671 ++out_put_node:
20672 ++ of_node_put(intc);
20673 ++
20674 ++ return ret;
20675 ++}
20676 ++
20677 ++static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
20678 ++{
20679 ++ struct rtl8365mb *mb = smi->chip_data;
20680 ++ int virq;
20681 ++ int i;
20682 ++
20683 ++ if (mb->irq) {
20684 ++ free_irq(mb->irq, smi);
20685 ++ mb->irq = 0;
20686 ++ }
20687 ++
20688 ++ if (smi->irqdomain) {
20689 ++ for (i = 0; i < smi->num_ports; i++) {
20690 ++ virq = irq_find_mapping(smi->irqdomain, i);
20691 ++ irq_dispose_mapping(virq);
20692 ++ }
20693 ++
20694 ++ irq_domain_remove(smi->irqdomain);
20695 ++ smi->irqdomain = NULL;
20696 ++ }
20697 ++}
20698 ++
20699 ++static int rtl8365mb_cpu_config(struct realtek_smi *smi)
20700 ++{
20701 ++ struct rtl8365mb *mb = smi->chip_data;
20702 ++ struct rtl8365mb_cpu *cpu = &mb->cpu;
20703 ++ u32 val;
20704 ++ int ret;
20705 ++
20706 ++ ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
20707 ++ RTL8365MB_CPU_PORT_MASK_MASK,
20708 ++ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
20709 ++ cpu->mask));
20710 ++ if (ret)
20711 ++ return ret;
20712 ++
20713 ++ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
20714 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
20715 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
20716 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
20717 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
20718 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
20719 ++ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
20720 ++ cpu->trap_port >> 3);
20721 ++ ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
20722 ++ if (ret)
20723 ++ return ret;
20724 ++
20725 ++ return 0;
20726 ++}
20727 ++
20728 ++static int rtl8365mb_switch_init(struct realtek_smi *smi)
20729 ++{
20730 ++ struct rtl8365mb *mb = smi->chip_data;
20731 ++ int ret;
20732 ++ int i;
20733 ++
20734 ++ /* Do any chip-specific init jam before getting to the common stuff */
20735 ++ if (mb->jam_table) {
20736 ++ for (i = 0; i < mb->jam_size; i++) {
20737 ++ ret = regmap_write(smi->map, mb->jam_table[i].reg,
20738 ++ mb->jam_table[i].val);
20739 ++ if (ret)
20740 ++ return ret;
20741 ++ }
20742 ++ }
20743 ++
20744 ++ /* Common init jam */
20745 ++ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
20746 ++ ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
20747 ++ rtl8365mb_init_jam_common[i].val);
20748 ++ if (ret)
20749 ++ return ret;
20750 ++ }
20751 ++
20752 ++ return 0;
20753 ++}
20754 ++
20755 ++static int rtl8365mb_reset_chip(struct realtek_smi *smi)
20756 ++{
20757 ++ u32 val;
20758 ++
20759 ++ realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
20760 ++ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
20761 ++ 1));
20762 ++
20763 ++ /* Realtek documentation says the chip needs 1 second to reset. Sleep
20764 ++ * for 100 ms before accessing any registers to prevent ACK timeouts.
20765 ++ */
20766 ++ msleep(100);
20767 ++ return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
20768 ++ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
20769 ++ 20000, 1e6);
20770 ++}
20771 ++
20772 ++static int rtl8365mb_setup(struct dsa_switch *ds)
20773 ++{
20774 ++ struct realtek_smi *smi = ds->priv;
20775 ++ struct rtl8365mb *mb;
20776 ++ int ret;
20777 ++ int i;
20778 ++
20779 ++ mb = smi->chip_data;
20780 ++
20781 ++ ret = rtl8365mb_reset_chip(smi);
20782 ++ if (ret) {
20783 ++ dev_err(smi->dev, "failed to reset chip: %d\n", ret);
20784 ++ goto out_error;
20785 ++ }
20786 ++
20787 ++ /* Configure switch to vendor-defined initial state */
20788 ++ ret = rtl8365mb_switch_init(smi);
20789 ++ if (ret) {
20790 ++ dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
20791 ++ goto out_error;
20792 ++ }
20793 ++
20794 ++ /* Set up cascading IRQs */
20795 ++ ret = rtl8365mb_irq_setup(smi);
20796 ++ if (ret == -EPROBE_DEFER)
20797 ++ return ret;
20798 ++ else if (ret)
20799 ++ dev_info(smi->dev, "no interrupt support\n");
20800 ++
20801 ++ /* Configure CPU tagging */
20802 ++ ret = rtl8365mb_cpu_config(smi);
20803 ++ if (ret)
20804 ++ goto out_teardown_irq;
20805 ++
20806 ++ /* Configure ports */
20807 ++ for (i = 0; i < smi->num_ports; i++) {
20808 ++ struct rtl8365mb_port *p = &mb->ports[i];
20809 ++
20810 ++ if (dsa_is_unused_port(smi->ds, i))
20811 ++ continue;
20812 ++
20813 ++ /* Set up per-port private data */
20814 ++ p->smi = smi;
20815 ++ p->index = i;
20816 ++
20817 ++ /* Forward only to the CPU */
20818 ++ ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
20819 ++ if (ret)
20820 ++ goto out_teardown_irq;
20821 ++
20822 ++ /* Disable learning */
20823 ++ ret = rtl8365mb_port_set_learning(smi, i, false);
20824 ++ if (ret)
20825 ++ goto out_teardown_irq;
20826 ++
20827 ++ /* Set the initial STP state of all ports to DISABLED, otherwise
20828 ++ * ports will still forward frames to the CPU despite being
20829 ++ * administratively down by default.
20830 ++ */
20831 ++ rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
20832 ++ }
20833 ++
20834 ++ /* Set maximum packet length to 1536 bytes */
20835 ++ ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
20836 ++ RTL8365MB_CFG0_MAX_LEN_MASK,
20837 ++ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
20838 ++ if (ret)
20839 ++ goto out_teardown_irq;
20840 ++
20841 ++ ret = realtek_smi_setup_mdio(smi);
20842 ++ if (ret) {
20843 ++ dev_err(smi->dev, "could not set up MDIO bus\n");
20844 ++ goto out_teardown_irq;
20845 ++ }
20846 ++
20847 ++ /* Start statistics counter polling */
20848 ++ rtl8365mb_stats_setup(smi);
20849 ++
20850 ++ return 0;
20851 ++
20852 ++out_teardown_irq:
20853 ++ rtl8365mb_irq_teardown(smi);
20854 ++
20855 ++out_error:
20856 ++ return ret;
20857 ++}
20858 ++
20859 ++static void rtl8365mb_teardown(struct dsa_switch *ds)
20860 ++{
20861 ++ struct realtek_smi *smi = ds->priv;
20862 ++
20863 ++ rtl8365mb_stats_teardown(smi);
20864 ++ rtl8365mb_irq_teardown(smi);
20865 ++}
20866 ++
20867 ++static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
20868 ++{
20869 ++ int ret;
20870 ++
20871 ++ /* For some reason we have to write a magic value to an arbitrary
20872 ++ * register whenever accessing the chip ID/version registers.
20873 ++ */
20874 ++ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
20875 ++ if (ret)
20876 ++ return ret;
20877 ++
20878 ++ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
20879 ++ if (ret)
20880 ++ return ret;
20881 ++
20882 ++ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
20883 ++ if (ret)
20884 ++ return ret;
20885 ++
20886 ++ /* Reset magic register */
20887 ++ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
20888 ++ if (ret)
20889 ++ return ret;
20890 ++
20891 ++ return 0;
20892 ++}
20893 ++
20894 ++static int rtl8365mb_detect(struct realtek_smi *smi)
20895 ++{
20896 ++ struct rtl8365mb *mb = smi->chip_data;
20897 ++ u32 chip_id;
20898 ++ u32 chip_ver;
20899 ++ int ret;
20900 ++
20901 ++ ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
20902 ++ if (ret) {
20903 ++ dev_err(smi->dev, "failed to read chip id and version: %d\n",
20904 ++ ret);
20905 ++ return ret;
20906 ++ }
20907 ++
20908 ++ switch (chip_id) {
20909 ++ case RTL8365MB_CHIP_ID_8365MB_VC:
20910 ++ dev_info(smi->dev,
20911 ++ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
20912 ++ chip_ver);
20913 ++
20914 ++ smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
20915 ++ smi->num_ports = smi->cpu_port + 1;
20916 ++
20917 ++ mb->smi = smi;
20918 ++ mb->chip_id = chip_id;
20919 ++ mb->chip_ver = chip_ver;
20920 ++ mb->port_mask = BIT(smi->num_ports) - 1;
20921 ++ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
20922 ++ mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
20923 ++ mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
20924 ++
20925 ++ mb->cpu.enable = 1;
20926 ++ mb->cpu.mask = BIT(smi->cpu_port);
20927 ++ mb->cpu.trap_port = smi->cpu_port;
20928 ++ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
20929 ++ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
20930 ++ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
20931 ++ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
20932 ++
20933 ++ break;
20934 ++ default:
20935 ++ dev_err(smi->dev,
20936 ++ "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
20937 ++ chip_id, chip_ver);
20938 ++ return -ENODEV;
20939 ++ }
20940 ++
20941 ++ return 0;
20942 ++}
20943 ++
20944 ++static const struct dsa_switch_ops rtl8365mb_switch_ops = {
20945 ++ .get_tag_protocol = rtl8365mb_get_tag_protocol,
20946 ++ .setup = rtl8365mb_setup,
20947 ++ .teardown = rtl8365mb_teardown,
20948 ++ .phylink_validate = rtl8365mb_phylink_validate,
20949 ++ .phylink_mac_config = rtl8365mb_phylink_mac_config,
20950 ++ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
20951 ++ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
20952 ++ .port_stp_state_set = rtl8365mb_port_stp_state_set,
20953 ++ .get_strings = rtl8365mb_get_strings,
20954 ++ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
20955 ++ .get_sset_count = rtl8365mb_get_sset_count,
20956 ++ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
20957 ++ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
20958 ++ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
20959 ++ .get_stats64 = rtl8365mb_get_stats64,
20960 ++};
20961 ++
20962 ++static const struct realtek_smi_ops rtl8365mb_smi_ops = {
20963 ++ .detect = rtl8365mb_detect,
20964 ++ .phy_read = rtl8365mb_phy_read,
20965 ++ .phy_write = rtl8365mb_phy_write,
20966 ++};
20967 ++
20968 ++const struct realtek_smi_variant rtl8365mb_variant = {
20969 ++ .ds_ops = &rtl8365mb_switch_ops,
20970 ++ .ops = &rtl8365mb_smi_ops,
20971 ++ .clk_delay = 10,
20972 ++ .cmd_read = 0xb9,
20973 ++ .cmd_write = 0xb8,
20974 ++ .chip_data_sz = sizeof(struct rtl8365mb),
20975 ++};
20976 ++EXPORT_SYMBOL_GPL(rtl8365mb_variant);
20977 +diff --git a/drivers/net/dsa/realtek/rtl8366.c b/drivers/net/dsa/realtek/rtl8366.c
20978 +new file mode 100644
20979 +index 0000000000000..bdb8d8d348807
20980 +--- /dev/null
20981 ++++ b/drivers/net/dsa/realtek/rtl8366.c
20982 +@@ -0,0 +1,448 @@
20983 ++// SPDX-License-Identifier: GPL-2.0
20984 ++/* Realtek SMI library helpers for the RTL8366x variants
20985 ++ * RTL8366RB and RTL8366S
20986 ++ *
20987 ++ * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
20988 ++ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
20989 ++ * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
20990 ++ * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
20991 ++ * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
20992 ++ */
20993 ++#include <linux/if_bridge.h>
20994 ++#include <net/dsa.h>
20995 ++
20996 ++#include "realtek-smi-core.h"
20997 ++
20998 ++int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
20999 ++{
21000 ++ int ret;
21001 ++ int i;
21002 ++
21003 ++ *used = 0;
21004 ++ for (i = 0; i < smi->num_ports; i++) {
21005 ++ int index = 0;
21006 ++
21007 ++ ret = smi->ops->get_mc_index(smi, i, &index);
21008 ++ if (ret)
21009 ++ return ret;
21010 ++
21011 ++ if (mc_index == index) {
21012 ++ *used = 1;
21013 ++ break;
21014 ++ }
21015 ++ }
21016 ++
21017 ++ return 0;
21018 ++}
21019 ++EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
21020 ++
21021 ++/**
21022 ++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
21023 ++ * @smi: the Realtek SMI device instance
21024 ++ * @vid: the VLAN ID to look up or allocate
21025 ++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
21026 ++ * if successful
21027 ++ * @return: index of a new member config or negative error number
21028 ++ */
21029 ++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
21030 ++ struct rtl8366_vlan_mc *vlanmc)
21031 ++{
21032 ++ struct rtl8366_vlan_4k vlan4k;
21033 ++ int ret;
21034 ++ int i;
21035 ++
21036 ++ /* Try to find an existing member config entry for this VID */
21037 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
21038 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
21039 ++ if (ret) {
21040 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
21041 ++ i, vid);
21042 ++ return ret;
21043 ++ }
21044 ++
21045 ++ if (vid == vlanmc->vid)
21046 ++ return i;
21047 ++ }
21048 ++
21049 ++ /* We have no MC entry for this VID, try to find an empty one */
21050 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
21051 ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
21052 ++ if (ret) {
21053 ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
21054 ++ i, vid);
21055 ++ return ret;
21056 ++ }
21057 ++
21058 ++ if (vlanmc->vid == 0 && vlanmc->member == 0) {
21059 ++ /* Update the entry from the 4K table */
21060 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
21061 ++ if (ret) {
21062 ++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
21063 ++ i, vid);
21064 ++ return ret;
21065 ++ }
21066 ++
21067 ++ vlanmc->vid = vid;
21068 ++ vlanmc->member = vlan4k.member;
21069 ++ vlanmc->untag = vlan4k.untag;
21070 ++ vlanmc->fid = vlan4k.fid;
21071 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
21072 ++ if (ret) {
21073 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
21074 ++ i, vid);
21075 ++ return ret;
21076 ++ }
21077 ++
21078 ++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
21079 ++ i, vid);
21080 ++ return i;
21081 ++ }
21082 ++ }
21083 ++
21084 ++ /* MC table is full, try to find an unused entry and replace it */
21085 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
21086 ++ int used;
21087 ++
21088 ++ ret = rtl8366_mc_is_used(smi, i, &used);
21089 ++ if (ret)
21090 ++ return ret;
21091 ++
21092 ++ if (!used) {
21093 ++ /* Update the entry from the 4K table */
21094 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
21095 ++ if (ret)
21096 ++ return ret;
21097 ++
21098 ++ vlanmc->vid = vid;
21099 ++ vlanmc->member = vlan4k.member;
21100 ++ vlanmc->untag = vlan4k.untag;
21101 ++ vlanmc->fid = vlan4k.fid;
21102 ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
21103 ++ if (ret) {
21104 ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
21105 ++ i, vid);
21106 ++ return ret;
21107 ++ }
21108 ++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
21109 ++ i, vid);
21110 ++ return i;
21111 ++ }
21112 ++ }
21113 ++
21114 ++ dev_err(smi->dev, "all VLAN member configurations are in use\n");
21115 ++ return -ENOSPC;
21116 ++}
21117 ++
21118 ++int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
21119 ++ u32 untag, u32 fid)
21120 ++{
21121 ++ struct rtl8366_vlan_mc vlanmc;
21122 ++ struct rtl8366_vlan_4k vlan4k;
21123 ++ int mc;
21124 ++ int ret;
21125 ++
21126 ++ if (!smi->ops->is_vlan_valid(smi, vid))
21127 ++ return -EINVAL;
21128 ++
21129 ++ dev_dbg(smi->dev,
21130 ++ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
21131 ++ vid, member, untag);
21132 ++
21133 ++ /* Update the 4K table */
21134 ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
21135 ++ if (ret)
21136 ++ return ret;
21137 ++
21138 ++ vlan4k.member |= member;
21139 ++ vlan4k.untag |= untag;
21140 ++ vlan4k.fid = fid;
21141 ++ ret = smi->ops->set_vlan_4k(smi, &vlan4k);
21142 ++ if (ret)
21143 ++ return ret;
21144 ++
21145 ++ dev_dbg(smi->dev,
21146 ++ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
21147 ++ vid, vlan4k.member, vlan4k.untag);
21148 ++
21149 ++ /* Find or allocate a member config for this VID */
21150 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
21151 ++ if (ret < 0)
21152 ++ return ret;
21153 ++ mc = ret;
21154 ++
21155 ++ /* Update the MC entry */
21156 ++ vlanmc.member |= member;
21157 ++ vlanmc.untag |= untag;
21158 ++ vlanmc.fid = fid;
21159 ++
21160 ++ /* Commit updates to the MC entry */
21161 ++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
21162 ++ if (ret)
21163 ++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
21164 ++ mc, vid);
21165 ++ else
21166 ++ dev_dbg(smi->dev,
21167 ++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
21168 ++ vid, vlanmc.member, vlanmc.untag);
21169 ++
21170 ++ return ret;
21171 ++}
21172 ++EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
21173 ++
21174 ++int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
21175 ++ unsigned int vid)
21176 ++{
21177 ++ struct rtl8366_vlan_mc vlanmc;
21178 ++ int mc;
21179 ++ int ret;
21180 ++
21181 ++ if (!smi->ops->is_vlan_valid(smi, vid))
21182 ++ return -EINVAL;
21183 ++
21184 ++ /* Find or allocate a member config for this VID */
21185 ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
21186 ++ if (ret < 0)
21187 ++ return ret;
21188 ++ mc = ret;
21189 ++
21190 ++ ret = smi->ops->set_mc_index(smi, port, mc);
21191 ++ if (ret) {
21192 ++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
21193 ++ mc, port);
21194 ++ return ret;
21195 ++ }
21196 ++
21197 ++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
21198 ++ port, vid, mc);
21199 ++
21200 ++ return 0;
21201 ++}
21202 ++EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
21203 ++
21204 ++int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
21205 ++{
21206 ++ int ret;
21207 ++
21208 ++ /* To enable 4k VLAN, ordinary VLAN must be enabled first,
21209 ++ * but if we disable 4k VLAN it is fine to leave ordinary
21210 ++ * VLAN enabled.
21211 ++ */
21212 ++ if (enable) {
21213 ++ /* Make sure VLAN is ON */
21214 ++ ret = smi->ops->enable_vlan(smi, true);
21215 ++ if (ret)
21216 ++ return ret;
21217 ++
21218 ++ smi->vlan_enabled = true;
21219 ++ }
21220 ++
21221 ++ ret = smi->ops->enable_vlan4k(smi, enable);
21222 ++ if (ret)
21223 ++ return ret;
21224 ++
21225 ++ smi->vlan4k_enabled = enable;
21226 ++ return 0;
21227 ++}
21228 ++EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
21229 ++
21230 ++int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
21231 ++{
21232 ++ int ret;
21233 ++
21234 ++ ret = smi->ops->enable_vlan(smi, enable);
21235 ++ if (ret)
21236 ++ return ret;
21237 ++
21238 ++ smi->vlan_enabled = enable;
21239 ++
21240 ++ /* If we turn VLAN off, make sure that we turn off
21241 ++ * 4k VLAN as well, if that happened to be on.
21242 ++ */
21243 ++ if (!enable) {
21244 ++ smi->vlan4k_enabled = false;
21245 ++ ret = smi->ops->enable_vlan4k(smi, false);
21246 ++ }
21247 ++
21248 ++ return ret;
21249 ++}
21250 ++EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
21251 ++
21252 ++int rtl8366_reset_vlan(struct realtek_smi *smi)
21253 ++{
21254 ++ struct rtl8366_vlan_mc vlanmc;
21255 ++ int ret;
21256 ++ int i;
21257 ++
21258 ++ rtl8366_enable_vlan(smi, false);
21259 ++ rtl8366_enable_vlan4k(smi, false);
21260 ++
21261 ++ /* Clear the 16 VLAN member configurations */
21262 ++ vlanmc.vid = 0;
21263 ++ vlanmc.priority = 0;
21264 ++ vlanmc.member = 0;
21265 ++ vlanmc.untag = 0;
21266 ++ vlanmc.fid = 0;
21267 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
21268 ++ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
21269 ++ if (ret)
21270 ++ return ret;
21271 ++ }
21272 ++
21273 ++ return 0;
21274 ++}
21275 ++EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
21276 ++
21277 ++int rtl8366_vlan_add(struct dsa_switch *ds, int port,
21278 ++ const struct switchdev_obj_port_vlan *vlan,
21279 ++ struct netlink_ext_ack *extack)
21280 ++{
21281 ++ bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
21282 ++ bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
21283 ++ struct realtek_smi *smi = ds->priv;
21284 ++ u32 member = 0;
21285 ++ u32 untag = 0;
21286 ++ int ret;
21287 ++
21288 ++ if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
21289 ++ NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
21290 ++ return -EINVAL;
21291 ++ }
21292 ++
21293 ++ /* Enable VLAN in the hardware
21294 ++ * FIXME: what's with this 4k business?
21295 ++ * Just rtl8366_enable_vlan() seems inconclusive.
21296 ++ */
21297 ++ ret = rtl8366_enable_vlan4k(smi, true);
21298 ++ if (ret) {
21299 ++ NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
21300 ++ return ret;
21301 ++ }
21302 ++
21303 ++ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
21304 ++ vlan->vid, port, untagged ? "untagged" : "tagged",
21305 ++ pvid ? "PVID" : "no PVID");
21306 ++
21307 ++ member |= BIT(port);
21308 ++
21309 ++ if (untagged)
21310 ++ untag |= BIT(port);
21311 ++
21312 ++ ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
21313 ++ if (ret) {
21314 ++ dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
21315 ++ return ret;
21316 ++ }
21317 ++
21318 ++ if (!pvid)
21319 ++ return 0;
21320 ++
21321 ++ ret = rtl8366_set_pvid(smi, port, vlan->vid);
21322 ++ if (ret) {
21323 ++ dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
21324 ++ port, vlan->vid);
21325 ++ return ret;
21326 ++ }
21327 ++
21328 ++ return 0;
21329 ++}
21330 ++EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
21331 ++
21332 ++int rtl8366_vlan_del(struct dsa_switch *ds, int port,
21333 ++ const struct switchdev_obj_port_vlan *vlan)
21334 ++{
21335 ++ struct realtek_smi *smi = ds->priv;
21336 ++ int ret, i;
21337 ++
21338 ++ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
21339 ++
21340 ++ for (i = 0; i < smi->num_vlan_mc; i++) {
21341 ++ struct rtl8366_vlan_mc vlanmc;
21342 ++
21343 ++ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
21344 ++ if (ret)
21345 ++ return ret;
21346 ++
21347 ++ if (vlan->vid == vlanmc.vid) {
21348 ++ /* Remove this port from the VLAN */
21349 ++ vlanmc.member &= ~BIT(port);
21350 ++ vlanmc.untag &= ~BIT(port);
21351 ++ /*
21352 ++ * If no ports are members of this VLAN
21353 ++ * anymore then clear the whole member
21354 ++ * config so it can be reused.
21355 ++ */
21356 ++ if (!vlanmc.member) {
21357 ++ vlanmc.vid = 0;
21358 ++ vlanmc.priority = 0;
21359 ++ vlanmc.fid = 0;
21360 ++ }
21361 ++ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
21362 ++ if (ret) {
21363 ++ dev_err(smi->dev,
21364 ++ "failed to remove VLAN %04x\n",
21365 ++ vlan->vid);
21366 ++ return ret;
21367 ++ }
21368 ++ break;
21369 ++ }
21370 ++ }
21371 ++
21372 ++ return 0;
21373 ++}
21374 ++EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
21375 ++
21376 ++void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
21377 ++ uint8_t *data)
21378 ++{
21379 ++ struct realtek_smi *smi = ds->priv;
21380 ++ struct rtl8366_mib_counter *mib;
21381 ++ int i;
21382 ++
21383 ++ if (port >= smi->num_ports)
21384 ++ return;
21385 ++
21386 ++ for (i = 0; i < smi->num_mib_counters; i++) {
21387 ++ mib = &smi->mib_counters[i];
21388 ++ strncpy(data + i * ETH_GSTRING_LEN,
21389 ++ mib->name, ETH_GSTRING_LEN);
21390 ++ }
21391 ++}
21392 ++EXPORT_SYMBOL_GPL(rtl8366_get_strings);
21393 ++
21394 ++int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
21395 ++{
21396 ++ struct realtek_smi *smi = ds->priv;
21397 ++
21398 ++ /* We only support SS_STATS */
21399 ++ if (sset != ETH_SS_STATS)
21400 ++ return 0;
21401 ++ if (port >= smi->num_ports)
21402 ++ return -EINVAL;
21403 ++
21404 ++ return smi->num_mib_counters;
21405 ++}
21406 ++EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
21407 ++
21408 ++void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
21409 ++{
21410 ++ struct realtek_smi *smi = ds->priv;
21411 ++ int i;
21412 ++ int ret;
21413 ++
21414 ++ if (port >= smi->num_ports)
21415 ++ return;
21416 ++
21417 ++ for (i = 0; i < smi->num_mib_counters; i++) {
21418 ++ struct rtl8366_mib_counter *mib;
21419 ++ u64 mibvalue = 0;
21420 ++
21421 ++ mib = &smi->mib_counters[i];
21422 ++ ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
21423 ++ if (ret) {
21424 ++ dev_err(smi->dev, "error reading MIB counter %s\n",
21425 ++ mib->name);
21426 ++ }
21427 ++ data[i] = mibvalue;
21428 ++ }
21429 ++}
21430 ++EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
21431 +diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
21432 +new file mode 100644
21433 +index 0000000000000..4f8c06d7ab3a9
21434 +--- /dev/null
21435 ++++ b/drivers/net/dsa/realtek/rtl8366rb.c
21436 +@@ -0,0 +1,1816 @@
21437 ++// SPDX-License-Identifier: GPL-2.0
21438 ++/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
21439 ++ *
21440 ++ * This is a sparsely documented chip, the only viable documentation seems
21441 ++ * to be a patched up code drop from the vendor that appear in various
21442 ++ * GPL source trees.
21443 ++ *
21444 ++ * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
21445 ++ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
21446 ++ * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
21447 ++ * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
21448 ++ * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
21449 ++ */
21450 ++
21451 ++#include <linux/bitops.h>
21452 ++#include <linux/etherdevice.h>
21453 ++#include <linux/if_bridge.h>
21454 ++#include <linux/interrupt.h>
21455 ++#include <linux/irqdomain.h>
21456 ++#include <linux/irqchip/chained_irq.h>
21457 ++#include <linux/of_irq.h>
21458 ++#include <linux/regmap.h>
21459 ++
21460 ++#include "realtek-smi-core.h"
21461 ++
21462 ++#define RTL8366RB_PORT_NUM_CPU 5
21463 ++#define RTL8366RB_NUM_PORTS 6
21464 ++#define RTL8366RB_PHY_NO_MAX 4
21465 ++#define RTL8366RB_PHY_ADDR_MAX 31
21466 ++
21467 ++/* Switch Global Configuration register */
21468 ++#define RTL8366RB_SGCR 0x0000
21469 ++#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
21470 ++#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
21471 ++#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
21472 ++#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
21473 ++#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
21474 ++#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
21475 ++#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
21476 ++#define RTL8366RB_SGCR_EN_VLAN BIT(13)
21477 ++#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
21478 ++
21479 ++/* Port Enable Control register */
21480 ++#define RTL8366RB_PECR 0x0001
21481 ++
21482 ++/* Switch per-port learning disablement register */
21483 ++#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
21484 ++
21485 ++/* Security control, actually aging register */
21486 ++#define RTL8366RB_SECURITY_CTRL 0x0003
21487 ++
21488 ++#define RTL8366RB_SSCR2 0x0004
21489 ++#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
21490 ++
21491 ++/* Port Mode Control registers */
21492 ++#define RTL8366RB_PMC0 0x0005
21493 ++#define RTL8366RB_PMC0_SPI BIT(0)
21494 ++#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
21495 ++#define RTL8366RB_PMC0_PROBE BIT(2)
21496 ++#define RTL8366RB_PMC0_DIS_BISR BIT(3)
21497 ++#define RTL8366RB_PMC0_ADCTEST BIT(4)
21498 ++#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
21499 ++#define RTL8366RB_PMC0_EN_SCAN BIT(6)
21500 ++#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
21501 ++#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
21502 ++#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
21503 ++#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
21504 ++#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
21505 ++#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
21506 ++#define RTL8366RB_PMC1 0x0006
21507 ++
21508 ++/* Port Mirror Control Register */
21509 ++#define RTL8366RB_PMCR 0x0007
21510 ++#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
21511 ++#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
21512 ++#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
21513 ++#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
21514 ++#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
21515 ++#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
21516 ++#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
21517 ++#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
21518 ++
21519 ++/* bits 0..7 = port 0, bits 8..15 = port 1 */
21520 ++#define RTL8366RB_PAACR0 0x0010
21521 ++/* bits 0..7 = port 2, bits 8..15 = port 3 */
21522 ++#define RTL8366RB_PAACR1 0x0011
21523 ++/* bits 0..7 = port 4, bits 8..15 = port 5 */
21524 ++#define RTL8366RB_PAACR2 0x0012
21525 ++#define RTL8366RB_PAACR_SPEED_10M 0
21526 ++#define RTL8366RB_PAACR_SPEED_100M 1
21527 ++#define RTL8366RB_PAACR_SPEED_1000M 2
21528 ++#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
21529 ++#define RTL8366RB_PAACR_LINK_UP BIT(4)
21530 ++#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
21531 ++#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
21532 ++#define RTL8366RB_PAACR_AN BIT(7)
21533 ++
21534 ++#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
21535 ++ RTL8366RB_PAACR_FULL_DUPLEX | \
21536 ++ RTL8366RB_PAACR_LINK_UP | \
21537 ++ RTL8366RB_PAACR_TX_PAUSE | \
21538 ++ RTL8366RB_PAACR_RX_PAUSE)
21539 ++
21540 ++/* bits 0..7 = port 0, bits 8..15 = port 1 */
21541 ++#define RTL8366RB_PSTAT0 0x0014
21542 ++/* bits 0..7 = port 2, bits 8..15 = port 3 */
21543 ++#define RTL8366RB_PSTAT1 0x0015
21544 ++/* bits 0..7 = port 4, bits 8..15 = port 5 */
21545 ++#define RTL8366RB_PSTAT2 0x0016
21546 ++
21547 ++#define RTL8366RB_POWER_SAVING_REG 0x0021
21548 ++
21549 ++/* Spanning tree status (STP) control, two bits per port per FID */
21550 ++#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
21551 ++#define RTL8366RB_STP_STATE_DISABLED 0x0
21552 ++#define RTL8366RB_STP_STATE_BLOCKING 0x1
21553 ++#define RTL8366RB_STP_STATE_LEARNING 0x2
21554 ++#define RTL8366RB_STP_STATE_FORWARDING 0x3
21555 ++#define RTL8366RB_STP_MASK GENMASK(1, 0)
21556 ++#define RTL8366RB_STP_STATE(port, state) \
21557 ++ ((state) << ((port) * 2))
21558 ++#define RTL8366RB_STP_STATE_MASK(port) \
21559 ++ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
21560 ++
21561 ++/* CPU port control reg */
21562 ++#define RTL8368RB_CPU_CTRL_REG 0x0061
21563 ++#define RTL8368RB_CPU_PORTS_MSK 0x00FF
21564 ++/* Disables inserting custom tag length/type 0x8899 */
21565 ++#define RTL8368RB_CPU_NO_TAG BIT(15)
21566 ++
21567 ++#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
21568 ++#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
21569 ++#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
21570 ++
21571 ++#define RTL8366RB_RESET_CTRL_REG 0x0100
21572 ++#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
21573 ++#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
21574 ++
21575 ++#define RTL8366RB_CHIP_ID_REG 0x0509
21576 ++#define RTL8366RB_CHIP_ID_8366 0x5937
21577 ++#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
21578 ++#define RTL8366RB_CHIP_VERSION_MASK 0xf
21579 ++
21580 ++/* PHY registers control */
21581 ++#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
21582 ++#define RTL8366RB_PHY_CTRL_READ BIT(0)
21583 ++#define RTL8366RB_PHY_CTRL_WRITE 0
21584 ++#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
21585 ++#define RTL8366RB_PHY_INT_BUSY BIT(0)
21586 ++#define RTL8366RB_PHY_EXT_BUSY BIT(4)
21587 ++#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
21588 ++#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
21589 ++#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
21590 ++#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
21591 ++
21592 ++#define RTL8366RB_PHY_REG_MASK 0x1f
21593 ++#define RTL8366RB_PHY_PAGE_OFFSET 5
21594 ++#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
21595 ++#define RTL8366RB_PHY_NO_OFFSET 9
21596 ++#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
21597 ++
21598 ++/* VLAN Ingress Control Register 1, one bit per port.
21599 ++ * bit 0 .. 5 will make the switch drop ingress frames without
21600 ++ * VID such as untagged or priority-tagged frames for respective
21601 ++ * port.
21602 ++ * bit 6 .. 11 will make the switch drop ingress frames carrying
21603 ++ * a C-tag with VID != 0 for respective port.
21604 ++ */
21605 ++#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
21606 ++#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
21607 ++
21608 ++/* VLAN Ingress Control Register 2, one bit per port.
21609 ++ * bit0 .. bit5 will make the switch drop all ingress frames with
21610 ++ * a VLAN classification that does not include the port is in its
21611 ++ * member set.
21612 ++ */
21613 ++#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
21614 ++
21615 ++/* LED control registers */
21616 ++#define RTL8366RB_LED_BLINKRATE_REG 0x0430
21617 ++#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
21618 ++#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
21619 ++#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
21620 ++#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
21621 ++#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
21622 ++#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
21623 ++#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
21624 ++
21625 ++#define RTL8366RB_LED_CTRL_REG 0x0431
21626 ++#define RTL8366RB_LED_OFF 0x0
21627 ++#define RTL8366RB_LED_DUP_COL 0x1
21628 ++#define RTL8366RB_LED_LINK_ACT 0x2
21629 ++#define RTL8366RB_LED_SPD1000 0x3
21630 ++#define RTL8366RB_LED_SPD100 0x4
21631 ++#define RTL8366RB_LED_SPD10 0x5
21632 ++#define RTL8366RB_LED_SPD1000_ACT 0x6
21633 ++#define RTL8366RB_LED_SPD100_ACT 0x7
21634 ++#define RTL8366RB_LED_SPD10_ACT 0x8
21635 ++#define RTL8366RB_LED_SPD100_10_ACT 0x9
21636 ++#define RTL8366RB_LED_FIBER 0xa
21637 ++#define RTL8366RB_LED_AN_FAULT 0xb
21638 ++#define RTL8366RB_LED_LINK_RX 0xc
21639 ++#define RTL8366RB_LED_LINK_TX 0xd
21640 ++#define RTL8366RB_LED_MASTER 0xe
21641 ++#define RTL8366RB_LED_FORCE 0xf
21642 ++#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
21643 ++#define RTL8366RB_LED_1_OFFSET 6
21644 ++#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
21645 ++#define RTL8366RB_LED_3_OFFSET 6
21646 ++
21647 ++#define RTL8366RB_MIB_COUNT 33
21648 ++#define RTL8366RB_GLOBAL_MIB_COUNT 1
21649 ++#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
21650 ++#define RTL8366RB_MIB_COUNTER_BASE 0x1000
21651 ++#define RTL8366RB_MIB_CTRL_REG 0x13F0
21652 ++#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
21653 ++#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
21654 ++#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
21655 ++#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
21656 ++#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
21657 ++
21658 ++#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
21659 ++#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
21660 ++ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
21661 ++#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
21662 ++#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
21663 ++
21664 ++#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
21665 ++#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
21666 ++
21667 ++#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
21668 ++#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
21669 ++#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
21670 ++
21671 ++#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
21672 ++
21673 ++#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
21674 ++#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
21675 ++#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
21676 ++#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
21677 ++#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
21678 ++#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
21679 ++#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
21680 ++
21681 ++#define RTL8366RB_NUM_VLANS 16
21682 ++#define RTL8366RB_NUM_LEDGROUPS 4
21683 ++#define RTL8366RB_NUM_VIDS 4096
21684 ++#define RTL8366RB_PRIORITYMAX 7
21685 ++#define RTL8366RB_NUM_FIDS 8
21686 ++#define RTL8366RB_FIDMAX 7
21687 ++
21688 ++#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
21689 ++#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
21690 ++#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
21691 ++#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
21692 ++#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
21693 ++
21694 ++#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
21695 ++
21696 ++#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
21697 ++ RTL8366RB_PORT_2 | \
21698 ++ RTL8366RB_PORT_3 | \
21699 ++ RTL8366RB_PORT_4 | \
21700 ++ RTL8366RB_PORT_5 | \
21701 ++ RTL8366RB_PORT_CPU)
21702 ++
21703 ++#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
21704 ++ RTL8366RB_PORT_2 | \
21705 ++ RTL8366RB_PORT_3 | \
21706 ++ RTL8366RB_PORT_4 | \
21707 ++ RTL8366RB_PORT_5)
21708 ++
21709 ++#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
21710 ++ RTL8366RB_PORT_2 | \
21711 ++ RTL8366RB_PORT_3 | \
21712 ++ RTL8366RB_PORT_4)
21713 ++
21714 ++#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
21715 ++
21716 ++/* First configuration word per member config, VID and prio */
21717 ++#define RTL8366RB_VLAN_VID_MASK 0xfff
21718 ++#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
21719 ++#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
21720 ++/* Second configuration word per member config, member and untagged */
21721 ++#define RTL8366RB_VLAN_UNTAG_SHIFT 8
21722 ++#define RTL8366RB_VLAN_UNTAG_MASK 0xff
21723 ++#define RTL8366RB_VLAN_MEMBER_MASK 0xff
21724 ++/* Third config word per member config, STAG currently unused */
21725 ++#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
21726 ++#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
21727 ++#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
21728 ++#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
21729 ++#define RTL8366RB_VLAN_FID_MASK 0x7
21730 ++
21731 ++/* Port ingress bandwidth control */
21732 ++#define RTL8366RB_IB_BASE 0x0200
21733 ++#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
21734 ++#define RTL8366RB_IB_BDTH_MASK 0x3fff
21735 ++#define RTL8366RB_IB_PREIFG BIT(14)
21736 ++
21737 ++/* Port egress bandwidth control */
21738 ++#define RTL8366RB_EB_BASE 0x02d1
21739 ++#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
21740 ++#define RTL8366RB_EB_BDTH_MASK 0x3fff
21741 ++#define RTL8366RB_EB_PREIFG_REG 0x02f8
21742 ++#define RTL8366RB_EB_PREIFG BIT(9)
21743 ++
21744 ++#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
21745 ++#define RTL8366RB_BDTH_UNIT 64
21746 ++#define RTL8366RB_BDTH_REG_DEFAULT 16383
21747 ++
21748 ++/* QOS */
21749 ++#define RTL8366RB_QOS BIT(15)
21750 ++/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
21751 ++#define RTL8366RB_QOS_DEFAULT_PREIFG 1
21752 ++
21753 ++/* Interrupt handling */
21754 ++#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
21755 ++#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
21756 ++#define RTL8366RB_P4_RGMII_LED BIT(2)
21757 ++#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
21758 ++#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
21759 ++#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
21760 ++#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
21761 ++#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
21762 ++#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
21763 ++#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
21764 ++ RTL8366RB_INTERRUPT_ACLEXCEED | \
21765 ++ RTL8366RB_INTERRUPT_STORMEXCEED | \
21766 ++ RTL8366RB_INTERRUPT_P4_FIBER | \
21767 ++ RTL8366RB_INTERRUPT_P4_UTP)
21768 ++#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
21769 ++#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
21770 ++
21771 ++/* Port isolation registers */
21772 ++#define RTL8366RB_PORT_ISO_BASE 0x0F08
21773 ++#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
21774 ++#define RTL8366RB_PORT_ISO_EN BIT(0)
21775 ++#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
21776 ++#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
21777 ++
21778 ++/* bits 0..5 enable force when cleared */
21779 ++#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
21780 ++
21781 ++#define RTL8366RB_OAM_PARSER_REG 0x0F14
21782 ++#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
21783 ++
21784 ++#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
21785 ++#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
21786 ++#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
21787 ++#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
21788 ++
21789 ++/**
21790 ++ * struct rtl8366rb - RTL8366RB-specific data
21791 ++ * @max_mtu: per-port max MTU setting
21792 ++ * @pvid_enabled: if PVID is set for respective port
21793 ++ */
21794 ++struct rtl8366rb {
21795 ++ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
21796 ++ bool pvid_enabled[RTL8366RB_NUM_PORTS];
21797 ++};
21798 ++
21799 ++static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
21800 ++ { 0, 0, 4, "IfInOctets" },
21801 ++ { 0, 4, 4, "EtherStatsOctets" },
21802 ++ { 0, 8, 2, "EtherStatsUnderSizePkts" },
21803 ++ { 0, 10, 2, "EtherFragments" },
21804 ++ { 0, 12, 2, "EtherStatsPkts64Octets" },
21805 ++ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
21806 ++ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
21807 ++ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
21808 ++ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
21809 ++ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
21810 ++ { 0, 24, 2, "EtherOversizeStats" },
21811 ++ { 0, 26, 2, "EtherStatsJabbers" },
21812 ++ { 0, 28, 2, "IfInUcastPkts" },
21813 ++ { 0, 30, 2, "EtherStatsMulticastPkts" },
21814 ++ { 0, 32, 2, "EtherStatsBroadcastPkts" },
21815 ++ { 0, 34, 2, "EtherStatsDropEvents" },
21816 ++ { 0, 36, 2, "Dot3StatsFCSErrors" },
21817 ++ { 0, 38, 2, "Dot3StatsSymbolErrors" },
21818 ++ { 0, 40, 2, "Dot3InPauseFrames" },
21819 ++ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
21820 ++ { 0, 44, 4, "IfOutOctets" },
21821 ++ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
21822 ++ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
21823 ++ { 0, 52, 2, "Dot3sDeferredTransmissions" },
21824 ++ { 0, 54, 2, "Dot3StatsLateCollisions" },
21825 ++ { 0, 56, 2, "EtherStatsCollisions" },
21826 ++ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
21827 ++ { 0, 60, 2, "Dot3OutPauseFrames" },
21828 ++ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
21829 ++ { 0, 64, 2, "Dot1dTpPortInDiscards" },
21830 ++ { 0, 66, 2, "IfOutUcastPkts" },
21831 ++ { 0, 68, 2, "IfOutMulticastPkts" },
21832 ++ { 0, 70, 2, "IfOutBroadcastPkts" },
21833 ++};
21834 ++
21835 ++static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
21836 ++ int port,
21837 ++ struct rtl8366_mib_counter *mib,
21838 ++ u64 *mibvalue)
21839 ++{
21840 ++ u32 addr, val;
21841 ++ int ret;
21842 ++ int i;
21843 ++
21844 ++ addr = RTL8366RB_MIB_COUNTER_BASE +
21845 ++ RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
21846 ++ mib->offset;
21847 ++
21848 ++ /* Writing access counter address first
21849 ++ * then ASIC will prepare 64bits counter wait for being retrived
21850 ++ */
21851 ++ ret = regmap_write(smi->map, addr, 0); /* Write whatever */
21852 ++ if (ret)
21853 ++ return ret;
21854 ++
21855 ++ /* Read MIB control register */
21856 ++ ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
21857 ++ if (ret)
21858 ++ return -EIO;
21859 ++
21860 ++ if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
21861 ++ return -EBUSY;
21862 ++
21863 ++ if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
21864 ++ return -EIO;
21865 ++
21866 ++ /* Read each individual MIB 16 bits at the time */
21867 ++ *mibvalue = 0;
21868 ++ for (i = mib->length; i > 0; i--) {
21869 ++ ret = regmap_read(smi->map, addr + (i - 1), &val);
21870 ++ if (ret)
21871 ++ return ret;
21872 ++ *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
21873 ++ }
21874 ++ return 0;
21875 ++}
21876 ++
21877 ++static u32 rtl8366rb_get_irqmask(struct irq_data *d)
21878 ++{
21879 ++ int line = irqd_to_hwirq(d);
21880 ++ u32 val;
21881 ++
21882 ++ /* For line interrupts we combine link down in bits
21883 ++ * 6..11 with link up in bits 0..5 into one interrupt.
21884 ++ */
21885 ++ if (line < 12)
21886 ++ val = BIT(line) | BIT(line + 6);
21887 ++ else
21888 ++ val = BIT(line);
21889 ++ return val;
21890 ++}
21891 ++
21892 ++static void rtl8366rb_mask_irq(struct irq_data *d)
21893 ++{
21894 ++ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
21895 ++ int ret;
21896 ++
21897 ++ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
21898 ++ rtl8366rb_get_irqmask(d), 0);
21899 ++ if (ret)
21900 ++ dev_err(smi->dev, "could not mask IRQ\n");
21901 ++}
21902 ++
21903 ++static void rtl8366rb_unmask_irq(struct irq_data *d)
21904 ++{
21905 ++ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
21906 ++ int ret;
21907 ++
21908 ++ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
21909 ++ rtl8366rb_get_irqmask(d),
21910 ++ rtl8366rb_get_irqmask(d));
21911 ++ if (ret)
21912 ++ dev_err(smi->dev, "could not unmask IRQ\n");
21913 ++}
21914 ++
21915 ++static irqreturn_t rtl8366rb_irq(int irq, void *data)
21916 ++{
21917 ++ struct realtek_smi *smi = data;
21918 ++ u32 stat;
21919 ++ int ret;
21920 ++
21921 ++ /* This clears the IRQ status register */
21922 ++ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
21923 ++ &stat);
21924 ++ if (ret) {
21925 ++ dev_err(smi->dev, "can't read interrupt status\n");
21926 ++ return IRQ_NONE;
21927 ++ }
21928 ++ stat &= RTL8366RB_INTERRUPT_VALID;
21929 ++ if (!stat)
21930 ++ return IRQ_NONE;
21931 ++ while (stat) {
21932 ++ int line = __ffs(stat);
21933 ++ int child_irq;
21934 ++
21935 ++ stat &= ~BIT(line);
21936 ++ /* For line interrupts we combine link down in bits
21937 ++ * 6..11 with link up in bits 0..5 into one interrupt.
21938 ++ */
21939 ++ if (line < 12 && line > 5)
21940 ++ line -= 5;
21941 ++ child_irq = irq_find_mapping(smi->irqdomain, line);
21942 ++ handle_nested_irq(child_irq);
21943 ++ }
21944 ++ return IRQ_HANDLED;
21945 ++}
21946 ++
21947 ++static struct irq_chip rtl8366rb_irq_chip = {
21948 ++ .name = "RTL8366RB",
21949 ++ .irq_mask = rtl8366rb_mask_irq,
21950 ++ .irq_unmask = rtl8366rb_unmask_irq,
21951 ++};
21952 ++
21953 ++static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
21954 ++ irq_hw_number_t hwirq)
21955 ++{
21956 ++ irq_set_chip_data(irq, domain->host_data);
21957 ++ irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
21958 ++ irq_set_nested_thread(irq, 1);
21959 ++ irq_set_noprobe(irq);
21960 ++
21961 ++ return 0;
21962 ++}
21963 ++
21964 ++static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
21965 ++{
21966 ++ irq_set_nested_thread(irq, 0);
21967 ++ irq_set_chip_and_handler(irq, NULL, NULL);
21968 ++ irq_set_chip_data(irq, NULL);
21969 ++}
21970 ++
21971 ++static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
21972 ++ .map = rtl8366rb_irq_map,
21973 ++ .unmap = rtl8366rb_irq_unmap,
21974 ++ .xlate = irq_domain_xlate_onecell,
21975 ++};
21976 ++
21977 ++static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
21978 ++{
21979 ++ struct device_node *intc;
21980 ++ unsigned long irq_trig;
21981 ++ int irq;
21982 ++ int ret;
21983 ++ u32 val;
21984 ++ int i;
21985 ++
21986 ++ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
21987 ++ if (!intc) {
21988 ++ dev_err(smi->dev, "missing child interrupt-controller node\n");
21989 ++ return -EINVAL;
21990 ++ }
21991 ++ /* RB8366RB IRQs cascade off this one */
21992 ++ irq = of_irq_get(intc, 0);
21993 ++ if (irq <= 0) {
21994 ++ dev_err(smi->dev, "failed to get parent IRQ\n");
21995 ++ ret = irq ? irq : -EINVAL;
21996 ++ goto out_put_node;
21997 ++ }
21998 ++
21999 ++ /* This clears the IRQ status register */
22000 ++ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
22001 ++ &val);
22002 ++ if (ret) {
22003 ++ dev_err(smi->dev, "can't read interrupt status\n");
22004 ++ goto out_put_node;
22005 ++ }
22006 ++
22007 ++ /* Fetch IRQ edge information from the descriptor */
22008 ++ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
22009 ++ switch (irq_trig) {
22010 ++ case IRQF_TRIGGER_RISING:
22011 ++ case IRQF_TRIGGER_HIGH:
22012 ++ dev_info(smi->dev, "active high/rising IRQ\n");
22013 ++ val = 0;
22014 ++ break;
22015 ++ case IRQF_TRIGGER_FALLING:
22016 ++ case IRQF_TRIGGER_LOW:
22017 ++ dev_info(smi->dev, "active low/falling IRQ\n");
22018 ++ val = RTL8366RB_INTERRUPT_POLARITY;
22019 ++ break;
22020 ++ }
22021 ++ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
22022 ++ RTL8366RB_INTERRUPT_POLARITY,
22023 ++ val);
22024 ++ if (ret) {
22025 ++ dev_err(smi->dev, "could not configure IRQ polarity\n");
22026 ++ goto out_put_node;
22027 ++ }
22028 ++
22029 ++ ret = devm_request_threaded_irq(smi->dev, irq, NULL,
22030 ++ rtl8366rb_irq, IRQF_ONESHOT,
22031 ++ "RTL8366RB", smi);
22032 ++ if (ret) {
22033 ++ dev_err(smi->dev, "unable to request irq: %d\n", ret);
22034 ++ goto out_put_node;
22035 ++ }
22036 ++ smi->irqdomain = irq_domain_add_linear(intc,
22037 ++ RTL8366RB_NUM_INTERRUPT,
22038 ++ &rtl8366rb_irqdomain_ops,
22039 ++ smi);
22040 ++ if (!smi->irqdomain) {
22041 ++ dev_err(smi->dev, "failed to create IRQ domain\n");
22042 ++ ret = -EINVAL;
22043 ++ goto out_put_node;
22044 ++ }
22045 ++ for (i = 0; i < smi->num_ports; i++)
22046 ++ irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
22047 ++
22048 ++out_put_node:
22049 ++ of_node_put(intc);
22050 ++ return ret;
22051 ++}
22052 ++
22053 ++static int rtl8366rb_set_addr(struct realtek_smi *smi)
22054 ++{
22055 ++ u8 addr[ETH_ALEN];
22056 ++ u16 val;
22057 ++ int ret;
22058 ++
22059 ++ eth_random_addr(addr);
22060 ++
22061 ++ dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
22062 ++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
22063 ++ val = addr[0] << 8 | addr[1];
22064 ++ ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
22065 ++ if (ret)
22066 ++ return ret;
22067 ++ val = addr[2] << 8 | addr[3];
22068 ++ ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
22069 ++ if (ret)
22070 ++ return ret;
22071 ++ val = addr[4] << 8 | addr[5];
22072 ++ ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
22073 ++ if (ret)
22074 ++ return ret;
22075 ++
22076 ++ return 0;
22077 ++}
22078 ++
22079 ++/* Found in a vendor driver */
22080 ++
22081 ++/* Struct for handling the jam tables' entries */
22082 ++struct rtl8366rb_jam_tbl_entry {
22083 ++ u16 reg;
22084 ++ u16 val;
22085 ++};
22086 ++
22087 ++/* For the "version 0" early silicon, appear in most source releases */
22088 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
22089 ++ {0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
22090 ++ {0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
22091 ++ {0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
22092 ++ {0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
22093 ++ {0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
22094 ++ {0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
22095 ++ {0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
22096 ++ {0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
22097 ++ {0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
22098 ++ {0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
22099 ++ {0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
22100 ++ {0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
22101 ++ {0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
22102 ++ {0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
22103 ++ {0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
22104 ++ {0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
22105 ++ {0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
22106 ++ {0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
22107 ++ {0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
22108 ++ {0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
22109 ++ {0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
22110 ++ {0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
22111 ++ {0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
22112 ++ {0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
22113 ++ {0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
22114 ++ {0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
22115 ++ {0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
22116 ++ {0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
22117 ++ {0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
22118 ++};
22119 ++
22120 ++/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
22121 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
22122 ++ {0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
22123 ++ {0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
22124 ++ {0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
22125 ++ {0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
22126 ++ {0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
22127 ++ {0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
22128 ++ {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
22129 ++ {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
22130 ++ {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
22131 ++ {0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
22132 ++ {0xBE79, 0x3C3C}, {0xBE00, 0x1340},
22133 ++};
22134 ++
22135 ++/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
22136 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
22137 ++ {0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
22138 ++ {0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
22139 ++ {0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
22140 ++ {0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
22141 ++ {0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
22142 ++ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
22143 ++ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
22144 ++ {0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
22145 ++ {0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
22146 ++ {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
22147 ++ {0xBE00, 0x1340}, {0x0F51, 0x0010},
22148 ++};
22149 ++
22150 ++/* Appears in a DDWRT code dump */
22151 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
22152 ++ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
22153 ++ {0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
22154 ++ {0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
22155 ++ {0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
22156 ++ {0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
22157 ++ {0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
22158 ++ {0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
22159 ++ {0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
22160 ++ {0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
22161 ++ {0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
22162 ++ {0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
22163 ++ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
22164 ++ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
22165 ++ {0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
22166 ++ {0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
22167 ++ {0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
22168 ++ {0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
22169 ++};
22170 ++
22171 ++/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
22172 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
22173 ++ {0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
22174 ++ {0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
22175 ++ {0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
22176 ++ {0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
22177 ++ {0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
22178 ++ {0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
22179 ++ {0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
22180 ++ {0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
22181 ++ {0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
22182 ++};
22183 ++
22184 ++/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
22185 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
22186 ++ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
22187 ++ {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
22188 ++ {0x0401, 0x0000}, {0x0431, 0x0960},
22189 ++};
22190 ++
22191 ++/* This jam table activates "green ethernet", which means low power mode
22192 ++ * and is claimed to detect the cable length and not use more power than
22193 ++ * necessary, and the ports should enter power saving mode 10 seconds after
22194 ++ * a cable is disconnected. Seems to always be the same.
22195 ++ */
22196 ++static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
22197 ++ {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
22198 ++ {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
22199 ++ {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
22200 ++};
22201 ++
22202 ++/* Function that jams the tables in the proper registers */
22203 ++static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
22204 ++ int jam_size, struct realtek_smi *smi,
22205 ++ bool write_dbg)
22206 ++{
22207 ++ u32 val;
22208 ++ int ret;
22209 ++ int i;
22210 ++
22211 ++ for (i = 0; i < jam_size; i++) {
22212 ++ if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
22213 ++ ret = regmap_read(smi->map,
22214 ++ RTL8366RB_PHY_ACCESS_BUSY_REG,
22215 ++ &val);
22216 ++ if (ret)
22217 ++ return ret;
22218 ++ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
22219 ++ ret = regmap_write(smi->map,
22220 ++ RTL8366RB_PHY_ACCESS_CTRL_REG,
22221 ++ RTL8366RB_PHY_CTRL_WRITE);
22222 ++ if (ret)
22223 ++ return ret;
22224 ++ }
22225 ++ }
22226 ++ if (write_dbg)
22227 ++ dev_dbg(smi->dev, "jam %04x into register %04x\n",
22228 ++ jam_table[i].val,
22229 ++ jam_table[i].reg);
22230 ++ ret = regmap_write(smi->map,
22231 ++ jam_table[i].reg,
22232 ++ jam_table[i].val);
22233 ++ if (ret)
22234 ++ return ret;
22235 ++ }
22236 ++ return 0;
22237 ++}
22238 ++
22239 ++static int rtl8366rb_setup(struct dsa_switch *ds)
22240 ++{
22241 ++ struct realtek_smi *smi = ds->priv;
22242 ++ const struct rtl8366rb_jam_tbl_entry *jam_table;
22243 ++ struct rtl8366rb *rb;
22244 ++ u32 chip_ver = 0;
22245 ++ u32 chip_id = 0;
22246 ++ int jam_size;
22247 ++ u32 val;
22248 ++ int ret;
22249 ++ int i;
22250 ++
22251 ++ rb = smi->chip_data;
22252 ++
22253 ++ ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
22254 ++ if (ret) {
22255 ++ dev_err(smi->dev, "unable to read chip id\n");
22256 ++ return ret;
22257 ++ }
22258 ++
22259 ++ switch (chip_id) {
22260 ++ case RTL8366RB_CHIP_ID_8366:
22261 ++ break;
22262 ++ default:
22263 ++ dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
22264 ++ return -ENODEV;
22265 ++ }
22266 ++
22267 ++ ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
22268 ++ &chip_ver);
22269 ++ if (ret) {
22270 ++ dev_err(smi->dev, "unable to read chip version\n");
22271 ++ return ret;
22272 ++ }
22273 ++
22274 ++ dev_info(smi->dev, "RTL%04x ver %u chip found\n",
22275 ++ chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
22276 ++
22277 ++ /* Do the init dance using the right jam table */
22278 ++ switch (chip_ver) {
22279 ++ case 0:
22280 ++ jam_table = rtl8366rb_init_jam_ver_0;
22281 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
22282 ++ break;
22283 ++ case 1:
22284 ++ jam_table = rtl8366rb_init_jam_ver_1;
22285 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
22286 ++ break;
22287 ++ case 2:
22288 ++ jam_table = rtl8366rb_init_jam_ver_2;
22289 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
22290 ++ break;
22291 ++ default:
22292 ++ jam_table = rtl8366rb_init_jam_ver_3;
22293 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
22294 ++ break;
22295 ++ }
22296 ++
22297 ++ /* Special jam tables for special routers
22298 ++ * TODO: are these necessary? Maintainers, please test
22299 ++ * without them, using just the off-the-shelf tables.
22300 ++ */
22301 ++ if (of_machine_is_compatible("belkin,f5d8235-v1")) {
22302 ++ jam_table = rtl8366rb_init_jam_f5d8235;
22303 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
22304 ++ }
22305 ++ if (of_machine_is_compatible("netgear,dgn3500") ||
22306 ++ of_machine_is_compatible("netgear,dgn3500b")) {
22307 ++ jam_table = rtl8366rb_init_jam_dgn3500;
22308 ++ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
22309 ++ }
22310 ++
22311 ++ ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
22312 ++ if (ret)
22313 ++ return ret;
22314 ++
22315 ++ /* Isolate all user ports so they can only send packets to itself and the CPU port */
22316 ++ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
22317 ++ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
22318 ++ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
22319 ++ RTL8366RB_PORT_ISO_EN);
22320 ++ if (ret)
22321 ++ return ret;
22322 ++ }
22323 ++ /* CPU port can send packets to all ports */
22324 ++ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
22325 ++ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
22326 ++ RTL8366RB_PORT_ISO_EN);
22327 ++ if (ret)
22328 ++ return ret;
22329 ++
22330 ++ /* Set up the "green ethernet" feature */
22331 ++ ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
22332 ++ ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
22333 ++ if (ret)
22334 ++ return ret;
22335 ++
22336 ++ ret = regmap_write(smi->map,
22337 ++ RTL8366RB_GREEN_FEATURE_REG,
22338 ++ (chip_ver == 1) ? 0x0007 : 0x0003);
22339 ++ if (ret)
22340 ++ return ret;
22341 ++
22342 ++ /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
22343 ++ ret = regmap_write(smi->map, 0x0c, 0x240);
22344 ++ if (ret)
22345 ++ return ret;
22346 ++ ret = regmap_write(smi->map, 0x0d, 0x240);
22347 ++ if (ret)
22348 ++ return ret;
22349 ++
22350 ++ /* Set some random MAC address */
22351 ++ ret = rtl8366rb_set_addr(smi);
22352 ++ if (ret)
22353 ++ return ret;
22354 ++
22355 ++ /* Enable CPU port with custom DSA tag 8899.
22356 ++ *
22357 ++ * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this registers
22358 ++ * the custom tag is turned off.
22359 ++ */
22360 ++ ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
22361 ++ 0xFFFF,
22362 ++ BIT(smi->cpu_port));
22363 ++ if (ret)
22364 ++ return ret;
22365 ++
22366 ++ /* Make sure we default-enable the fixed CPU port */
22367 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
22368 ++ BIT(smi->cpu_port),
22369 ++ 0);
22370 ++ if (ret)
22371 ++ return ret;
22372 ++
22373 ++ /* Set maximum packet length to 1536 bytes */
22374 ++ ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
22375 ++ RTL8366RB_SGCR_MAX_LENGTH_MASK,
22376 ++ RTL8366RB_SGCR_MAX_LENGTH_1536);
22377 ++ if (ret)
22378 ++ return ret;
22379 ++ for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
22380 ++ /* layer 2 size, see rtl8366rb_change_mtu() */
22381 ++ rb->max_mtu[i] = 1532;
22382 ++
22383 ++ /* Disable learning for all ports */
22384 ++ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
22385 ++ RTL8366RB_PORT_ALL);
22386 ++ if (ret)
22387 ++ return ret;
22388 ++
22389 ++ /* Enable auto ageing for all ports */
22390 ++ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
22391 ++ if (ret)
22392 ++ return ret;
22393 ++
22394 ++ /* Port 4 setup: this enables Port 4, usually the WAN port,
22395 ++ * common PHY IO mode is apparently mode 0, and this is not what
22396 ++ * the port is initialized to. There is no explanation of the
22397 ++ * IO modes in the Realtek source code, if your WAN port is
22398 ++ * connected to something exotic such as fiber, then this might
22399 ++ * be worth experimenting with.
22400 ++ */
22401 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
22402 ++ RTL8366RB_PMC0_P4_IOMODE_MASK,
22403 ++ 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
22404 ++ if (ret)
22405 ++ return ret;
22406 ++
22407 ++ /* Accept all packets by default, we enable filtering on-demand */
22408 ++ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
22409 ++ 0);
22410 ++ if (ret)
22411 ++ return ret;
22412 ++ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
22413 ++ 0);
22414 ++ if (ret)
22415 ++ return ret;
22416 ++
22417 ++ /* Don't drop packets whose DA has not been learned */
22418 ++ ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
22419 ++ RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
22420 ++ if (ret)
22421 ++ return ret;
22422 ++
22423 ++ /* Set blinking, TODO: make this configurable */
22424 ++ ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
22425 ++ RTL8366RB_LED_BLINKRATE_MASK,
22426 ++ RTL8366RB_LED_BLINKRATE_56MS);
22427 ++ if (ret)
22428 ++ return ret;
22429 ++
22430 ++ /* Set up LED activity:
22431 ++ * Each port has 4 LEDs, we configure all ports to the same
22432 ++ * behaviour (no individual config) but we can set up each
22433 ++ * LED separately.
22434 ++ */
22435 ++ if (smi->leds_disabled) {
22436 ++ /* Turn everything off */
22437 ++ regmap_update_bits(smi->map,
22438 ++ RTL8366RB_LED_0_1_CTRL_REG,
22439 ++ 0x0FFF, 0);
22440 ++ regmap_update_bits(smi->map,
22441 ++ RTL8366RB_LED_2_3_CTRL_REG,
22442 ++ 0x0FFF, 0);
22443 ++ regmap_update_bits(smi->map,
22444 ++ RTL8366RB_INTERRUPT_CONTROL_REG,
22445 ++ RTL8366RB_P4_RGMII_LED,
22446 ++ 0);
22447 ++ val = RTL8366RB_LED_OFF;
22448 ++ } else {
22449 ++ /* TODO: make this configurable per LED */
22450 ++ val = RTL8366RB_LED_FORCE;
22451 ++ }
22452 ++ for (i = 0; i < 4; i++) {
22453 ++ ret = regmap_update_bits(smi->map,
22454 ++ RTL8366RB_LED_CTRL_REG,
22455 ++ 0xf << (i * 4),
22456 ++ val << (i * 4));
22457 ++ if (ret)
22458 ++ return ret;
22459 ++ }
22460 ++
22461 ++ ret = rtl8366_reset_vlan(smi);
22462 ++ if (ret)
22463 ++ return ret;
22464 ++
22465 ++ ret = rtl8366rb_setup_cascaded_irq(smi);
22466 ++ if (ret)
22467 ++ dev_info(smi->dev, "no interrupt support\n");
22468 ++
22469 ++ ret = realtek_smi_setup_mdio(smi);
22470 ++ if (ret) {
22471 ++ dev_info(smi->dev, "could not set up MDIO bus\n");
22472 ++ return -ENODEV;
22473 ++ }
22474 ++
22475 ++ return 0;
22476 ++}
22477 ++
22478 ++static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
22479 ++ int port,
22480 ++ enum dsa_tag_protocol mp)
22481 ++{
22482 ++ /* This switch uses the 4 byte protocol A Realtek DSA tag */
22483 ++ return DSA_TAG_PROTO_RTL4_A;
22484 ++}
22485 ++
22486 ++static void
22487 ++rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
22488 ++ phy_interface_t interface, struct phy_device *phydev,
22489 ++ int speed, int duplex, bool tx_pause, bool rx_pause)
22490 ++{
22491 ++ struct realtek_smi *smi = ds->priv;
22492 ++ int ret;
22493 ++
22494 ++ if (port != smi->cpu_port)
22495 ++ return;
22496 ++
22497 ++ dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
22498 ++
22499 ++ /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
22500 ++ ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
22501 ++ BIT(port), BIT(port));
22502 ++ if (ret) {
22503 ++ dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
22504 ++ return;
22505 ++ }
22506 ++
22507 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
22508 ++ 0xFF00U,
22509 ++ RTL8366RB_PAACR_CPU_PORT << 8);
22510 ++ if (ret) {
22511 ++ dev_err(smi->dev, "failed to set PAACR on CPU port\n");
22512 ++ return;
22513 ++ }
22514 ++
22515 ++ /* Enable the CPU port */
22516 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
22517 ++ 0);
22518 ++ if (ret) {
22519 ++ dev_err(smi->dev, "failed to enable the CPU port\n");
22520 ++ return;
22521 ++ }
22522 ++}
22523 ++
22524 ++static void
22525 ++rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
22526 ++ phy_interface_t interface)
22527 ++{
22528 ++ struct realtek_smi *smi = ds->priv;
22529 ++ int ret;
22530 ++
22531 ++ if (port != smi->cpu_port)
22532 ++ return;
22533 ++
22534 ++ dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
22535 ++
22536 ++ /* Disable the CPU port */
22537 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
22538 ++ BIT(port));
22539 ++ if (ret) {
22540 ++ dev_err(smi->dev, "failed to disable the CPU port\n");
22541 ++ return;
22542 ++ }
22543 ++}
22544 ++
22545 ++static void rb8366rb_set_port_led(struct realtek_smi *smi,
22546 ++ int port, bool enable)
22547 ++{
22548 ++ u16 val = enable ? 0x3f : 0;
22549 ++ int ret;
22550 ++
22551 ++ if (smi->leds_disabled)
22552 ++ return;
22553 ++
22554 ++ switch (port) {
22555 ++ case 0:
22556 ++ ret = regmap_update_bits(smi->map,
22557 ++ RTL8366RB_LED_0_1_CTRL_REG,
22558 ++ 0x3F, val);
22559 ++ break;
22560 ++ case 1:
22561 ++ ret = regmap_update_bits(smi->map,
22562 ++ RTL8366RB_LED_0_1_CTRL_REG,
22563 ++ 0x3F << RTL8366RB_LED_1_OFFSET,
22564 ++ val << RTL8366RB_LED_1_OFFSET);
22565 ++ break;
22566 ++ case 2:
22567 ++ ret = regmap_update_bits(smi->map,
22568 ++ RTL8366RB_LED_2_3_CTRL_REG,
22569 ++ 0x3F, val);
22570 ++ break;
22571 ++ case 3:
22572 ++ ret = regmap_update_bits(smi->map,
22573 ++ RTL8366RB_LED_2_3_CTRL_REG,
22574 ++ 0x3F << RTL8366RB_LED_3_OFFSET,
22575 ++ val << RTL8366RB_LED_3_OFFSET);
22576 ++ break;
22577 ++ case 4:
22578 ++ ret = regmap_update_bits(smi->map,
22579 ++ RTL8366RB_INTERRUPT_CONTROL_REG,
22580 ++ RTL8366RB_P4_RGMII_LED,
22581 ++ enable ? RTL8366RB_P4_RGMII_LED : 0);
22582 ++ break;
22583 ++ default:
22584 ++ dev_err(smi->dev, "no LED for port %d\n", port);
22585 ++ return;
22586 ++ }
22587 ++ if (ret)
22588 ++ dev_err(smi->dev, "error updating LED on port %d\n", port);
22589 ++}
22590 ++
22591 ++static int
22592 ++rtl8366rb_port_enable(struct dsa_switch *ds, int port,
22593 ++ struct phy_device *phy)
22594 ++{
22595 ++ struct realtek_smi *smi = ds->priv;
22596 ++ int ret;
22597 ++
22598 ++ dev_dbg(smi->dev, "enable port %d\n", port);
22599 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
22600 ++ 0);
22601 ++ if (ret)
22602 ++ return ret;
22603 ++
22604 ++ rb8366rb_set_port_led(smi, port, true);
22605 ++ return 0;
22606 ++}
22607 ++
22608 ++static void
22609 ++rtl8366rb_port_disable(struct dsa_switch *ds, int port)
22610 ++{
22611 ++ struct realtek_smi *smi = ds->priv;
22612 ++ int ret;
22613 ++
22614 ++ dev_dbg(smi->dev, "disable port %d\n", port);
22615 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
22616 ++ BIT(port));
22617 ++ if (ret)
22618 ++ return;
22619 ++
22620 ++ rb8366rb_set_port_led(smi, port, false);
22621 ++}
22622 ++
22623 ++static int
22624 ++rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
22625 ++ struct dsa_bridge bridge,
22626 ++ bool *tx_fwd_offload)
22627 ++{
22628 ++ struct realtek_smi *smi = ds->priv;
22629 ++ unsigned int port_bitmap = 0;
22630 ++ int ret, i;
22631 ++
22632 ++ /* Loop over all other ports than the current one */
22633 ++ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
22634 ++ /* Current port handled last */
22635 ++ if (i == port)
22636 ++ continue;
22637 ++ /* Not on this bridge */
22638 ++ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
22639 ++ continue;
22640 ++ /* Join this port to each other port on the bridge */
22641 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
22642 ++ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
22643 ++ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
22644 ++ if (ret)
22645 ++ dev_err(smi->dev, "failed to join port %d\n", port);
22646 ++
22647 ++ port_bitmap |= BIT(i);
22648 ++ }
22649 ++
22650 ++ /* Set the bits for the ports we can access */
22651 ++ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
22652 ++ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
22653 ++ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
22654 ++}
22655 ++
22656 ++static void
22657 ++rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
22658 ++ struct dsa_bridge bridge)
22659 ++{
22660 ++ struct realtek_smi *smi = ds->priv;
22661 ++ unsigned int port_bitmap = 0;
22662 ++ int ret, i;
22663 ++
22664 ++ /* Loop over all other ports than this one */
22665 ++ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
22666 ++ /* Current port handled last */
22667 ++ if (i == port)
22668 ++ continue;
22669 ++ /* Not on this bridge */
22670 ++ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
22671 ++ continue;
22672 ++ /* Remove this port from any other port on the bridge */
22673 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
22674 ++ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
22675 ++ if (ret)
22676 ++ dev_err(smi->dev, "failed to leave port %d\n", port);
22677 ++
22678 ++ port_bitmap |= BIT(i);
22679 ++ }
22680 ++
22681 ++ /* Clear the bits for the ports we can not access, leave ourselves */
22682 ++ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
22683 ++ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
22684 ++}
22685 ++
22686 ++/**
22687 ++ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
22688 ++ * @smi: SMI state container
22689 ++ * @port: the port to drop untagged and C-tagged frames on
22690 ++ * @drop: whether to drop or pass untagged and C-tagged frames
22691 ++ *
22692 ++ * Return: zero for success, a negative number on error.
22693 ++ */
22694 ++static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
22695 ++{
22696 ++ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
22697 ++ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
22698 ++ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
22699 ++}
22700 ++
22701 ++static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
22702 ++ bool vlan_filtering,
22703 ++ struct netlink_ext_ack *extack)
22704 ++{
22705 ++ struct realtek_smi *smi = ds->priv;
22706 ++ struct rtl8366rb *rb;
22707 ++ int ret;
22708 ++
22709 ++ rb = smi->chip_data;
22710 ++
22711 ++ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
22712 ++ vlan_filtering ? "enable" : "disable");
22713 ++
22714 ++ /* If the port is not in the member set, the frame will be dropped */
22715 ++ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
22716 ++ BIT(port), vlan_filtering ? BIT(port) : 0);
22717 ++ if (ret)
22718 ++ return ret;
22719 ++
22720 ++ /* If VLAN filtering is enabled and PVID is also enabled, we must
22721 ++ * not drop any untagged or C-tagged frames. If we turn off VLAN
22722 ++ * filtering on a port, we need to accept any frames.
22723 ++ */
22724 ++ if (vlan_filtering)
22725 ++ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
22726 ++ else
22727 ++ ret = rtl8366rb_drop_untagged(smi, port, false);
22728 ++
22729 ++ return ret;
22730 ++}
22731 ++
22732 ++static int
22733 ++rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
22734 ++ struct switchdev_brport_flags flags,
22735 ++ struct netlink_ext_ack *extack)
22736 ++{
22737 ++ /* We support enabling/disabling learning */
22738 ++ if (flags.mask & ~(BR_LEARNING))
22739 ++ return -EINVAL;
22740 ++
22741 ++ return 0;
22742 ++}
22743 ++
22744 ++static int
22745 ++rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
22746 ++ struct switchdev_brport_flags flags,
22747 ++ struct netlink_ext_ack *extack)
22748 ++{
22749 ++ struct realtek_smi *smi = ds->priv;
22750 ++ int ret;
22751 ++
22752 ++ if (flags.mask & BR_LEARNING) {
22753 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
22754 ++ BIT(port),
22755 ++ (flags.val & BR_LEARNING) ? 0 : BIT(port));
22756 ++ if (ret)
22757 ++ return ret;
22758 ++ }
22759 ++
22760 ++ return 0;
22761 ++}
22762 ++
22763 ++static void
22764 ++rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
22765 ++{
22766 ++ struct realtek_smi *smi = ds->priv;
22767 ++ u32 val;
22768 ++ int i;
22769 ++
22770 ++ switch (state) {
22771 ++ case BR_STATE_DISABLED:
22772 ++ val = RTL8366RB_STP_STATE_DISABLED;
22773 ++ break;
22774 ++ case BR_STATE_BLOCKING:
22775 ++ case BR_STATE_LISTENING:
22776 ++ val = RTL8366RB_STP_STATE_BLOCKING;
22777 ++ break;
22778 ++ case BR_STATE_LEARNING:
22779 ++ val = RTL8366RB_STP_STATE_LEARNING;
22780 ++ break;
22781 ++ case BR_STATE_FORWARDING:
22782 ++ val = RTL8366RB_STP_STATE_FORWARDING;
22783 ++ break;
22784 ++ default:
22785 ++ dev_err(smi->dev, "unknown bridge state requested\n");
22786 ++ return;
22787 ++ }
22788 ++
22789 ++ /* Set the same status for the port on all the FIDs */
22790 ++ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
22791 ++ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
22792 ++ RTL8366RB_STP_STATE_MASK(port),
22793 ++ RTL8366RB_STP_STATE(port, val));
22794 ++ }
22795 ++}
22796 ++
22797 ++static void
22798 ++rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
22799 ++{
22800 ++ struct realtek_smi *smi = ds->priv;
22801 ++
22802 ++ /* This will age out any learned L2 entries */
22803 ++ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
22804 ++ BIT(port), BIT(port));
22805 ++ /* Restore the normal state of things */
22806 ++ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
22807 ++ BIT(port), 0);
22808 ++}
22809 ++
22810 ++static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
22811 ++{
22812 ++ struct realtek_smi *smi = ds->priv;
22813 ++ struct rtl8366rb *rb;
22814 ++ unsigned int max_mtu;
22815 ++ u32 len;
22816 ++ int i;
22817 ++
22818 ++ /* Cache the per-port MTU setting */
22819 ++ rb = smi->chip_data;
22820 ++ rb->max_mtu[port] = new_mtu;
22821 ++
22822 ++ /* Roof out the MTU for the entire switch to the greatest
22823 ++ * common denominator: the biggest set for any one port will
22824 ++ * be the biggest MTU for the switch.
22825 ++ *
22826 ++ * The first setting, 1522 bytes, is max IP packet 1500 bytes,
22827 ++ * plus ethernet header, 1518 bytes, plus CPU tag, 4 bytes.
22828 ++ * This function should consider the parameter an SDU, so the
22829 ++ * MTU passed for this setting is 1518 bytes. The same logic
22830 ++ * of subtracting the DSA tag of 4 bytes apply to the other
22831 ++ * settings.
22832 ++ */
22833 ++ max_mtu = 1518;
22834 ++ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
22835 ++ if (rb->max_mtu[i] > max_mtu)
22836 ++ max_mtu = rb->max_mtu[i];
22837 ++ }
22838 ++ if (max_mtu <= 1518)
22839 ++ len = RTL8366RB_SGCR_MAX_LENGTH_1522;
22840 ++ else if (max_mtu > 1518 && max_mtu <= 1532)
22841 ++ len = RTL8366RB_SGCR_MAX_LENGTH_1536;
22842 ++ else if (max_mtu > 1532 && max_mtu <= 1548)
22843 ++ len = RTL8366RB_SGCR_MAX_LENGTH_1552;
22844 ++ else
22845 ++ len = RTL8366RB_SGCR_MAX_LENGTH_16000;
22846 ++
22847 ++ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
22848 ++ RTL8366RB_SGCR_MAX_LENGTH_MASK,
22849 ++ len);
22850 ++}
22851 ++
22852 ++static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
22853 ++{
22854 ++ /* The max MTU is 16000 bytes, so we subtract the CPU tag
22855 ++ * and the max presented to the system is 15996 bytes.
22856 ++ */
22857 ++ return 15996;
22858 ++}
22859 ++
22860 ++static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
22861 ++ struct rtl8366_vlan_4k *vlan4k)
22862 ++{
22863 ++ u32 data[3];
22864 ++ int ret;
22865 ++ int i;
22866 ++
22867 ++ memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
22868 ++
22869 ++ if (vid >= RTL8366RB_NUM_VIDS)
22870 ++ return -EINVAL;
22871 ++
22872 ++ /* write VID */
22873 ++ ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
22874 ++ vid & RTL8366RB_VLAN_VID_MASK);
22875 ++ if (ret)
22876 ++ return ret;
22877 ++
22878 ++ /* write table access control word */
22879 ++ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
22880 ++ RTL8366RB_TABLE_VLAN_READ_CTRL);
22881 ++ if (ret)
22882 ++ return ret;
22883 ++
22884 ++ for (i = 0; i < 3; i++) {
22885 ++ ret = regmap_read(smi->map,
22886 ++ RTL8366RB_VLAN_TABLE_READ_BASE + i,
22887 ++ &data[i]);
22888 ++ if (ret)
22889 ++ return ret;
22890 ++ }
22891 ++
22892 ++ vlan4k->vid = vid;
22893 ++ vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
22894 ++ RTL8366RB_VLAN_UNTAG_MASK;
22895 ++ vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
22896 ++ vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
22897 ++
22898 ++ return 0;
22899 ++}
22900 ++
22901 ++static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
22902 ++ const struct rtl8366_vlan_4k *vlan4k)
22903 ++{
22904 ++ u32 data[3];
22905 ++ int ret;
22906 ++ int i;
22907 ++
22908 ++ if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
22909 ++ vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
22910 ++ vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
22911 ++ vlan4k->fid > RTL8366RB_FIDMAX)
22912 ++ return -EINVAL;
22913 ++
22914 ++ data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
22915 ++ data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
22916 ++ ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
22917 ++ RTL8366RB_VLAN_UNTAG_SHIFT);
22918 ++ data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
22919 ++
22920 ++ for (i = 0; i < 3; i++) {
22921 ++ ret = regmap_write(smi->map,
22922 ++ RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
22923 ++ data[i]);
22924 ++ if (ret)
22925 ++ return ret;
22926 ++ }
22927 ++
22928 ++ /* write table access control word */
22929 ++ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
22930 ++ RTL8366RB_TABLE_VLAN_WRITE_CTRL);
22931 ++
22932 ++ return ret;
22933 ++}
22934 ++
22935 ++static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
22936 ++ struct rtl8366_vlan_mc *vlanmc)
22937 ++{
22938 ++ u32 data[3];
22939 ++ int ret;
22940 ++ int i;
22941 ++
22942 ++ memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
22943 ++
22944 ++ if (index >= RTL8366RB_NUM_VLANS)
22945 ++ return -EINVAL;
22946 ++
22947 ++ for (i = 0; i < 3; i++) {
22948 ++ ret = regmap_read(smi->map,
22949 ++ RTL8366RB_VLAN_MC_BASE(index) + i,
22950 ++ &data[i]);
22951 ++ if (ret)
22952 ++ return ret;
22953 ++ }
22954 ++
22955 ++ vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
22956 ++ vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
22957 ++ RTL8366RB_VLAN_PRIORITY_MASK;
22958 ++ vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
22959 ++ RTL8366RB_VLAN_UNTAG_MASK;
22960 ++ vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
22961 ++ vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
22962 ++
22963 ++ return 0;
22964 ++}
22965 ++
22966 ++static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
22967 ++ const struct rtl8366_vlan_mc *vlanmc)
22968 ++{
22969 ++ u32 data[3];
22970 ++ int ret;
22971 ++ int i;
22972 ++
22973 ++ if (index >= RTL8366RB_NUM_VLANS ||
22974 ++ vlanmc->vid >= RTL8366RB_NUM_VIDS ||
22975 ++ vlanmc->priority > RTL8366RB_PRIORITYMAX ||
22976 ++ vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
22977 ++ vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
22978 ++ vlanmc->fid > RTL8366RB_FIDMAX)
22979 ++ return -EINVAL;
22980 ++
22981 ++ data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
22982 ++ ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
22983 ++ RTL8366RB_VLAN_PRIORITY_SHIFT);
22984 ++ data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
22985 ++ ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
22986 ++ RTL8366RB_VLAN_UNTAG_SHIFT);
22987 ++ data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
22988 ++
22989 ++ for (i = 0; i < 3; i++) {
22990 ++ ret = regmap_write(smi->map,
22991 ++ RTL8366RB_VLAN_MC_BASE(index) + i,
22992 ++ data[i]);
22993 ++ if (ret)
22994 ++ return ret;
22995 ++ }
22996 ++
22997 ++ return 0;
22998 ++}
22999 ++
23000 ++static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
23001 ++{
23002 ++ u32 data;
23003 ++ int ret;
23004 ++
23005 ++ if (port >= smi->num_ports)
23006 ++ return -EINVAL;
23007 ++
23008 ++ ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
23009 ++ &data);
23010 ++ if (ret)
23011 ++ return ret;
23012 ++
23013 ++ *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
23014 ++ RTL8366RB_PORT_VLAN_CTRL_MASK;
23015 ++
23016 ++ return 0;
23017 ++}
23018 ++
23019 ++static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
23020 ++{
23021 ++ struct rtl8366rb *rb;
23022 ++ bool pvid_enabled;
23023 ++ int ret;
23024 ++
23025 ++ rb = smi->chip_data;
23026 ++ pvid_enabled = !!index;
23027 ++
23028 ++ if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
23029 ++ return -EINVAL;
23030 ++
23031 ++ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
23032 ++ RTL8366RB_PORT_VLAN_CTRL_MASK <<
23033 ++ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
23034 ++ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
23035 ++ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
23036 ++ if (ret)
23037 ++ return ret;
23038 ++
23039 ++ rb->pvid_enabled[port] = pvid_enabled;
23040 ++
23041 ++ /* If VLAN filtering is enabled and PVID is also enabled, we must
23042 ++ * not drop any untagged or C-tagged frames. Make sure to update the
23043 ++ * filtering setting.
23044 ++ */
23045 ++ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
23046 ++ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
23047 ++
23048 ++ return ret;
23049 ++}
23050 ++
23051 ++static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
23052 ++{
23053 ++ unsigned int max = RTL8366RB_NUM_VLANS - 1;
23054 ++
23055 ++ if (smi->vlan4k_enabled)
23056 ++ max = RTL8366RB_NUM_VIDS - 1;
23057 ++
23058 ++ if (vlan > max)
23059 ++ return false;
23060 ++
23061 ++ return true;
23062 ++}
23063 ++
23064 ++static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
23065 ++{
23066 ++ dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
23067 ++ return regmap_update_bits(smi->map,
23068 ++ RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
23069 ++ enable ? RTL8366RB_SGCR_EN_VLAN : 0);
23070 ++}
23071 ++
23072 ++static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
23073 ++{
23074 ++ dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
23075 ++ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
23076 ++ RTL8366RB_SGCR_EN_VLAN_4KTB,
23077 ++ enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
23078 ++}
23079 ++
23080 ++static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
23081 ++{
23082 ++ u32 val;
23083 ++ u32 reg;
23084 ++ int ret;
23085 ++
23086 ++ if (phy > RTL8366RB_PHY_NO_MAX)
23087 ++ return -EINVAL;
23088 ++
23089 ++ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
23090 ++ RTL8366RB_PHY_CTRL_READ);
23091 ++ if (ret)
23092 ++ return ret;
23093 ++
23094 ++ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
23095 ++
23096 ++ ret = regmap_write(smi->map, reg, 0);
23097 ++ if (ret) {
23098 ++ dev_err(smi->dev,
23099 ++ "failed to write PHY%d reg %04x @ %04x, ret %d\n",
23100 ++ phy, regnum, reg, ret);
23101 ++ return ret;
23102 ++ }
23103 ++
23104 ++ ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
23105 ++ if (ret)
23106 ++ return ret;
23107 ++
23108 ++ dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
23109 ++ phy, regnum, reg, val);
23110 ++
23111 ++ return val;
23112 ++}
23113 ++
23114 ++static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
23115 ++ u16 val)
23116 ++{
23117 ++ u32 reg;
23118 ++ int ret;
23119 ++
23120 ++ if (phy > RTL8366RB_PHY_NO_MAX)
23121 ++ return -EINVAL;
23122 ++
23123 ++ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
23124 ++ RTL8366RB_PHY_CTRL_WRITE);
23125 ++ if (ret)
23126 ++ return ret;
23127 ++
23128 ++ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
23129 ++
23130 ++ dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
23131 ++ phy, regnum, reg, val);
23132 ++
23133 ++ ret = regmap_write(smi->map, reg, val);
23134 ++ if (ret)
23135 ++ return ret;
23136 ++
23137 ++ return 0;
23138 ++}
23139 ++
23140 ++static int rtl8366rb_reset_chip(struct realtek_smi *smi)
23141 ++{
23142 ++ int timeout = 10;
23143 ++ u32 val;
23144 ++ int ret;
23145 ++
23146 ++ realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
23147 ++ RTL8366RB_CHIP_CTRL_RESET_HW);
23148 ++ do {
23149 ++ usleep_range(20000, 25000);
23150 ++ ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
23151 ++ if (ret)
23152 ++ return ret;
23153 ++
23154 ++ if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
23155 ++ break;
23156 ++ } while (--timeout);
23157 ++
23158 ++ if (!timeout) {
23159 ++ dev_err(smi->dev, "timeout waiting for the switch to reset\n");
23160 ++ return -EIO;
23161 ++ }
23162 ++
23163 ++ return 0;
23164 ++}
23165 ++
23166 ++static int rtl8366rb_detect(struct realtek_smi *smi)
23167 ++{
23168 ++ struct device *dev = smi->dev;
23169 ++ int ret;
23170 ++ u32 val;
23171 ++
23172 ++ /* Detect device */
23173 ++ ret = regmap_read(smi->map, 0x5c, &val);
23174 ++ if (ret) {
23175 ++ dev_err(dev, "can't get chip ID (%d)\n", ret);
23176 ++ return ret;
23177 ++ }
23178 ++
23179 ++ switch (val) {
23180 ++ case 0x6027:
23181 ++ dev_info(dev, "found an RTL8366S switch\n");
23182 ++ dev_err(dev, "this switch is not yet supported, submit patches!\n");
23183 ++ return -ENODEV;
23184 ++ case 0x5937:
23185 ++ dev_info(dev, "found an RTL8366RB switch\n");
23186 ++ smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
23187 ++ smi->num_ports = RTL8366RB_NUM_PORTS;
23188 ++ smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
23189 ++ smi->mib_counters = rtl8366rb_mib_counters;
23190 ++ smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
23191 ++ break;
23192 ++ default:
23193 ++ dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
23194 ++ val);
23195 ++ break;
23196 ++ }
23197 ++
23198 ++ ret = rtl8366rb_reset_chip(smi);
23199 ++ if (ret)
23200 ++ return ret;
23201 ++
23202 ++ return 0;
23203 ++}
23204 ++
23205 ++static const struct dsa_switch_ops rtl8366rb_switch_ops = {
23206 ++ .get_tag_protocol = rtl8366_get_tag_protocol,
23207 ++ .setup = rtl8366rb_setup,
23208 ++ .phylink_mac_link_up = rtl8366rb_mac_link_up,
23209 ++ .phylink_mac_link_down = rtl8366rb_mac_link_down,
23210 ++ .get_strings = rtl8366_get_strings,
23211 ++ .get_ethtool_stats = rtl8366_get_ethtool_stats,
23212 ++ .get_sset_count = rtl8366_get_sset_count,
23213 ++ .port_bridge_join = rtl8366rb_port_bridge_join,
23214 ++ .port_bridge_leave = rtl8366rb_port_bridge_leave,
23215 ++ .port_vlan_filtering = rtl8366rb_vlan_filtering,
23216 ++ .port_vlan_add = rtl8366_vlan_add,
23217 ++ .port_vlan_del = rtl8366_vlan_del,
23218 ++ .port_enable = rtl8366rb_port_enable,
23219 ++ .port_disable = rtl8366rb_port_disable,
23220 ++ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
23221 ++ .port_bridge_flags = rtl8366rb_port_bridge_flags,
23222 ++ .port_stp_state_set = rtl8366rb_port_stp_state_set,
23223 ++ .port_fast_age = rtl8366rb_port_fast_age,
23224 ++ .port_change_mtu = rtl8366rb_change_mtu,
23225 ++ .port_max_mtu = rtl8366rb_max_mtu,
23226 ++};
23227 ++
23228 ++static const struct realtek_smi_ops rtl8366rb_smi_ops = {
23229 ++ .detect = rtl8366rb_detect,
23230 ++ .get_vlan_mc = rtl8366rb_get_vlan_mc,
23231 ++ .set_vlan_mc = rtl8366rb_set_vlan_mc,
23232 ++ .get_vlan_4k = rtl8366rb_get_vlan_4k,
23233 ++ .set_vlan_4k = rtl8366rb_set_vlan_4k,
23234 ++ .get_mc_index = rtl8366rb_get_mc_index,
23235 ++ .set_mc_index = rtl8366rb_set_mc_index,
23236 ++ .get_mib_counter = rtl8366rb_get_mib_counter,
23237 ++ .is_vlan_valid = rtl8366rb_is_vlan_valid,
23238 ++ .enable_vlan = rtl8366rb_enable_vlan,
23239 ++ .enable_vlan4k = rtl8366rb_enable_vlan4k,
23240 ++ .phy_read = rtl8366rb_phy_read,
23241 ++ .phy_write = rtl8366rb_phy_write,
23242 ++};
23243 ++
23244 ++const struct realtek_smi_variant rtl8366rb_variant = {
23245 ++ .ds_ops = &rtl8366rb_switch_ops,
23246 ++ .ops = &rtl8366rb_smi_ops,
23247 ++ .clk_delay = 10,
23248 ++ .cmd_read = 0xa9,
23249 ++ .cmd_write = 0xa8,
23250 ++ .chip_data_sz = sizeof(struct rtl8366rb),
23251 ++};
23252 ++EXPORT_SYMBOL_GPL(rtl8366rb_variant);
23253 +diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
23254 +deleted file mode 100644
23255 +index 3b729544798b1..0000000000000
23256 +--- a/drivers/net/dsa/rtl8365mb.c
23257 ++++ /dev/null
23258 +@@ -1,1987 +0,0 @@
23259 +-// SPDX-License-Identifier: GPL-2.0
23260 +-/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
23261 +- *
23262 +- * Copyright (C) 2021 Alvin Šipraga <alsi@××××××××××××.dk>
23263 +- * Copyright (C) 2021 Michael Rasmussen <mir@××××××××××××.dk>
23264 +- *
23265 +- * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
23266 +- * integrated PHYs for the user facing ports, and an extension interface which
23267 +- * can be connected to the CPU - or another PHY - via either MII, RMII, or
23268 +- * RGMII. The switch is configured via the Realtek Simple Management Interface
23269 +- * (SMI), which uses the MDIO/MDC lines.
23270 +- *
23271 +- * Below is a simplified block diagram of the chip and its relevant interfaces.
23272 +- *
23273 +- * .-----------------------------------.
23274 +- * | |
23275 +- * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
23276 +- * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
23277 +- * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
23278 +- * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
23279 +- * | |
23280 +- * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
23281 +- * | interface 1 GMAC 1 |
23282 +- * | |
23283 +- * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
23284 +- * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
23285 +- * | ~RTL8365MB ~~~ |
23286 +- * | ~GXXXC TAIWAN~ |
23287 +- * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
23288 +- * | |
23289 +- * Interrupt <----------> Link UP/DOWN events |
23290 +- * controller | |
23291 +- * '-----------------------------------'
23292 +- *
23293 +- * The driver uses DSA to integrate the 4 user and 1 extension ports into the
23294 +- * kernel. Netdevices are created for the user ports, as are PHY devices for
23295 +- * their integrated PHYs. The device tree firmware should also specify the link
23296 +- * partner of the extension port - either via a fixed-link or other phy-handle.
23297 +- * See the device tree bindings for more detailed information. Note that the
23298 +- * driver has only been tested with a fixed-link, but in principle it should not
23299 +- * matter.
23300 +- *
23301 +- * NOTE: Currently, only the RGMII interface is implemented in this driver.
23302 +- *
23303 +- * The interrupt line is asserted on link UP/DOWN events. The driver creates a
23304 +- * custom irqchip to handle this interrupt and demultiplex the events by reading
23305 +- * the status registers via SMI. Interrupts are then propagated to the relevant
23306 +- * PHY device.
23307 +- *
23308 +- * The EEPROM contains initial register values which the chip will read over I2C
23309 +- * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
23310 +- * the driver will manually reprogram some registers using jam tables to reach
23311 +- * an initial state defined by the vendor driver.
23312 +- *
23313 +- * This Linux driver is written based on an OS-agnostic vendor driver from
23314 +- * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
23315 +- * source tree under the name rtl8367c. The vendor driver claims to support a
23316 +- * number of similar switch controllers from Realtek, but the only hardware we
23317 +- * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
23318 +- * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
23319 +- * common hardware revision, there exist examples of chips with the suffix -VC
23320 +- * which are explicitly not supported by the rtl8367c driver and which instead
23321 +- * require the rtl8367d vendor driver. With all this uncertainty, the driver has
23322 +- * been modestly named rtl8365mb. Future implementors may wish to rename things
23323 +- * accordingly.
23324 +- *
23325 +- * In the same family of chips, some carry up to 8 user ports and up to 2
23326 +- * extension ports. Where possible this driver tries to make things generic, but
23327 +- * more work must be done to support these configurations. According to
23328 +- * documentation from Realtek, the family should include the following chips:
23329 +- *
23330 +- * - RTL8363NB
23331 +- * - RTL8363NB-VB
23332 +- * - RTL8363SC
23333 +- * - RTL8363SC-VB
23334 +- * - RTL8364NB
23335 +- * - RTL8364NB-VB
23336 +- * - RTL8365MB-VC
23337 +- * - RTL8366SC
23338 +- * - RTL8367RB-VB
23339 +- * - RTL8367SB
23340 +- * - RTL8367S
23341 +- * - RTL8370MB
23342 +- * - RTL8310SR
23343 +- *
23344 +- * Some of the register logic for these additional chips has been skipped over
23345 +- * while implementing this driver. It is therefore not possible to assume that
23346 +- * things will work out-of-the-box for other chips, and a careful review of the
23347 +- * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
23348 +- * one of the simpler chips.
23349 +- */
23350 +-
23351 +-#include <linux/bitfield.h>
23352 +-#include <linux/bitops.h>
23353 +-#include <linux/interrupt.h>
23354 +-#include <linux/irqdomain.h>
23355 +-#include <linux/mutex.h>
23356 +-#include <linux/of_irq.h>
23357 +-#include <linux/regmap.h>
23358 +-#include <linux/if_bridge.h>
23359 +-
23360 +-#include "realtek-smi-core.h"
23361 +-
23362 +-/* Chip-specific data and limits */
23363 +-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
23364 +-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
23365 +-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
23366 +-
23367 +-/* Family-specific data and limits */
23368 +-#define RTL8365MB_PHYADDRMAX 7
23369 +-#define RTL8365MB_NUM_PHYREGS 32
23370 +-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
23371 +-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
23372 +-
23373 +-/* Chip identification registers */
23374 +-#define RTL8365MB_CHIP_ID_REG 0x1300
23375 +-
23376 +-#define RTL8365MB_CHIP_VER_REG 0x1301
23377 +-
23378 +-#define RTL8365MB_MAGIC_REG 0x13C2
23379 +-#define RTL8365MB_MAGIC_VALUE 0x0249
23380 +-
23381 +-/* Chip reset register */
23382 +-#define RTL8365MB_CHIP_RESET_REG 0x1322
23383 +-#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
23384 +-#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
23385 +-
23386 +-/* Interrupt polarity register */
23387 +-#define RTL8365MB_INTR_POLARITY_REG 0x1100
23388 +-#define RTL8365MB_INTR_POLARITY_MASK 0x0001
23389 +-#define RTL8365MB_INTR_POLARITY_HIGH 0
23390 +-#define RTL8365MB_INTR_POLARITY_LOW 1
23391 +-
23392 +-/* Interrupt control/status register - enable/check specific interrupt types */
23393 +-#define RTL8365MB_INTR_CTRL_REG 0x1101
23394 +-#define RTL8365MB_INTR_STATUS_REG 0x1102
23395 +-#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
23396 +-#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
23397 +-#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
23398 +-#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
23399 +-#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
23400 +-#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
23401 +-#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
23402 +-#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
23403 +-#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
23404 +-#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
23405 +-#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
23406 +-#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
23407 +-#define RTL8365MB_INTR_ALL_MASK \
23408 +- (RTL8365MB_INTR_SLIENT_START_2_MASK | \
23409 +- RTL8365MB_INTR_SLIENT_START_MASK | \
23410 +- RTL8365MB_INTR_ACL_ACTION_MASK | \
23411 +- RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
23412 +- RTL8365MB_INTR_INTERRUPT_8051_MASK | \
23413 +- RTL8365MB_INTR_LOOP_DETECTION_MASK | \
23414 +- RTL8365MB_INTR_GREEN_TIMER_MASK | \
23415 +- RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
23416 +- RTL8365MB_INTR_SPEED_CHANGE_MASK | \
23417 +- RTL8365MB_INTR_LEARN_OVER_MASK | \
23418 +- RTL8365MB_INTR_METER_EXCEEDED_MASK | \
23419 +- RTL8365MB_INTR_LINK_CHANGE_MASK)
23420 +-
23421 +-/* Per-port interrupt type status registers */
23422 +-#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
23423 +-#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
23424 +-
23425 +-#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
23426 +-#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
23427 +-
23428 +-/* PHY indirect access registers */
23429 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
23430 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
23431 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
23432 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
23433 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
23434 +-#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
23435 +-#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
23436 +-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
23437 +-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
23438 +-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
23439 +-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
23440 +-#define RTL8365MB_PHY_BASE 0x2000
23441 +-#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
23442 +-#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
23443 +-
23444 +-/* PHY OCP address prefix register */
23445 +-#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
23446 +-#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
23447 +-#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
23448 +-
23449 +-/* The PHY OCP addresses of PHY registers 0~31 start here */
23450 +-#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
23451 +-
23452 +-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
23453 +-#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
23454 +-#define RTL8365MB_EXT_PORT_MODE_RGMII 1
23455 +-#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
23456 +-#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
23457 +-#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
23458 +-#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
23459 +-#define RTL8365MB_EXT_PORT_MODE_GMII 6
23460 +-#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
23461 +-#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
23462 +-#define RTL8365MB_EXT_PORT_MODE_SGMII 9
23463 +-#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
23464 +-#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
23465 +-#define RTL8365MB_EXT_PORT_MODE_1000X 12
23466 +-#define RTL8365MB_EXT_PORT_MODE_100FX 13
23467 +-
23468 +-/* EXT port interface mode configuration registers 0~1 */
23469 +-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
23470 +-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
23471 +-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
23472 +- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
23473 +- ((_extport) >> 1) * (0x13C3 - 0x1305))
23474 +-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
23475 +- (0xF << (((_extport) % 2)))
23476 +-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
23477 +- (((_extport) % 2) * 4)
23478 +-
23479 +-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
23480 +-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
23481 +-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
23482 +-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
23483 +- (RTL8365MB_EXT_RGMXF_REG1 + \
23484 +- (((_extport) >> 1) * (0x13C5 - 0x1307)))
23485 +-#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
23486 +-#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
23487 +-
23488 +-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
23489 +-#define RTL8365MB_PORT_SPEED_10M 0
23490 +-#define RTL8365MB_PORT_SPEED_100M 1
23491 +-#define RTL8365MB_PORT_SPEED_1000M 2
23492 +-
23493 +-/* EXT port force configuration registers 0~2 */
23494 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
23495 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
23496 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
23497 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
23498 +- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
23499 +- ((_extport) & 0x1) + \
23500 +- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
23501 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
23502 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
23503 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
23504 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
23505 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
23506 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
23507 +-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
23508 +-
23509 +-/* CPU port mask register - controls which ports are treated as CPU ports */
23510 +-#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
23511 +-#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
23512 +-
23513 +-/* CPU control register */
23514 +-#define RTL8365MB_CPU_CTRL_REG 0x121A
23515 +-#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
23516 +-#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
23517 +-#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
23518 +-#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
23519 +-#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
23520 +-#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
23521 +-#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
23522 +-
23523 +-/* Maximum packet length register */
23524 +-#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
23525 +-#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
23526 +-
23527 +-/* Port learning limit registers */
23528 +-#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
23529 +-#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
23530 +- (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
23531 +-
23532 +-/* Port isolation (forwarding mask) registers */
23533 +-#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
23534 +-#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
23535 +- (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
23536 +-#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
23537 +-
23538 +-/* MSTP port state registers - indexed by tree instance */
23539 +-#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
23540 +-#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
23541 +- (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
23542 +-#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
23543 +-#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
23544 +- (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
23545 +-
23546 +-/* MIB counter value registers */
23547 +-#define RTL8365MB_MIB_COUNTER_BASE 0x1000
23548 +-#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
23549 +-
23550 +-/* MIB counter address register */
23551 +-#define RTL8365MB_MIB_ADDRESS_REG 0x1004
23552 +-#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
23553 +-#define RTL8365MB_MIB_ADDRESS(_p, _x) \
23554 +- (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
23555 +-
23556 +-#define RTL8365MB_MIB_CTRL0_REG 0x1005
23557 +-#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
23558 +-#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
23559 +-
23560 +-/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
23561 +- * to block. On the other hand, accessing MIB counters absolutely requires us to
23562 +- * block. The solution is thus to schedule work which polls the MIB counters
23563 +- * asynchronously and updates some private data, which the callback can then
23564 +- * fetch atomically. Three seconds should be a good enough polling interval.
23565 +- */
23566 +-#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
23567 +-
23568 +-enum rtl8365mb_mib_counter_index {
23569 +- RTL8365MB_MIB_ifInOctets,
23570 +- RTL8365MB_MIB_dot3StatsFCSErrors,
23571 +- RTL8365MB_MIB_dot3StatsSymbolErrors,
23572 +- RTL8365MB_MIB_dot3InPauseFrames,
23573 +- RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
23574 +- RTL8365MB_MIB_etherStatsFragments,
23575 +- RTL8365MB_MIB_etherStatsJabbers,
23576 +- RTL8365MB_MIB_ifInUcastPkts,
23577 +- RTL8365MB_MIB_etherStatsDropEvents,
23578 +- RTL8365MB_MIB_ifInMulticastPkts,
23579 +- RTL8365MB_MIB_ifInBroadcastPkts,
23580 +- RTL8365MB_MIB_inMldChecksumError,
23581 +- RTL8365MB_MIB_inIgmpChecksumError,
23582 +- RTL8365MB_MIB_inMldSpecificQuery,
23583 +- RTL8365MB_MIB_inMldGeneralQuery,
23584 +- RTL8365MB_MIB_inIgmpSpecificQuery,
23585 +- RTL8365MB_MIB_inIgmpGeneralQuery,
23586 +- RTL8365MB_MIB_inMldLeaves,
23587 +- RTL8365MB_MIB_inIgmpLeaves,
23588 +- RTL8365MB_MIB_etherStatsOctets,
23589 +- RTL8365MB_MIB_etherStatsUnderSizePkts,
23590 +- RTL8365MB_MIB_etherOversizeStats,
23591 +- RTL8365MB_MIB_etherStatsPkts64Octets,
23592 +- RTL8365MB_MIB_etherStatsPkts65to127Octets,
23593 +- RTL8365MB_MIB_etherStatsPkts128to255Octets,
23594 +- RTL8365MB_MIB_etherStatsPkts256to511Octets,
23595 +- RTL8365MB_MIB_etherStatsPkts512to1023Octets,
23596 +- RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
23597 +- RTL8365MB_MIB_ifOutOctets,
23598 +- RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
23599 +- RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
23600 +- RTL8365MB_MIB_dot3StatsDeferredTransmissions,
23601 +- RTL8365MB_MIB_dot3StatsLateCollisions,
23602 +- RTL8365MB_MIB_etherStatsCollisions,
23603 +- RTL8365MB_MIB_dot3StatsExcessiveCollisions,
23604 +- RTL8365MB_MIB_dot3OutPauseFrames,
23605 +- RTL8365MB_MIB_ifOutDiscards,
23606 +- RTL8365MB_MIB_dot1dTpPortInDiscards,
23607 +- RTL8365MB_MIB_ifOutUcastPkts,
23608 +- RTL8365MB_MIB_ifOutMulticastPkts,
23609 +- RTL8365MB_MIB_ifOutBroadcastPkts,
23610 +- RTL8365MB_MIB_outOampduPkts,
23611 +- RTL8365MB_MIB_inOampduPkts,
23612 +- RTL8365MB_MIB_inIgmpJoinsSuccess,
23613 +- RTL8365MB_MIB_inIgmpJoinsFail,
23614 +- RTL8365MB_MIB_inMldJoinsSuccess,
23615 +- RTL8365MB_MIB_inMldJoinsFail,
23616 +- RTL8365MB_MIB_inReportSuppressionDrop,
23617 +- RTL8365MB_MIB_inLeaveSuppressionDrop,
23618 +- RTL8365MB_MIB_outIgmpReports,
23619 +- RTL8365MB_MIB_outIgmpLeaves,
23620 +- RTL8365MB_MIB_outIgmpGeneralQuery,
23621 +- RTL8365MB_MIB_outIgmpSpecificQuery,
23622 +- RTL8365MB_MIB_outMldReports,
23623 +- RTL8365MB_MIB_outMldLeaves,
23624 +- RTL8365MB_MIB_outMldGeneralQuery,
23625 +- RTL8365MB_MIB_outMldSpecificQuery,
23626 +- RTL8365MB_MIB_inKnownMulticastPkts,
23627 +- RTL8365MB_MIB_END,
23628 +-};
23629 +-
23630 +-struct rtl8365mb_mib_counter {
23631 +- u32 offset;
23632 +- u32 length;
23633 +- const char *name;
23634 +-};
23635 +-
23636 +-#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
23637 +- [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
23638 +-
23639 +-static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
23640 +- RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
23641 +- RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
23642 +- RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
23643 +- RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
23644 +- RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
23645 +- RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
23646 +- RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
23647 +- RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
23648 +- RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
23649 +- RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
23650 +- RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
23651 +- RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
23652 +- RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
23653 +- RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
23654 +- RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
23655 +- RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
23656 +- RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
23657 +- RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
23658 +- RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
23659 +- RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
23660 +- RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
23661 +- RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
23662 +- RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
23663 +- RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
23664 +- RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
23665 +- RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
23666 +- RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
23667 +- RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
23668 +- RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
23669 +- RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
23670 +- RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
23671 +- RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
23672 +- RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
23673 +- RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
23674 +- RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
23675 +- RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
23676 +- RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
23677 +- RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
23678 +- RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
23679 +- RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
23680 +- RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
23681 +- RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
23682 +- RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
23683 +- RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
23684 +- RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
23685 +- RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
23686 +- RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
23687 +- RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
23688 +- RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
23689 +- RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
23690 +- RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
23691 +- RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
23692 +- RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
23693 +- RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
23694 +- RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
23695 +- RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
23696 +- RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
23697 +- RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
23698 +-};
23699 +-
23700 +-static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
23701 +-
23702 +-struct rtl8365mb_jam_tbl_entry {
23703 +- u16 reg;
23704 +- u16 val;
23705 +-};
23706 +-
23707 +-/* Lifted from the vendor driver sources */
23708 +-static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
23709 +- { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
23710 +- { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
23711 +- { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
23712 +- { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
23713 +- { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
23714 +- { 0x13F0, 0x0000 },
23715 +-};
23716 +-
23717 +-static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
23718 +- { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
23719 +- { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
23720 +- { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
23721 +- { 0x1D32, 0x0002 },
23722 +-};
23723 +-
23724 +-enum rtl8365mb_stp_state {
23725 +- RTL8365MB_STP_STATE_DISABLED = 0,
23726 +- RTL8365MB_STP_STATE_BLOCKING = 1,
23727 +- RTL8365MB_STP_STATE_LEARNING = 2,
23728 +- RTL8365MB_STP_STATE_FORWARDING = 3,
23729 +-};
23730 +-
23731 +-enum rtl8365mb_cpu_insert {
23732 +- RTL8365MB_CPU_INSERT_TO_ALL = 0,
23733 +- RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
23734 +- RTL8365MB_CPU_INSERT_TO_NONE = 2,
23735 +-};
23736 +-
23737 +-enum rtl8365mb_cpu_position {
23738 +- RTL8365MB_CPU_POS_AFTER_SA = 0,
23739 +- RTL8365MB_CPU_POS_BEFORE_CRC = 1,
23740 +-};
23741 +-
23742 +-enum rtl8365mb_cpu_format {
23743 +- RTL8365MB_CPU_FORMAT_8BYTES = 0,
23744 +- RTL8365MB_CPU_FORMAT_4BYTES = 1,
23745 +-};
23746 +-
23747 +-enum rtl8365mb_cpu_rxlen {
23748 +- RTL8365MB_CPU_RXLEN_72BYTES = 0,
23749 +- RTL8365MB_CPU_RXLEN_64BYTES = 1,
23750 +-};
23751 +-
23752 +-/**
23753 +- * struct rtl8365mb_cpu - CPU port configuration
23754 +- * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
23755 +- * @mask: port mask of ports that parse should parse CPU tags
23756 +- * @trap_port: forward trapped frames to this port
23757 +- * @insert: CPU tag insertion mode in switch->CPU frames
23758 +- * @position: position of CPU tag in frame
23759 +- * @rx_length: minimum CPU RX length
23760 +- * @format: CPU tag format
23761 +- *
23762 +- * Represents the CPU tagging and CPU port configuration of the switch. These
23763 +- * settings are configurable at runtime.
23764 +- */
23765 +-struct rtl8365mb_cpu {
23766 +- bool enable;
23767 +- u32 mask;
23768 +- u32 trap_port;
23769 +- enum rtl8365mb_cpu_insert insert;
23770 +- enum rtl8365mb_cpu_position position;
23771 +- enum rtl8365mb_cpu_rxlen rx_length;
23772 +- enum rtl8365mb_cpu_format format;
23773 +-};
23774 +-
23775 +-/**
23776 +- * struct rtl8365mb_port - private per-port data
23777 +- * @smi: pointer to parent realtek_smi data
23778 +- * @index: DSA port index, same as dsa_port::index
23779 +- * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
23780 +- * access via rtl8365mb_get_stats64
23781 +- * @stats_lock: protect the stats structure during read/update
23782 +- * @mib_work: delayed work for polling MIB counters
23783 +- */
23784 +-struct rtl8365mb_port {
23785 +- struct realtek_smi *smi;
23786 +- unsigned int index;
23787 +- struct rtnl_link_stats64 stats;
23788 +- spinlock_t stats_lock;
23789 +- struct delayed_work mib_work;
23790 +-};
23791 +-
23792 +-/**
23793 +- * struct rtl8365mb - private chip-specific driver data
23794 +- * @smi: pointer to parent realtek_smi data
23795 +- * @irq: registered IRQ or zero
23796 +- * @chip_id: chip identifier
23797 +- * @chip_ver: chip silicon revision
23798 +- * @port_mask: mask of all ports
23799 +- * @learn_limit_max: maximum number of L2 addresses the chip can learn
23800 +- * @cpu: CPU tagging and CPU port configuration for this chip
23801 +- * @mib_lock: prevent concurrent reads of MIB counters
23802 +- * @ports: per-port data
23803 +- * @jam_table: chip-specific initialization jam table
23804 +- * @jam_size: size of the chip's jam table
23805 +- *
23806 +- * Private data for this driver.
23807 +- */
23808 +-struct rtl8365mb {
23809 +- struct realtek_smi *smi;
23810 +- int irq;
23811 +- u32 chip_id;
23812 +- u32 chip_ver;
23813 +- u32 port_mask;
23814 +- u32 learn_limit_max;
23815 +- struct rtl8365mb_cpu cpu;
23816 +- struct mutex mib_lock;
23817 +- struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
23818 +- const struct rtl8365mb_jam_tbl_entry *jam_table;
23819 +- size_t jam_size;
23820 +-};
23821 +-
23822 +-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
23823 +-{
23824 +- u32 val;
23825 +-
23826 +- return regmap_read_poll_timeout(smi->map,
23827 +- RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
23828 +- val, !val, 10, 100);
23829 +-}
23830 +-
23831 +-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
23832 +- u32 ocp_addr)
23833 +-{
23834 +- u32 val;
23835 +- int ret;
23836 +-
23837 +- /* Set OCP prefix */
23838 +- val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
23839 +- ret = regmap_update_bits(
23840 +- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
23841 +- RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
23842 +- FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
23843 +- if (ret)
23844 +- return ret;
23845 +-
23846 +- /* Set PHY register address */
23847 +- val = RTL8365MB_PHY_BASE;
23848 +- val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
23849 +- val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
23850 +- ocp_addr >> 1);
23851 +- val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
23852 +- ocp_addr >> 6);
23853 +- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
23854 +- val);
23855 +- if (ret)
23856 +- return ret;
23857 +-
23858 +- return 0;
23859 +-}
23860 +-
23861 +-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
23862 +- u32 ocp_addr, u16 *data)
23863 +-{
23864 +- u32 val;
23865 +- int ret;
23866 +-
23867 +- ret = rtl8365mb_phy_poll_busy(smi);
23868 +- if (ret)
23869 +- return ret;
23870 +-
23871 +- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
23872 +- if (ret)
23873 +- return ret;
23874 +-
23875 +- /* Execute read operation */
23876 +- val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
23877 +- RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
23878 +- FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
23879 +- RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
23880 +- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
23881 +- if (ret)
23882 +- return ret;
23883 +-
23884 +- ret = rtl8365mb_phy_poll_busy(smi);
23885 +- if (ret)
23886 +- return ret;
23887 +-
23888 +- /* Get PHY register data */
23889 +- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
23890 +- &val);
23891 +- if (ret)
23892 +- return ret;
23893 +-
23894 +- *data = val & 0xFFFF;
23895 +-
23896 +- return 0;
23897 +-}
23898 +-
23899 +-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
23900 +- u32 ocp_addr, u16 data)
23901 +-{
23902 +- u32 val;
23903 +- int ret;
23904 +-
23905 +- ret = rtl8365mb_phy_poll_busy(smi);
23906 +- if (ret)
23907 +- return ret;
23908 +-
23909 +- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
23910 +- if (ret)
23911 +- return ret;
23912 +-
23913 +- /* Set PHY register data */
23914 +- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
23915 +- data);
23916 +- if (ret)
23917 +- return ret;
23918 +-
23919 +- /* Execute write operation */
23920 +- val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
23921 +- RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
23922 +- FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
23923 +- RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
23924 +- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
23925 +- if (ret)
23926 +- return ret;
23927 +-
23928 +- ret = rtl8365mb_phy_poll_busy(smi);
23929 +- if (ret)
23930 +- return ret;
23931 +-
23932 +- return 0;
23933 +-}
23934 +-
23935 +-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
23936 +-{
23937 +- u32 ocp_addr;
23938 +- u16 val;
23939 +- int ret;
23940 +-
23941 +- if (phy > RTL8365MB_PHYADDRMAX)
23942 +- return -EINVAL;
23943 +-
23944 +- if (regnum > RTL8365MB_PHYREGMAX)
23945 +- return -EINVAL;
23946 +-
23947 +- ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
23948 +-
23949 +- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
23950 +- if (ret) {
23951 +- dev_err(smi->dev,
23952 +- "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
23953 +- regnum, ocp_addr, ret);
23954 +- return ret;
23955 +- }
23956 +-
23957 +- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
23958 +- phy, regnum, ocp_addr, val);
23959 +-
23960 +- return val;
23961 +-}
23962 +-
23963 +-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
23964 +- u16 val)
23965 +-{
23966 +- u32 ocp_addr;
23967 +- int ret;
23968 +-
23969 +- if (phy > RTL8365MB_PHYADDRMAX)
23970 +- return -EINVAL;
23971 +-
23972 +- if (regnum > RTL8365MB_PHYREGMAX)
23973 +- return -EINVAL;
23974 +-
23975 +- ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
23976 +-
23977 +- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
23978 +- if (ret) {
23979 +- dev_err(smi->dev,
23980 +- "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
23981 +- regnum, ocp_addr, ret);
23982 +- return ret;
23983 +- }
23984 +-
23985 +- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
23986 +- phy, regnum, ocp_addr, val);
23987 +-
23988 +- return 0;
23989 +-}
23990 +-
23991 +-static enum dsa_tag_protocol
23992 +-rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
23993 +- enum dsa_tag_protocol mp)
23994 +-{
23995 +- return DSA_TAG_PROTO_RTL8_4;
23996 +-}
23997 +-
23998 +-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
23999 +- phy_interface_t interface)
24000 +-{
24001 +- struct device_node *dn;
24002 +- struct dsa_port *dp;
24003 +- int tx_delay = 0;
24004 +- int rx_delay = 0;
24005 +- int ext_port;
24006 +- u32 val;
24007 +- int ret;
24008 +-
24009 +- if (port == smi->cpu_port) {
24010 +- ext_port = 1;
24011 +- } else {
24012 +- dev_err(smi->dev, "only one EXT port is currently supported\n");
24013 +- return -EINVAL;
24014 +- }
24015 +-
24016 +- dp = dsa_to_port(smi->ds, port);
24017 +- dn = dp->dn;
24018 +-
24019 +- /* Set the RGMII TX/RX delay
24020 +- *
24021 +- * The Realtek vendor driver indicates the following possible
24022 +- * configuration settings:
24023 +- *
24024 +- * TX delay:
24025 +- * 0 = no delay, 1 = 2 ns delay
24026 +- * RX delay:
24027 +- * 0 = no delay, 7 = maximum delay
24028 +- * Each step is approximately 0.3 ns, so the maximum delay is about
24029 +- * 2.1 ns.
24030 +- *
24031 +- * The vendor driver also states that this must be configured *before*
24032 +- * forcing the external interface into a particular mode, which is done
24033 +- * in the rtl8365mb_phylink_mac_link_{up,down} functions.
24034 +- *
24035 +- * Only configure an RGMII TX (resp. RX) delay if the
24036 +- * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
24037 +- * specified. We ignore the detail of the RGMII interface mode
24038 +- * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
24039 +- * property.
24040 +- */
24041 +- if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
24042 +- val = val / 1000; /* convert to ns */
24043 +-
24044 +- if (val == 0 || val == 2)
24045 +- tx_delay = val / 2;
24046 +- else
24047 +- dev_warn(smi->dev,
24048 +- "EXT port TX delay must be 0 or 2 ns\n");
24049 +- }
24050 +-
24051 +- if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
24052 +- val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
24053 +-
24054 +- if (val <= 7)
24055 +- rx_delay = val;
24056 +- else
24057 +- dev_warn(smi->dev,
24058 +- "EXT port RX delay must be 0 to 2.1 ns\n");
24059 +- }
24060 +-
24061 +- ret = regmap_update_bits(
24062 +- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
24063 +- RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
24064 +- RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
24065 +- FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
24066 +- FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
24067 +- if (ret)
24068 +- return ret;
24069 +-
24070 +- ret = regmap_update_bits(
24071 +- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
24072 +- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
24073 +- RTL8365MB_EXT_PORT_MODE_RGMII
24074 +- << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
24075 +- ext_port));
24076 +- if (ret)
24077 +- return ret;
24078 +-
24079 +- return 0;
24080 +-}
24081 +-
24082 +-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
24083 +- bool link, int speed, int duplex,
24084 +- bool tx_pause, bool rx_pause)
24085 +-{
24086 +- u32 r_tx_pause;
24087 +- u32 r_rx_pause;
24088 +- u32 r_duplex;
24089 +- u32 r_speed;
24090 +- u32 r_link;
24091 +- int ext_port;
24092 +- int val;
24093 +- int ret;
24094 +-
24095 +- if (port == smi->cpu_port) {
24096 +- ext_port = 1;
24097 +- } else {
24098 +- dev_err(smi->dev, "only one EXT port is currently supported\n");
24099 +- return -EINVAL;
24100 +- }
24101 +-
24102 +- if (link) {
24103 +- /* Force the link up with the desired configuration */
24104 +- r_link = 1;
24105 +- r_rx_pause = rx_pause ? 1 : 0;
24106 +- r_tx_pause = tx_pause ? 1 : 0;
24107 +-
24108 +- if (speed == SPEED_1000) {
24109 +- r_speed = RTL8365MB_PORT_SPEED_1000M;
24110 +- } else if (speed == SPEED_100) {
24111 +- r_speed = RTL8365MB_PORT_SPEED_100M;
24112 +- } else if (speed == SPEED_10) {
24113 +- r_speed = RTL8365MB_PORT_SPEED_10M;
24114 +- } else {
24115 +- dev_err(smi->dev, "unsupported port speed %s\n",
24116 +- phy_speed_to_str(speed));
24117 +- return -EINVAL;
24118 +- }
24119 +-
24120 +- if (duplex == DUPLEX_FULL) {
24121 +- r_duplex = 1;
24122 +- } else if (duplex == DUPLEX_HALF) {
24123 +- r_duplex = 0;
24124 +- } else {
24125 +- dev_err(smi->dev, "unsupported duplex %s\n",
24126 +- phy_duplex_to_str(duplex));
24127 +- return -EINVAL;
24128 +- }
24129 +- } else {
24130 +- /* Force the link down and reset any programmed configuration */
24131 +- r_link = 0;
24132 +- r_tx_pause = 0;
24133 +- r_rx_pause = 0;
24134 +- r_speed = 0;
24135 +- r_duplex = 0;
24136 +- }
24137 +-
24138 +- val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
24139 +- FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
24140 +- r_tx_pause) |
24141 +- FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
24142 +- r_rx_pause) |
24143 +- FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
24144 +- FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
24145 +- r_duplex) |
24146 +- FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
24147 +- ret = regmap_write(smi->map,
24148 +- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
24149 +- val);
24150 +- if (ret)
24151 +- return ret;
24152 +-
24153 +- return 0;
24154 +-}
24155 +-
24156 +-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
24157 +- phy_interface_t interface)
24158 +-{
24159 +- if (dsa_is_user_port(ds, port) &&
24160 +- (interface == PHY_INTERFACE_MODE_NA ||
24161 +- interface == PHY_INTERFACE_MODE_INTERNAL ||
24162 +- interface == PHY_INTERFACE_MODE_GMII))
24163 +- /* Internal PHY */
24164 +- return true;
24165 +- else if (dsa_is_cpu_port(ds, port) &&
24166 +- phy_interface_mode_is_rgmii(interface))
24167 +- /* Extension MAC */
24168 +- return true;
24169 +-
24170 +- return false;
24171 +-}
24172 +-
24173 +-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
24174 +- unsigned long *supported,
24175 +- struct phylink_link_state *state)
24176 +-{
24177 +- struct realtek_smi *smi = ds->priv;
24178 +- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
24179 +-
24180 +- /* include/linux/phylink.h says:
24181 +- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
24182 +- * expects the MAC driver to return all supported link modes.
24183 +- */
24184 +- if (state->interface != PHY_INTERFACE_MODE_NA &&
24185 +- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
24186 +- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
24187 +- phy_modes(state->interface), port);
24188 +- linkmode_zero(supported);
24189 +- return;
24190 +- }
24191 +-
24192 +- phylink_set_port_modes(mask);
24193 +-
24194 +- phylink_set(mask, Autoneg);
24195 +- phylink_set(mask, Pause);
24196 +- phylink_set(mask, Asym_Pause);
24197 +-
24198 +- phylink_set(mask, 10baseT_Half);
24199 +- phylink_set(mask, 10baseT_Full);
24200 +- phylink_set(mask, 100baseT_Half);
24201 +- phylink_set(mask, 100baseT_Full);
24202 +- phylink_set(mask, 1000baseT_Full);
24203 +-
24204 +- linkmode_and(supported, supported, mask);
24205 +- linkmode_and(state->advertising, state->advertising, mask);
24206 +-}
24207 +-
24208 +-static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
24209 +- unsigned int mode,
24210 +- const struct phylink_link_state *state)
24211 +-{
24212 +- struct realtek_smi *smi = ds->priv;
24213 +- int ret;
24214 +-
24215 +- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
24216 +- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
24217 +- phy_modes(state->interface), port);
24218 +- return;
24219 +- }
24220 +-
24221 +- if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
24222 +- dev_err(smi->dev,
24223 +- "port %d supports only conventional PHY or fixed-link\n",
24224 +- port);
24225 +- return;
24226 +- }
24227 +-
24228 +- if (phy_interface_mode_is_rgmii(state->interface)) {
24229 +- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
24230 +- if (ret)
24231 +- dev_err(smi->dev,
24232 +- "failed to configure RGMII mode on port %d: %d\n",
24233 +- port, ret);
24234 +- return;
24235 +- }
24236 +-
24237 +- /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
24238 +- * supports
24239 +- */
24240 +-}
24241 +-
24242 +-static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
24243 +- unsigned int mode,
24244 +- phy_interface_t interface)
24245 +-{
24246 +- struct realtek_smi *smi = ds->priv;
24247 +- struct rtl8365mb_port *p;
24248 +- struct rtl8365mb *mb;
24249 +- int ret;
24250 +-
24251 +- mb = smi->chip_data;
24252 +- p = &mb->ports[port];
24253 +- cancel_delayed_work_sync(&p->mib_work);
24254 +-
24255 +- if (phy_interface_mode_is_rgmii(interface)) {
24256 +- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
24257 +- false, false);
24258 +- if (ret)
24259 +- dev_err(smi->dev,
24260 +- "failed to reset forced mode on port %d: %d\n",
24261 +- port, ret);
24262 +-
24263 +- return;
24264 +- }
24265 +-}
24266 +-
24267 +-static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
24268 +- unsigned int mode,
24269 +- phy_interface_t interface,
24270 +- struct phy_device *phydev, int speed,
24271 +- int duplex, bool tx_pause,
24272 +- bool rx_pause)
24273 +-{
24274 +- struct realtek_smi *smi = ds->priv;
24275 +- struct rtl8365mb_port *p;
24276 +- struct rtl8365mb *mb;
24277 +- int ret;
24278 +-
24279 +- mb = smi->chip_data;
24280 +- p = &mb->ports[port];
24281 +- schedule_delayed_work(&p->mib_work, 0);
24282 +-
24283 +- if (phy_interface_mode_is_rgmii(interface)) {
24284 +- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
24285 +- duplex, tx_pause,
24286 +- rx_pause);
24287 +- if (ret)
24288 +- dev_err(smi->dev,
24289 +- "failed to force mode on port %d: %d\n", port,
24290 +- ret);
24291 +-
24292 +- return;
24293 +- }
24294 +-}
24295 +-
24296 +-static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
24297 +- u8 state)
24298 +-{
24299 +- struct realtek_smi *smi = ds->priv;
24300 +- enum rtl8365mb_stp_state val;
24301 +- int msti = 0;
24302 +-
24303 +- switch (state) {
24304 +- case BR_STATE_DISABLED:
24305 +- val = RTL8365MB_STP_STATE_DISABLED;
24306 +- break;
24307 +- case BR_STATE_BLOCKING:
24308 +- case BR_STATE_LISTENING:
24309 +- val = RTL8365MB_STP_STATE_BLOCKING;
24310 +- break;
24311 +- case BR_STATE_LEARNING:
24312 +- val = RTL8365MB_STP_STATE_LEARNING;
24313 +- break;
24314 +- case BR_STATE_FORWARDING:
24315 +- val = RTL8365MB_STP_STATE_FORWARDING;
24316 +- break;
24317 +- default:
24318 +- dev_err(smi->dev, "invalid STP state: %u\n", state);
24319 +- return;
24320 +- }
24321 +-
24322 +- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
24323 +- RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
24324 +- val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
24325 +-}
24326 +-
24327 +-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
24328 +- bool enable)
24329 +-{
24330 +- struct rtl8365mb *mb = smi->chip_data;
24331 +-
24332 +- /* Enable/disable learning by limiting the number of L2 addresses the
24333 +- * port can learn. Realtek documentation states that a limit of zero
24334 +- * disables learning. When enabling learning, set it to the chip's
24335 +- * maximum.
24336 +- */
24337 +- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
24338 +- enable ? mb->learn_limit_max : 0);
24339 +-}
24340 +-
24341 +-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
24342 +- u32 mask)
24343 +-{
24344 +- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
24345 +-}
24346 +-
24347 +-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
24348 +- u32 offset, u32 length, u64 *mibvalue)
24349 +-{
24350 +- u64 tmpvalue = 0;
24351 +- u32 val;
24352 +- int ret;
24353 +- int i;
24354 +-
24355 +- /* The MIB address is an SRAM address. We request a particular address
24356 +- * and then poll the control register before reading the value from some
24357 +- * counter registers.
24358 +- */
24359 +- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
24360 +- RTL8365MB_MIB_ADDRESS(port, offset));
24361 +- if (ret)
24362 +- return ret;
24363 +-
24364 +- /* Poll for completion */
24365 +- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
24366 +- !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
24367 +- 10, 100);
24368 +- if (ret)
24369 +- return ret;
24370 +-
24371 +- /* Presumably this indicates a MIB counter read failure */
24372 +- if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
24373 +- return -EIO;
24374 +-
24375 +- /* There are four MIB counter registers each holding a 16 bit word of a
24376 +- * MIB counter. Depending on the offset, we should read from the upper
24377 +- * two or lower two registers. In case the MIB counter is 4 words, we
24378 +- * read from all four registers.
24379 +- */
24380 +- if (length == 4)
24381 +- offset = 3;
24382 +- else
24383 +- offset = (offset + 1) % 4;
24384 +-
24385 +- /* Read the MIB counter 16 bits at a time */
24386 +- for (i = 0; i < length; i++) {
24387 +- ret = regmap_read(smi->map,
24388 +- RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
24389 +- if (ret)
24390 +- return ret;
24391 +-
24392 +- tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
24393 +- }
24394 +-
24395 +- /* Only commit the result if no error occurred */
24396 +- *mibvalue = tmpvalue;
24397 +-
24398 +- return 0;
24399 +-}
24400 +-
24401 +-static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
24402 +-{
24403 +- struct realtek_smi *smi = ds->priv;
24404 +- struct rtl8365mb *mb;
24405 +- int ret;
24406 +- int i;
24407 +-
24408 +- mb = smi->chip_data;
24409 +-
24410 +- mutex_lock(&mb->mib_lock);
24411 +- for (i = 0; i < RTL8365MB_MIB_END; i++) {
24412 +- struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
24413 +-
24414 +- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
24415 +- mib->length, &data[i]);
24416 +- if (ret) {
24417 +- dev_err(smi->dev,
24418 +- "failed to read port %d counters: %d\n", port,
24419 +- ret);
24420 +- break;
24421 +- }
24422 +- }
24423 +- mutex_unlock(&mb->mib_lock);
24424 +-}
24425 +-
24426 +-static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
24427 +-{
24428 +- int i;
24429 +-
24430 +- if (stringset != ETH_SS_STATS)
24431 +- return;
24432 +-
24433 +- for (i = 0; i < RTL8365MB_MIB_END; i++) {
24434 +- struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
24435 +-
24436 +- strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
24437 +- }
24438 +-}
24439 +-
24440 +-static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
24441 +-{
24442 +- if (sset != ETH_SS_STATS)
24443 +- return -EOPNOTSUPP;
24444 +-
24445 +- return RTL8365MB_MIB_END;
24446 +-}
24447 +-
24448 +-static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
24449 +- struct ethtool_eth_phy_stats *phy_stats)
24450 +-{
24451 +- struct realtek_smi *smi = ds->priv;
24452 +- struct rtl8365mb_mib_counter *mib;
24453 +- struct rtl8365mb *mb;
24454 +-
24455 +- mb = smi->chip_data;
24456 +- mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
24457 +-
24458 +- mutex_lock(&mb->mib_lock);
24459 +- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
24460 +- &phy_stats->SymbolErrorDuringCarrier);
24461 +- mutex_unlock(&mb->mib_lock);
24462 +-}
24463 +-
24464 +-static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
24465 +- struct ethtool_eth_mac_stats *mac_stats)
24466 +-{
24467 +- u64 cnt[RTL8365MB_MIB_END] = {
24468 +- [RTL8365MB_MIB_ifOutOctets] = 1,
24469 +- [RTL8365MB_MIB_ifOutUcastPkts] = 1,
24470 +- [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
24471 +- [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
24472 +- [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
24473 +- [RTL8365MB_MIB_ifOutDiscards] = 1,
24474 +- [RTL8365MB_MIB_ifInOctets] = 1,
24475 +- [RTL8365MB_MIB_ifInUcastPkts] = 1,
24476 +- [RTL8365MB_MIB_ifInMulticastPkts] = 1,
24477 +- [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
24478 +- [RTL8365MB_MIB_dot3InPauseFrames] = 1,
24479 +- [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
24480 +- [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
24481 +- [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
24482 +- [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
24483 +- [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
24484 +- [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
24485 +-
24486 +- };
24487 +- struct realtek_smi *smi = ds->priv;
24488 +- struct rtl8365mb *mb;
24489 +- int ret;
24490 +- int i;
24491 +-
24492 +- mb = smi->chip_data;
24493 +-
24494 +- mutex_lock(&mb->mib_lock);
24495 +- for (i = 0; i < RTL8365MB_MIB_END; i++) {
24496 +- struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
24497 +-
24498 +- /* Only fetch required MIB counters (marked = 1 above) */
24499 +- if (!cnt[i])
24500 +- continue;
24501 +-
24502 +- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
24503 +- mib->length, &cnt[i]);
24504 +- if (ret)
24505 +- break;
24506 +- }
24507 +- mutex_unlock(&mb->mib_lock);
24508 +-
24509 +- /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
24510 +- * IEEE 802.3 Managed Objects. This is not always completely faithful,
24511 +- * but we try out best. See RFC 3635 for a detailed treatment of the
24512 +- * subject.
24513 +- */
24514 +-
24515 +- mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
24516 +- cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
24517 +- cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
24518 +- cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
24519 +- cnt[RTL8365MB_MIB_ifOutDiscards];
24520 +- mac_stats->SingleCollisionFrames =
24521 +- cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
24522 +- mac_stats->MultipleCollisionFrames =
24523 +- cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
24524 +- mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
24525 +- cnt[RTL8365MB_MIB_ifInMulticastPkts] +
24526 +- cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
24527 +- cnt[RTL8365MB_MIB_dot3InPauseFrames];
24528 +- mac_stats->FrameCheckSequenceErrors =
24529 +- cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
24530 +- mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
24531 +- 18 * mac_stats->FramesTransmittedOK;
24532 +- mac_stats->FramesWithDeferredXmissions =
24533 +- cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
24534 +- mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
24535 +- mac_stats->FramesAbortedDueToXSColls =
24536 +- cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
24537 +- mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
24538 +- 18 * mac_stats->FramesReceivedOK;
24539 +- mac_stats->MulticastFramesXmittedOK =
24540 +- cnt[RTL8365MB_MIB_ifOutMulticastPkts];
24541 +- mac_stats->BroadcastFramesXmittedOK =
24542 +- cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
24543 +- mac_stats->MulticastFramesReceivedOK =
24544 +- cnt[RTL8365MB_MIB_ifInMulticastPkts];
24545 +- mac_stats->BroadcastFramesReceivedOK =
24546 +- cnt[RTL8365MB_MIB_ifInBroadcastPkts];
24547 +-}
24548 +-
24549 +-static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
24550 +- struct ethtool_eth_ctrl_stats *ctrl_stats)
24551 +-{
24552 +- struct realtek_smi *smi = ds->priv;
24553 +- struct rtl8365mb_mib_counter *mib;
24554 +- struct rtl8365mb *mb;
24555 +-
24556 +- mb = smi->chip_data;
24557 +- mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
24558 +-
24559 +- mutex_lock(&mb->mib_lock);
24560 +- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
24561 +- &ctrl_stats->UnsupportedOpcodesReceived);
24562 +- mutex_unlock(&mb->mib_lock);
24563 +-}
24564 +-
24565 +-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
24566 +-{
24567 +- u64 cnt[RTL8365MB_MIB_END] = {
24568 +- [RTL8365MB_MIB_ifOutOctets] = 1,
24569 +- [RTL8365MB_MIB_ifOutUcastPkts] = 1,
24570 +- [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
24571 +- [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
24572 +- [RTL8365MB_MIB_ifOutDiscards] = 1,
24573 +- [RTL8365MB_MIB_ifInOctets] = 1,
24574 +- [RTL8365MB_MIB_ifInUcastPkts] = 1,
24575 +- [RTL8365MB_MIB_ifInMulticastPkts] = 1,
24576 +- [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
24577 +- [RTL8365MB_MIB_etherStatsDropEvents] = 1,
24578 +- [RTL8365MB_MIB_etherStatsCollisions] = 1,
24579 +- [RTL8365MB_MIB_etherStatsFragments] = 1,
24580 +- [RTL8365MB_MIB_etherStatsJabbers] = 1,
24581 +- [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
24582 +- [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
24583 +- };
24584 +- struct rtl8365mb *mb = smi->chip_data;
24585 +- struct rtnl_link_stats64 *stats;
24586 +- int ret;
24587 +- int i;
24588 +-
24589 +- stats = &mb->ports[port].stats;
24590 +-
24591 +- mutex_lock(&mb->mib_lock);
24592 +- for (i = 0; i < RTL8365MB_MIB_END; i++) {
24593 +- struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
24594 +-
24595 +- /* Only fetch required MIB counters (marked = 1 above) */
24596 +- if (!cnt[i])
24597 +- continue;
24598 +-
24599 +- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
24600 +- c->length, &cnt[i]);
24601 +- if (ret)
24602 +- break;
24603 +- }
24604 +- mutex_unlock(&mb->mib_lock);
24605 +-
24606 +- /* Don't update statistics if there was an error reading the counters */
24607 +- if (ret)
24608 +- return;
24609 +-
24610 +- spin_lock(&mb->ports[port].stats_lock);
24611 +-
24612 +- stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
24613 +- cnt[RTL8365MB_MIB_ifInMulticastPkts] +
24614 +- cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
24615 +- cnt[RTL8365MB_MIB_ifOutDiscards];
24616 +-
24617 +- stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
24618 +- cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
24619 +- cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
24620 +-
24621 +- /* if{In,Out}Octets includes FCS - remove it */
24622 +- stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
24623 +- stats->tx_bytes =
24624 +- cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
24625 +-
24626 +- stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
24627 +- stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
24628 +-
24629 +- stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
24630 +- stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
24631 +-
24632 +- stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
24633 +- cnt[RTL8365MB_MIB_etherStatsJabbers];
24634 +- stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
24635 +- stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
24636 +-
24637 +- stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
24638 +- stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
24639 +- stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
24640 +-
24641 +- spin_unlock(&mb->ports[port].stats_lock);
24642 +-}
24643 +-
24644 +-static void rtl8365mb_stats_poll(struct work_struct *work)
24645 +-{
24646 +- struct rtl8365mb_port *p = container_of(to_delayed_work(work),
24647 +- struct rtl8365mb_port,
24648 +- mib_work);
24649 +- struct realtek_smi *smi = p->smi;
24650 +-
24651 +- rtl8365mb_stats_update(smi, p->index);
24652 +-
24653 +- schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
24654 +-}
24655 +-
24656 +-static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
24657 +- struct rtnl_link_stats64 *s)
24658 +-{
24659 +- struct realtek_smi *smi = ds->priv;
24660 +- struct rtl8365mb_port *p;
24661 +- struct rtl8365mb *mb;
24662 +-
24663 +- mb = smi->chip_data;
24664 +- p = &mb->ports[port];
24665 +-
24666 +- spin_lock(&p->stats_lock);
24667 +- memcpy(s, &p->stats, sizeof(*s));
24668 +- spin_unlock(&p->stats_lock);
24669 +-}
24670 +-
24671 +-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
24672 +-{
24673 +- struct rtl8365mb *mb = smi->chip_data;
24674 +- int i;
24675 +-
24676 +- /* Per-chip global mutex to protect MIB counter access, since doing
24677 +- * so requires accessing a series of registers in a particular order.
24678 +- */
24679 +- mutex_init(&mb->mib_lock);
24680 +-
24681 +- for (i = 0; i < smi->num_ports; i++) {
24682 +- struct rtl8365mb_port *p = &mb->ports[i];
24683 +-
24684 +- if (dsa_is_unused_port(smi->ds, i))
24685 +- continue;
24686 +-
24687 +- /* Per-port spinlock to protect the stats64 data */
24688 +- spin_lock_init(&p->stats_lock);
24689 +-
24690 +- /* This work polls the MIB counters and keeps the stats64 data
24691 +- * up-to-date.
24692 +- */
24693 +- INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
24694 +- }
24695 +-}
24696 +-
24697 +-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
24698 +-{
24699 +- struct rtl8365mb *mb = smi->chip_data;
24700 +- int i;
24701 +-
24702 +- for (i = 0; i < smi->num_ports; i++) {
24703 +- struct rtl8365mb_port *p = &mb->ports[i];
24704 +-
24705 +- if (dsa_is_unused_port(smi->ds, i))
24706 +- continue;
24707 +-
24708 +- cancel_delayed_work_sync(&p->mib_work);
24709 +- }
24710 +-}
24711 +-
24712 +-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
24713 +- u32 *val)
24714 +-{
24715 +- int ret;
24716 +-
24717 +- ret = regmap_read(smi->map, reg, val);
24718 +- if (ret)
24719 +- return ret;
24720 +-
24721 +- return regmap_write(smi->map, reg, *val);
24722 +-}
24723 +-
24724 +-static irqreturn_t rtl8365mb_irq(int irq, void *data)
24725 +-{
24726 +- struct realtek_smi *smi = data;
24727 +- unsigned long line_changes = 0;
24728 +- struct rtl8365mb *mb;
24729 +- u32 stat;
24730 +- int line;
24731 +- int ret;
24732 +-
24733 +- mb = smi->chip_data;
24734 +-
24735 +- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
24736 +- &stat);
24737 +- if (ret)
24738 +- goto out_error;
24739 +-
24740 +- if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
24741 +- u32 linkdown_ind;
24742 +- u32 linkup_ind;
24743 +- u32 val;
24744 +-
24745 +- ret = rtl8365mb_get_and_clear_status_reg(
24746 +- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
24747 +- if (ret)
24748 +- goto out_error;
24749 +-
24750 +- linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
24751 +-
24752 +- ret = rtl8365mb_get_and_clear_status_reg(
24753 +- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
24754 +- if (ret)
24755 +- goto out_error;
24756 +-
24757 +- linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
24758 +-
24759 +- line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
24760 +- }
24761 +-
24762 +- if (!line_changes)
24763 +- goto out_none;
24764 +-
24765 +- for_each_set_bit(line, &line_changes, smi->num_ports) {
24766 +- int child_irq = irq_find_mapping(smi->irqdomain, line);
24767 +-
24768 +- handle_nested_irq(child_irq);
24769 +- }
24770 +-
24771 +- return IRQ_HANDLED;
24772 +-
24773 +-out_error:
24774 +- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
24775 +-
24776 +-out_none:
24777 +- return IRQ_NONE;
24778 +-}
24779 +-
24780 +-static struct irq_chip rtl8365mb_irq_chip = {
24781 +- .name = "rtl8365mb",
24782 +- /* The hardware doesn't support masking IRQs on a per-port basis */
24783 +-};
24784 +-
24785 +-static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
24786 +- irq_hw_number_t hwirq)
24787 +-{
24788 +- irq_set_chip_data(irq, domain->host_data);
24789 +- irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
24790 +- irq_set_nested_thread(irq, 1);
24791 +- irq_set_noprobe(irq);
24792 +-
24793 +- return 0;
24794 +-}
24795 +-
24796 +-static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
24797 +-{
24798 +- irq_set_nested_thread(irq, 0);
24799 +- irq_set_chip_and_handler(irq, NULL, NULL);
24800 +- irq_set_chip_data(irq, NULL);
24801 +-}
24802 +-
24803 +-static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
24804 +- .map = rtl8365mb_irq_map,
24805 +- .unmap = rtl8365mb_irq_unmap,
24806 +- .xlate = irq_domain_xlate_onecell,
24807 +-};
24808 +-
24809 +-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
24810 +-{
24811 +- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
24812 +- RTL8365MB_INTR_LINK_CHANGE_MASK,
24813 +- FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
24814 +- enable ? 1 : 0));
24815 +-}
24816 +-
24817 +-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
24818 +-{
24819 +- return rtl8365mb_set_irq_enable(smi, true);
24820 +-}
24821 +-
24822 +-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
24823 +-{
24824 +- return rtl8365mb_set_irq_enable(smi, false);
24825 +-}
24826 +-
24827 +-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
24828 +-{
24829 +- struct rtl8365mb *mb = smi->chip_data;
24830 +- struct device_node *intc;
24831 +- u32 irq_trig;
24832 +- int virq;
24833 +- int irq;
24834 +- u32 val;
24835 +- int ret;
24836 +- int i;
24837 +-
24838 +- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
24839 +- if (!intc) {
24840 +- dev_err(smi->dev, "missing child interrupt-controller node\n");
24841 +- return -EINVAL;
24842 +- }
24843 +-
24844 +- /* rtl8365mb IRQs cascade off this one */
24845 +- irq = of_irq_get(intc, 0);
24846 +- if (irq <= 0) {
24847 +- if (irq != -EPROBE_DEFER)
24848 +- dev_err(smi->dev, "failed to get parent irq: %d\n",
24849 +- irq);
24850 +- ret = irq ? irq : -EINVAL;
24851 +- goto out_put_node;
24852 +- }
24853 +-
24854 +- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
24855 +- &rtl8365mb_irqdomain_ops, smi);
24856 +- if (!smi->irqdomain) {
24857 +- dev_err(smi->dev, "failed to add irq domain\n");
24858 +- ret = -ENOMEM;
24859 +- goto out_put_node;
24860 +- }
24861 +-
24862 +- for (i = 0; i < smi->num_ports; i++) {
24863 +- virq = irq_create_mapping(smi->irqdomain, i);
24864 +- if (!virq) {
24865 +- dev_err(smi->dev,
24866 +- "failed to create irq domain mapping\n");
24867 +- ret = -EINVAL;
24868 +- goto out_remove_irqdomain;
24869 +- }
24870 +-
24871 +- irq_set_parent(virq, irq);
24872 +- }
24873 +-
24874 +- /* Configure chip interrupt signal polarity */
24875 +- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
24876 +- switch (irq_trig) {
24877 +- case IRQF_TRIGGER_RISING:
24878 +- case IRQF_TRIGGER_HIGH:
24879 +- val = RTL8365MB_INTR_POLARITY_HIGH;
24880 +- break;
24881 +- case IRQF_TRIGGER_FALLING:
24882 +- case IRQF_TRIGGER_LOW:
24883 +- val = RTL8365MB_INTR_POLARITY_LOW;
24884 +- break;
24885 +- default:
24886 +- dev_err(smi->dev, "unsupported irq trigger type %u\n",
24887 +- irq_trig);
24888 +- ret = -EINVAL;
24889 +- goto out_remove_irqdomain;
24890 +- }
24891 +-
24892 +- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
24893 +- RTL8365MB_INTR_POLARITY_MASK,
24894 +- FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
24895 +- if (ret)
24896 +- goto out_remove_irqdomain;
24897 +-
24898 +- /* Disable the interrupt in case the chip has it enabled on reset */
24899 +- ret = rtl8365mb_irq_disable(smi);
24900 +- if (ret)
24901 +- goto out_remove_irqdomain;
24902 +-
24903 +- /* Clear the interrupt status register */
24904 +- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
24905 +- RTL8365MB_INTR_ALL_MASK);
24906 +- if (ret)
24907 +- goto out_remove_irqdomain;
24908 +-
24909 +- ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
24910 +- "rtl8365mb", smi);
24911 +- if (ret) {
24912 +- dev_err(smi->dev, "failed to request irq: %d\n", ret);
24913 +- goto out_remove_irqdomain;
24914 +- }
24915 +-
24916 +- /* Store the irq so that we know to free it during teardown */
24917 +- mb->irq = irq;
24918 +-
24919 +- ret = rtl8365mb_irq_enable(smi);
24920 +- if (ret)
24921 +- goto out_free_irq;
24922 +-
24923 +- of_node_put(intc);
24924 +-
24925 +- return 0;
24926 +-
24927 +-out_free_irq:
24928 +- free_irq(mb->irq, smi);
24929 +- mb->irq = 0;
24930 +-
24931 +-out_remove_irqdomain:
24932 +- for (i = 0; i < smi->num_ports; i++) {
24933 +- virq = irq_find_mapping(smi->irqdomain, i);
24934 +- irq_dispose_mapping(virq);
24935 +- }
24936 +-
24937 +- irq_domain_remove(smi->irqdomain);
24938 +- smi->irqdomain = NULL;
24939 +-
24940 +-out_put_node:
24941 +- of_node_put(intc);
24942 +-
24943 +- return ret;
24944 +-}
24945 +-
24946 +-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
24947 +-{
24948 +- struct rtl8365mb *mb = smi->chip_data;
24949 +- int virq;
24950 +- int i;
24951 +-
24952 +- if (mb->irq) {
24953 +- free_irq(mb->irq, smi);
24954 +- mb->irq = 0;
24955 +- }
24956 +-
24957 +- if (smi->irqdomain) {
24958 +- for (i = 0; i < smi->num_ports; i++) {
24959 +- virq = irq_find_mapping(smi->irqdomain, i);
24960 +- irq_dispose_mapping(virq);
24961 +- }
24962 +-
24963 +- irq_domain_remove(smi->irqdomain);
24964 +- smi->irqdomain = NULL;
24965 +- }
24966 +-}
24967 +-
24968 +-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
24969 +-{
24970 +- struct rtl8365mb *mb = smi->chip_data;
24971 +- struct rtl8365mb_cpu *cpu = &mb->cpu;
24972 +- u32 val;
24973 +- int ret;
24974 +-
24975 +- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
24976 +- RTL8365MB_CPU_PORT_MASK_MASK,
24977 +- FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
24978 +- cpu->mask));
24979 +- if (ret)
24980 +- return ret;
24981 +-
24982 +- val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
24983 +- FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
24984 +- FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
24985 +- FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
24986 +- FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
24987 +- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
24988 +- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
24989 +- cpu->trap_port >> 3);
24990 +- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
24991 +- if (ret)
24992 +- return ret;
24993 +-
24994 +- return 0;
24995 +-}
24996 +-
24997 +-static int rtl8365mb_switch_init(struct realtek_smi *smi)
24998 +-{
24999 +- struct rtl8365mb *mb = smi->chip_data;
25000 +- int ret;
25001 +- int i;
25002 +-
25003 +- /* Do any chip-specific init jam before getting to the common stuff */
25004 +- if (mb->jam_table) {
25005 +- for (i = 0; i < mb->jam_size; i++) {
25006 +- ret = regmap_write(smi->map, mb->jam_table[i].reg,
25007 +- mb->jam_table[i].val);
25008 +- if (ret)
25009 +- return ret;
25010 +- }
25011 +- }
25012 +-
25013 +- /* Common init jam */
25014 +- for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
25015 +- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
25016 +- rtl8365mb_init_jam_common[i].val);
25017 +- if (ret)
25018 +- return ret;
25019 +- }
25020 +-
25021 +- return 0;
25022 +-}
25023 +-
25024 +-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
25025 +-{
25026 +- u32 val;
25027 +-
25028 +- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
25029 +- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
25030 +- 1));
25031 +-
25032 +- /* Realtek documentation says the chip needs 1 second to reset. Sleep
25033 +- * for 100 ms before accessing any registers to prevent ACK timeouts.
25034 +- */
25035 +- msleep(100);
25036 +- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
25037 +- !(val & RTL8365MB_CHIP_RESET_HW_MASK),
25038 +- 20000, 1e6);
25039 +-}
25040 +-
25041 +-static int rtl8365mb_setup(struct dsa_switch *ds)
25042 +-{
25043 +- struct realtek_smi *smi = ds->priv;
25044 +- struct rtl8365mb *mb;
25045 +- int ret;
25046 +- int i;
25047 +-
25048 +- mb = smi->chip_data;
25049 +-
25050 +- ret = rtl8365mb_reset_chip(smi);
25051 +- if (ret) {
25052 +- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
25053 +- goto out_error;
25054 +- }
25055 +-
25056 +- /* Configure switch to vendor-defined initial state */
25057 +- ret = rtl8365mb_switch_init(smi);
25058 +- if (ret) {
25059 +- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
25060 +- goto out_error;
25061 +- }
25062 +-
25063 +- /* Set up cascading IRQs */
25064 +- ret = rtl8365mb_irq_setup(smi);
25065 +- if (ret == -EPROBE_DEFER)
25066 +- return ret;
25067 +- else if (ret)
25068 +- dev_info(smi->dev, "no interrupt support\n");
25069 +-
25070 +- /* Configure CPU tagging */
25071 +- ret = rtl8365mb_cpu_config(smi);
25072 +- if (ret)
25073 +- goto out_teardown_irq;
25074 +-
25075 +- /* Configure ports */
25076 +- for (i = 0; i < smi->num_ports; i++) {
25077 +- struct rtl8365mb_port *p = &mb->ports[i];
25078 +-
25079 +- if (dsa_is_unused_port(smi->ds, i))
25080 +- continue;
25081 +-
25082 +- /* Set up per-port private data */
25083 +- p->smi = smi;
25084 +- p->index = i;
25085 +-
25086 +- /* Forward only to the CPU */
25087 +- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
25088 +- if (ret)
25089 +- goto out_teardown_irq;
25090 +-
25091 +- /* Disable learning */
25092 +- ret = rtl8365mb_port_set_learning(smi, i, false);
25093 +- if (ret)
25094 +- goto out_teardown_irq;
25095 +-
25096 +- /* Set the initial STP state of all ports to DISABLED, otherwise
25097 +- * ports will still forward frames to the CPU despite being
25098 +- * administratively down by default.
25099 +- */
25100 +- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
25101 +- }
25102 +-
25103 +- /* Set maximum packet length to 1536 bytes */
25104 +- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
25105 +- RTL8365MB_CFG0_MAX_LEN_MASK,
25106 +- FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
25107 +- if (ret)
25108 +- goto out_teardown_irq;
25109 +-
25110 +- ret = realtek_smi_setup_mdio(smi);
25111 +- if (ret) {
25112 +- dev_err(smi->dev, "could not set up MDIO bus\n");
25113 +- goto out_teardown_irq;
25114 +- }
25115 +-
25116 +- /* Start statistics counter polling */
25117 +- rtl8365mb_stats_setup(smi);
25118 +-
25119 +- return 0;
25120 +-
25121 +-out_teardown_irq:
25122 +- rtl8365mb_irq_teardown(smi);
25123 +-
25124 +-out_error:
25125 +- return ret;
25126 +-}
25127 +-
25128 +-static void rtl8365mb_teardown(struct dsa_switch *ds)
25129 +-{
25130 +- struct realtek_smi *smi = ds->priv;
25131 +-
25132 +- rtl8365mb_stats_teardown(smi);
25133 +- rtl8365mb_irq_teardown(smi);
25134 +-}
25135 +-
25136 +-static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
25137 +-{
25138 +- int ret;
25139 +-
25140 +- /* For some reason we have to write a magic value to an arbitrary
25141 +- * register whenever accessing the chip ID/version registers.
25142 +- */
25143 +- ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
25144 +- if (ret)
25145 +- return ret;
25146 +-
25147 +- ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
25148 +- if (ret)
25149 +- return ret;
25150 +-
25151 +- ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
25152 +- if (ret)
25153 +- return ret;
25154 +-
25155 +- /* Reset magic register */
25156 +- ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
25157 +- if (ret)
25158 +- return ret;
25159 +-
25160 +- return 0;
25161 +-}
25162 +-
25163 +-static int rtl8365mb_detect(struct realtek_smi *smi)
25164 +-{
25165 +- struct rtl8365mb *mb = smi->chip_data;
25166 +- u32 chip_id;
25167 +- u32 chip_ver;
25168 +- int ret;
25169 +-
25170 +- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
25171 +- if (ret) {
25172 +- dev_err(smi->dev, "failed to read chip id and version: %d\n",
25173 +- ret);
25174 +- return ret;
25175 +- }
25176 +-
25177 +- switch (chip_id) {
25178 +- case RTL8365MB_CHIP_ID_8365MB_VC:
25179 +- dev_info(smi->dev,
25180 +- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
25181 +- chip_ver);
25182 +-
25183 +- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
25184 +- smi->num_ports = smi->cpu_port + 1;
25185 +-
25186 +- mb->smi = smi;
25187 +- mb->chip_id = chip_id;
25188 +- mb->chip_ver = chip_ver;
25189 +- mb->port_mask = BIT(smi->num_ports) - 1;
25190 +- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
25191 +- mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
25192 +- mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
25193 +-
25194 +- mb->cpu.enable = 1;
25195 +- mb->cpu.mask = BIT(smi->cpu_port);
25196 +- mb->cpu.trap_port = smi->cpu_port;
25197 +- mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
25198 +- mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
25199 +- mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
25200 +- mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
25201 +-
25202 +- break;
25203 +- default:
25204 +- dev_err(smi->dev,
25205 +- "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
25206 +- chip_id, chip_ver);
25207 +- return -ENODEV;
25208 +- }
25209 +-
25210 +- return 0;
25211 +-}
25212 +-
25213 +-static const struct dsa_switch_ops rtl8365mb_switch_ops = {
25214 +- .get_tag_protocol = rtl8365mb_get_tag_protocol,
25215 +- .setup = rtl8365mb_setup,
25216 +- .teardown = rtl8365mb_teardown,
25217 +- .phylink_validate = rtl8365mb_phylink_validate,
25218 +- .phylink_mac_config = rtl8365mb_phylink_mac_config,
25219 +- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
25220 +- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
25221 +- .port_stp_state_set = rtl8365mb_port_stp_state_set,
25222 +- .get_strings = rtl8365mb_get_strings,
25223 +- .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
25224 +- .get_sset_count = rtl8365mb_get_sset_count,
25225 +- .get_eth_phy_stats = rtl8365mb_get_phy_stats,
25226 +- .get_eth_mac_stats = rtl8365mb_get_mac_stats,
25227 +- .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
25228 +- .get_stats64 = rtl8365mb_get_stats64,
25229 +-};
25230 +-
25231 +-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
25232 +- .detect = rtl8365mb_detect,
25233 +- .phy_read = rtl8365mb_phy_read,
25234 +- .phy_write = rtl8365mb_phy_write,
25235 +-};
25236 +-
25237 +-const struct realtek_smi_variant rtl8365mb_variant = {
25238 +- .ds_ops = &rtl8365mb_switch_ops,
25239 +- .ops = &rtl8365mb_smi_ops,
25240 +- .clk_delay = 10,
25241 +- .cmd_read = 0xb9,
25242 +- .cmd_write = 0xb8,
25243 +- .chip_data_sz = sizeof(struct rtl8365mb),
25244 +-};
25245 +-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
25246 +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
25247 +deleted file mode 100644
25248 +index bdb8d8d348807..0000000000000
25249 +--- a/drivers/net/dsa/rtl8366.c
25250 ++++ /dev/null
25251 +@@ -1,448 +0,0 @@
25252 +-// SPDX-License-Identifier: GPL-2.0
25253 +-/* Realtek SMI library helpers for the RTL8366x variants
25254 +- * RTL8366RB and RTL8366S
25255 +- *
25256 +- * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
25257 +- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
25258 +- * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
25259 +- * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
25260 +- * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
25261 +- */
25262 +-#include <linux/if_bridge.h>
25263 +-#include <net/dsa.h>
25264 +-
25265 +-#include "realtek-smi-core.h"
25266 +-
25267 +-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
25268 +-{
25269 +- int ret;
25270 +- int i;
25271 +-
25272 +- *used = 0;
25273 +- for (i = 0; i < smi->num_ports; i++) {
25274 +- int index = 0;
25275 +-
25276 +- ret = smi->ops->get_mc_index(smi, i, &index);
25277 +- if (ret)
25278 +- return ret;
25279 +-
25280 +- if (mc_index == index) {
25281 +- *used = 1;
25282 +- break;
25283 +- }
25284 +- }
25285 +-
25286 +- return 0;
25287 +-}
25288 +-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
25289 +-
25290 +-/**
25291 +- * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
25292 +- * @smi: the Realtek SMI device instance
25293 +- * @vid: the VLAN ID to look up or allocate
25294 +- * @vlanmc: the pointer will be assigned to a pointer to a valid member config
25295 +- * if successful
25296 +- * @return: index of a new member config or negative error number
25297 +- */
25298 +-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
25299 +- struct rtl8366_vlan_mc *vlanmc)
25300 +-{
25301 +- struct rtl8366_vlan_4k vlan4k;
25302 +- int ret;
25303 +- int i;
25304 +-
25305 +- /* Try to find an existing member config entry for this VID */
25306 +- for (i = 0; i < smi->num_vlan_mc; i++) {
25307 +- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
25308 +- if (ret) {
25309 +- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
25310 +- i, vid);
25311 +- return ret;
25312 +- }
25313 +-
25314 +- if (vid == vlanmc->vid)
25315 +- return i;
25316 +- }
25317 +-
25318 +- /* We have no MC entry for this VID, try to find an empty one */
25319 +- for (i = 0; i < smi->num_vlan_mc; i++) {
25320 +- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
25321 +- if (ret) {
25322 +- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
25323 +- i, vid);
25324 +- return ret;
25325 +- }
25326 +-
25327 +- if (vlanmc->vid == 0 && vlanmc->member == 0) {
25328 +- /* Update the entry from the 4K table */
25329 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
25330 +- if (ret) {
25331 +- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
25332 +- i, vid);
25333 +- return ret;
25334 +- }
25335 +-
25336 +- vlanmc->vid = vid;
25337 +- vlanmc->member = vlan4k.member;
25338 +- vlanmc->untag = vlan4k.untag;
25339 +- vlanmc->fid = vlan4k.fid;
25340 +- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
25341 +- if (ret) {
25342 +- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
25343 +- i, vid);
25344 +- return ret;
25345 +- }
25346 +-
25347 +- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
25348 +- i, vid);
25349 +- return i;
25350 +- }
25351 +- }
25352 +-
25353 +- /* MC table is full, try to find an unused entry and replace it */
25354 +- for (i = 0; i < smi->num_vlan_mc; i++) {
25355 +- int used;
25356 +-
25357 +- ret = rtl8366_mc_is_used(smi, i, &used);
25358 +- if (ret)
25359 +- return ret;
25360 +-
25361 +- if (!used) {
25362 +- /* Update the entry from the 4K table */
25363 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
25364 +- if (ret)
25365 +- return ret;
25366 +-
25367 +- vlanmc->vid = vid;
25368 +- vlanmc->member = vlan4k.member;
25369 +- vlanmc->untag = vlan4k.untag;
25370 +- vlanmc->fid = vlan4k.fid;
25371 +- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
25372 +- if (ret) {
25373 +- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
25374 +- i, vid);
25375 +- return ret;
25376 +- }
25377 +- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
25378 +- i, vid);
25379 +- return i;
25380 +- }
25381 +- }
25382 +-
25383 +- dev_err(smi->dev, "all VLAN member configurations are in use\n");
25384 +- return -ENOSPC;
25385 +-}
25386 +-
25387 +-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
25388 +- u32 untag, u32 fid)
25389 +-{
25390 +- struct rtl8366_vlan_mc vlanmc;
25391 +- struct rtl8366_vlan_4k vlan4k;
25392 +- int mc;
25393 +- int ret;
25394 +-
25395 +- if (!smi->ops->is_vlan_valid(smi, vid))
25396 +- return -EINVAL;
25397 +-
25398 +- dev_dbg(smi->dev,
25399 +- "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
25400 +- vid, member, untag);
25401 +-
25402 +- /* Update the 4K table */
25403 +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
25404 +- if (ret)
25405 +- return ret;
25406 +-
25407 +- vlan4k.member |= member;
25408 +- vlan4k.untag |= untag;
25409 +- vlan4k.fid = fid;
25410 +- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
25411 +- if (ret)
25412 +- return ret;
25413 +-
25414 +- dev_dbg(smi->dev,
25415 +- "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
25416 +- vid, vlan4k.member, vlan4k.untag);
25417 +-
25418 +- /* Find or allocate a member config for this VID */
25419 +- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
25420 +- if (ret < 0)
25421 +- return ret;
25422 +- mc = ret;
25423 +-
25424 +- /* Update the MC entry */
25425 +- vlanmc.member |= member;
25426 +- vlanmc.untag |= untag;
25427 +- vlanmc.fid = fid;
25428 +-
25429 +- /* Commit updates to the MC entry */
25430 +- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
25431 +- if (ret)
25432 +- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
25433 +- mc, vid);
25434 +- else
25435 +- dev_dbg(smi->dev,
25436 +- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
25437 +- vid, vlanmc.member, vlanmc.untag);
25438 +-
25439 +- return ret;
25440 +-}
25441 +-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
25442 +-
25443 +-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
25444 +- unsigned int vid)
25445 +-{
25446 +- struct rtl8366_vlan_mc vlanmc;
25447 +- int mc;
25448 +- int ret;
25449 +-
25450 +- if (!smi->ops->is_vlan_valid(smi, vid))
25451 +- return -EINVAL;
25452 +-
25453 +- /* Find or allocate a member config for this VID */
25454 +- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
25455 +- if (ret < 0)
25456 +- return ret;
25457 +- mc = ret;
25458 +-
25459 +- ret = smi->ops->set_mc_index(smi, port, mc);
25460 +- if (ret) {
25461 +- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
25462 +- mc, port);
25463 +- return ret;
25464 +- }
25465 +-
25466 +- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
25467 +- port, vid, mc);
25468 +-
25469 +- return 0;
25470 +-}
25471 +-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
25472 +-
25473 +-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
25474 +-{
25475 +- int ret;
25476 +-
25477 +- /* To enable 4k VLAN, ordinary VLAN must be enabled first,
25478 +- * but if we disable 4k VLAN it is fine to leave ordinary
25479 +- * VLAN enabled.
25480 +- */
25481 +- if (enable) {
25482 +- /* Make sure VLAN is ON */
25483 +- ret = smi->ops->enable_vlan(smi, true);
25484 +- if (ret)
25485 +- return ret;
25486 +-
25487 +- smi->vlan_enabled = true;
25488 +- }
25489 +-
25490 +- ret = smi->ops->enable_vlan4k(smi, enable);
25491 +- if (ret)
25492 +- return ret;
25493 +-
25494 +- smi->vlan4k_enabled = enable;
25495 +- return 0;
25496 +-}
25497 +-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
25498 +-
25499 +-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
25500 +-{
25501 +- int ret;
25502 +-
25503 +- ret = smi->ops->enable_vlan(smi, enable);
25504 +- if (ret)
25505 +- return ret;
25506 +-
25507 +- smi->vlan_enabled = enable;
25508 +-
25509 +- /* If we turn VLAN off, make sure that we turn off
25510 +- * 4k VLAN as well, if that happened to be on.
25511 +- */
25512 +- if (!enable) {
25513 +- smi->vlan4k_enabled = false;
25514 +- ret = smi->ops->enable_vlan4k(smi, false);
25515 +- }
25516 +-
25517 +- return ret;
25518 +-}
25519 +-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
25520 +-
25521 +-int rtl8366_reset_vlan(struct realtek_smi *smi)
25522 +-{
25523 +- struct rtl8366_vlan_mc vlanmc;
25524 +- int ret;
25525 +- int i;
25526 +-
25527 +- rtl8366_enable_vlan(smi, false);
25528 +- rtl8366_enable_vlan4k(smi, false);
25529 +-
25530 +- /* Clear the 16 VLAN member configurations */
25531 +- vlanmc.vid = 0;
25532 +- vlanmc.priority = 0;
25533 +- vlanmc.member = 0;
25534 +- vlanmc.untag = 0;
25535 +- vlanmc.fid = 0;
25536 +- for (i = 0; i < smi->num_vlan_mc; i++) {
25537 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
25538 +- if (ret)
25539 +- return ret;
25540 +- }
25541 +-
25542 +- return 0;
25543 +-}
25544 +-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
25545 +-
25546 +-int rtl8366_vlan_add(struct dsa_switch *ds, int port,
25547 +- const struct switchdev_obj_port_vlan *vlan,
25548 +- struct netlink_ext_ack *extack)
25549 +-{
25550 +- bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
25551 +- bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
25552 +- struct realtek_smi *smi = ds->priv;
25553 +- u32 member = 0;
25554 +- u32 untag = 0;
25555 +- int ret;
25556 +-
25557 +- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
25558 +- NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
25559 +- return -EINVAL;
25560 +- }
25561 +-
25562 +- /* Enable VLAN in the hardware
25563 +- * FIXME: what's with this 4k business?
25564 +- * Just rtl8366_enable_vlan() seems inconclusive.
25565 +- */
25566 +- ret = rtl8366_enable_vlan4k(smi, true);
25567 +- if (ret) {
25568 +- NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
25569 +- return ret;
25570 +- }
25571 +-
25572 +- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
25573 +- vlan->vid, port, untagged ? "untagged" : "tagged",
25574 +- pvid ? "PVID" : "no PVID");
25575 +-
25576 +- member |= BIT(port);
25577 +-
25578 +- if (untagged)
25579 +- untag |= BIT(port);
25580 +-
25581 +- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
25582 +- if (ret) {
25583 +- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
25584 +- return ret;
25585 +- }
25586 +-
25587 +- if (!pvid)
25588 +- return 0;
25589 +-
25590 +- ret = rtl8366_set_pvid(smi, port, vlan->vid);
25591 +- if (ret) {
25592 +- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
25593 +- port, vlan->vid);
25594 +- return ret;
25595 +- }
25596 +-
25597 +- return 0;
25598 +-}
25599 +-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
25600 +-
25601 +-int rtl8366_vlan_del(struct dsa_switch *ds, int port,
25602 +- const struct switchdev_obj_port_vlan *vlan)
25603 +-{
25604 +- struct realtek_smi *smi = ds->priv;
25605 +- int ret, i;
25606 +-
25607 +- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
25608 +-
25609 +- for (i = 0; i < smi->num_vlan_mc; i++) {
25610 +- struct rtl8366_vlan_mc vlanmc;
25611 +-
25612 +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
25613 +- if (ret)
25614 +- return ret;
25615 +-
25616 +- if (vlan->vid == vlanmc.vid) {
25617 +- /* Remove this port from the VLAN */
25618 +- vlanmc.member &= ~BIT(port);
25619 +- vlanmc.untag &= ~BIT(port);
25620 +- /*
25621 +- * If no ports are members of this VLAN
25622 +- * anymore then clear the whole member
25623 +- * config so it can be reused.
25624 +- */
25625 +- if (!vlanmc.member) {
25626 +- vlanmc.vid = 0;
25627 +- vlanmc.priority = 0;
25628 +- vlanmc.fid = 0;
25629 +- }
25630 +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
25631 +- if (ret) {
25632 +- dev_err(smi->dev,
25633 +- "failed to remove VLAN %04x\n",
25634 +- vlan->vid);
25635 +- return ret;
25636 +- }
25637 +- break;
25638 +- }
25639 +- }
25640 +-
25641 +- return 0;
25642 +-}
25643 +-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
25644 +-
25645 +-void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
25646 +- uint8_t *data)
25647 +-{
25648 +- struct realtek_smi *smi = ds->priv;
25649 +- struct rtl8366_mib_counter *mib;
25650 +- int i;
25651 +-
25652 +- if (port >= smi->num_ports)
25653 +- return;
25654 +-
25655 +- for (i = 0; i < smi->num_mib_counters; i++) {
25656 +- mib = &smi->mib_counters[i];
25657 +- strncpy(data + i * ETH_GSTRING_LEN,
25658 +- mib->name, ETH_GSTRING_LEN);
25659 +- }
25660 +-}
25661 +-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
25662 +-
25663 +-int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
25664 +-{
25665 +- struct realtek_smi *smi = ds->priv;
25666 +-
25667 +- /* We only support SS_STATS */
25668 +- if (sset != ETH_SS_STATS)
25669 +- return 0;
25670 +- if (port >= smi->num_ports)
25671 +- return -EINVAL;
25672 +-
25673 +- return smi->num_mib_counters;
25674 +-}
25675 +-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
25676 +-
25677 +-void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
25678 +-{
25679 +- struct realtek_smi *smi = ds->priv;
25680 +- int i;
25681 +- int ret;
25682 +-
25683 +- if (port >= smi->num_ports)
25684 +- return;
25685 +-
25686 +- for (i = 0; i < smi->num_mib_counters; i++) {
25687 +- struct rtl8366_mib_counter *mib;
25688 +- u64 mibvalue = 0;
25689 +-
25690 +- mib = &smi->mib_counters[i];
25691 +- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
25692 +- if (ret) {
25693 +- dev_err(smi->dev, "error reading MIB counter %s\n",
25694 +- mib->name);
25695 +- }
25696 +- data[i] = mibvalue;
25697 +- }
25698 +-}
25699 +-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
25700 +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
25701 +deleted file mode 100644
25702 +index ecc19bd5115f0..0000000000000
25703 +--- a/drivers/net/dsa/rtl8366rb.c
25704 ++++ /dev/null
25705 +@@ -1,1814 +0,0 @@
25706 +-// SPDX-License-Identifier: GPL-2.0
25707 +-/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
25708 +- *
25709 +- * This is a sparsely documented chip, the only viable documentation seems
25710 +- * to be a patched up code drop from the vendor that appear in various
25711 +- * GPL source trees.
25712 +- *
25713 +- * Copyright (C) 2017 Linus Walleij <linus.walleij@××××××.org>
25714 +- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@×××××××.org>
25715 +- * Copyright (C) 2010 Antti Seppälä <a.seppala@×××××.com>
25716 +- * Copyright (C) 2010 Roman Yeryomin <roman@×××××.lv>
25717 +- * Copyright (C) 2011 Colin Leitner <colin.leitner@××××××××××.com>
25718 +- */
25719 +-
25720 +-#include <linux/bitops.h>
25721 +-#include <linux/etherdevice.h>
25722 +-#include <linux/if_bridge.h>
25723 +-#include <linux/interrupt.h>
25724 +-#include <linux/irqdomain.h>
25725 +-#include <linux/irqchip/chained_irq.h>
25726 +-#include <linux/of_irq.h>
25727 +-#include <linux/regmap.h>
25728 +-
25729 +-#include "realtek-smi-core.h"
25730 +-
25731 +-#define RTL8366RB_PORT_NUM_CPU 5
25732 +-#define RTL8366RB_NUM_PORTS 6
25733 +-#define RTL8366RB_PHY_NO_MAX 4
25734 +-#define RTL8366RB_PHY_ADDR_MAX 31
25735 +-
25736 +-/* Switch Global Configuration register */
25737 +-#define RTL8366RB_SGCR 0x0000
25738 +-#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
25739 +-#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
25740 +-#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
25741 +-#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
25742 +-#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
25743 +-#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
25744 +-#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
25745 +-#define RTL8366RB_SGCR_EN_VLAN BIT(13)
25746 +-#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
25747 +-
25748 +-/* Port Enable Control register */
25749 +-#define RTL8366RB_PECR 0x0001
25750 +-
25751 +-/* Switch per-port learning disablement register */
25752 +-#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
25753 +-
25754 +-/* Security control, actually aging register */
25755 +-#define RTL8366RB_SECURITY_CTRL 0x0003
25756 +-
25757 +-#define RTL8366RB_SSCR2 0x0004
25758 +-#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
25759 +-
25760 +-/* Port Mode Control registers */
25761 +-#define RTL8366RB_PMC0 0x0005
25762 +-#define RTL8366RB_PMC0_SPI BIT(0)
25763 +-#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
25764 +-#define RTL8366RB_PMC0_PROBE BIT(2)
25765 +-#define RTL8366RB_PMC0_DIS_BISR BIT(3)
25766 +-#define RTL8366RB_PMC0_ADCTEST BIT(4)
25767 +-#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
25768 +-#define RTL8366RB_PMC0_EN_SCAN BIT(6)
25769 +-#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
25770 +-#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
25771 +-#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
25772 +-#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
25773 +-#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
25774 +-#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
25775 +-#define RTL8366RB_PMC1 0x0006
25776 +-
25777 +-/* Port Mirror Control Register */
25778 +-#define RTL8366RB_PMCR 0x0007
25779 +-#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
25780 +-#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
25781 +-#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
25782 +-#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
25783 +-#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
25784 +-#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
25785 +-#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
25786 +-#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
25787 +-
25788 +-/* bits 0..7 = port 0, bits 8..15 = port 1 */
25789 +-#define RTL8366RB_PAACR0 0x0010
25790 +-/* bits 0..7 = port 2, bits 8..15 = port 3 */
25791 +-#define RTL8366RB_PAACR1 0x0011
25792 +-/* bits 0..7 = port 4, bits 8..15 = port 5 */
25793 +-#define RTL8366RB_PAACR2 0x0012
25794 +-#define RTL8366RB_PAACR_SPEED_10M 0
25795 +-#define RTL8366RB_PAACR_SPEED_100M 1
25796 +-#define RTL8366RB_PAACR_SPEED_1000M 2
25797 +-#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
25798 +-#define RTL8366RB_PAACR_LINK_UP BIT(4)
25799 +-#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
25800 +-#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
25801 +-#define RTL8366RB_PAACR_AN BIT(7)
25802 +-
25803 +-#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
25804 +- RTL8366RB_PAACR_FULL_DUPLEX | \
25805 +- RTL8366RB_PAACR_LINK_UP | \
25806 +- RTL8366RB_PAACR_TX_PAUSE | \
25807 +- RTL8366RB_PAACR_RX_PAUSE)
25808 +-
25809 +-/* bits 0..7 = port 0, bits 8..15 = port 1 */
25810 +-#define RTL8366RB_PSTAT0 0x0014
25811 +-/* bits 0..7 = port 2, bits 8..15 = port 3 */
25812 +-#define RTL8366RB_PSTAT1 0x0015
25813 +-/* bits 0..7 = port 4, bits 8..15 = port 5 */
25814 +-#define RTL8366RB_PSTAT2 0x0016
25815 +-
25816 +-#define RTL8366RB_POWER_SAVING_REG 0x0021
25817 +-
25818 +-/* Spanning tree status (STP) control, two bits per port per FID */
25819 +-#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
25820 +-#define RTL8366RB_STP_STATE_DISABLED 0x0
25821 +-#define RTL8366RB_STP_STATE_BLOCKING 0x1
25822 +-#define RTL8366RB_STP_STATE_LEARNING 0x2
25823 +-#define RTL8366RB_STP_STATE_FORWARDING 0x3
25824 +-#define RTL8366RB_STP_MASK GENMASK(1, 0)
25825 +-#define RTL8366RB_STP_STATE(port, state) \
25826 +- ((state) << ((port) * 2))
25827 +-#define RTL8366RB_STP_STATE_MASK(port) \
25828 +- RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
25829 +-
25830 +-/* CPU port control reg */
25831 +-#define RTL8368RB_CPU_CTRL_REG 0x0061
25832 +-#define RTL8368RB_CPU_PORTS_MSK 0x00FF
25833 +-/* Disables inserting custom tag length/type 0x8899 */
25834 +-#define RTL8368RB_CPU_NO_TAG BIT(15)
25835 +-
25836 +-#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
25837 +-#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
25838 +-#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
25839 +-
25840 +-#define RTL8366RB_RESET_CTRL_REG 0x0100
25841 +-#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
25842 +-#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
25843 +-
25844 +-#define RTL8366RB_CHIP_ID_REG 0x0509
25845 +-#define RTL8366RB_CHIP_ID_8366 0x5937
25846 +-#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
25847 +-#define RTL8366RB_CHIP_VERSION_MASK 0xf
25848 +-
25849 +-/* PHY registers control */
25850 +-#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
25851 +-#define RTL8366RB_PHY_CTRL_READ BIT(0)
25852 +-#define RTL8366RB_PHY_CTRL_WRITE 0
25853 +-#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
25854 +-#define RTL8366RB_PHY_INT_BUSY BIT(0)
25855 +-#define RTL8366RB_PHY_EXT_BUSY BIT(4)
25856 +-#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
25857 +-#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
25858 +-#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
25859 +-#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
25860 +-
25861 +-#define RTL8366RB_PHY_REG_MASK 0x1f
25862 +-#define RTL8366RB_PHY_PAGE_OFFSET 5
25863 +-#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
25864 +-#define RTL8366RB_PHY_NO_OFFSET 9
25865 +-#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
25866 +-
25867 +-/* VLAN Ingress Control Register 1, one bit per port.
25868 +- * bit 0 .. 5 will make the switch drop ingress frames without
25869 +- * VID such as untagged or priority-tagged frames for respective
25870 +- * port.
25871 +- * bit 6 .. 11 will make the switch drop ingress frames carrying
25872 +- * a C-tag with VID != 0 for respective port.
25873 +- */
25874 +-#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
25875 +-#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
25876 +-
25877 +-/* VLAN Ingress Control Register 2, one bit per port.
25878 +- * bit0 .. bit5 will make the switch drop all ingress frames with
25879 +- * a VLAN classification that does not include the port is in its
25880 +- * member set.
25881 +- */
25882 +-#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
25883 +-
25884 +-/* LED control registers */
25885 +-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
25886 +-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
25887 +-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
25888 +-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
25889 +-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
25890 +-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
25891 +-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
25892 +-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
25893 +-
25894 +-#define RTL8366RB_LED_CTRL_REG 0x0431
25895 +-#define RTL8366RB_LED_OFF 0x0
25896 +-#define RTL8366RB_LED_DUP_COL 0x1
25897 +-#define RTL8366RB_LED_LINK_ACT 0x2
25898 +-#define RTL8366RB_LED_SPD1000 0x3
25899 +-#define RTL8366RB_LED_SPD100 0x4
25900 +-#define RTL8366RB_LED_SPD10 0x5
25901 +-#define RTL8366RB_LED_SPD1000_ACT 0x6
25902 +-#define RTL8366RB_LED_SPD100_ACT 0x7
25903 +-#define RTL8366RB_LED_SPD10_ACT 0x8
25904 +-#define RTL8366RB_LED_SPD100_10_ACT 0x9
25905 +-#define RTL8366RB_LED_FIBER 0xa
25906 +-#define RTL8366RB_LED_AN_FAULT 0xb
25907 +-#define RTL8366RB_LED_LINK_RX 0xc
25908 +-#define RTL8366RB_LED_LINK_TX 0xd
25909 +-#define RTL8366RB_LED_MASTER 0xe
25910 +-#define RTL8366RB_LED_FORCE 0xf
25911 +-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
25912 +-#define RTL8366RB_LED_1_OFFSET 6
25913 +-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
25914 +-#define RTL8366RB_LED_3_OFFSET 6
25915 +-
25916 +-#define RTL8366RB_MIB_COUNT 33
25917 +-#define RTL8366RB_GLOBAL_MIB_COUNT 1
25918 +-#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
25919 +-#define RTL8366RB_MIB_COUNTER_BASE 0x1000
25920 +-#define RTL8366RB_MIB_CTRL_REG 0x13F0
25921 +-#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
25922 +-#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
25923 +-#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
25924 +-#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
25925 +-#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
25926 +-
25927 +-#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
25928 +-#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
25929 +- (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
25930 +-#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
25931 +-#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
25932 +-
25933 +-#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
25934 +-#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
25935 +-
25936 +-#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
25937 +-#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
25938 +-#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
25939 +-
25940 +-#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
25941 +-
25942 +-#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
25943 +-#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
25944 +-#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
25945 +-#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
25946 +-#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
25947 +-#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
25948 +-#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
25949 +-
25950 +-#define RTL8366RB_NUM_VLANS 16
25951 +-#define RTL8366RB_NUM_LEDGROUPS 4
25952 +-#define RTL8366RB_NUM_VIDS 4096
25953 +-#define RTL8366RB_PRIORITYMAX 7
25954 +-#define RTL8366RB_NUM_FIDS 8
25955 +-#define RTL8366RB_FIDMAX 7
25956 +-
25957 +-#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
25958 +-#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
25959 +-#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
25960 +-#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
25961 +-#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
25962 +-
25963 +-#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
25964 +-
25965 +-#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
25966 +- RTL8366RB_PORT_2 | \
25967 +- RTL8366RB_PORT_3 | \
25968 +- RTL8366RB_PORT_4 | \
25969 +- RTL8366RB_PORT_5 | \
25970 +- RTL8366RB_PORT_CPU)
25971 +-
25972 +-#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
25973 +- RTL8366RB_PORT_2 | \
25974 +- RTL8366RB_PORT_3 | \
25975 +- RTL8366RB_PORT_4 | \
25976 +- RTL8366RB_PORT_5)
25977 +-
25978 +-#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
25979 +- RTL8366RB_PORT_2 | \
25980 +- RTL8366RB_PORT_3 | \
25981 +- RTL8366RB_PORT_4)
25982 +-
25983 +-#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
25984 +-
25985 +-/* First configuration word per member config, VID and prio */
25986 +-#define RTL8366RB_VLAN_VID_MASK 0xfff
25987 +-#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
25988 +-#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
25989 +-/* Second configuration word per member config, member and untagged */
25990 +-#define RTL8366RB_VLAN_UNTAG_SHIFT 8
25991 +-#define RTL8366RB_VLAN_UNTAG_MASK 0xff
25992 +-#define RTL8366RB_VLAN_MEMBER_MASK 0xff
25993 +-/* Third config word per member config, STAG currently unused */
25994 +-#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
25995 +-#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
25996 +-#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
25997 +-#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
25998 +-#define RTL8366RB_VLAN_FID_MASK 0x7
25999 +-
26000 +-/* Port ingress bandwidth control */
26001 +-#define RTL8366RB_IB_BASE 0x0200
26002 +-#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
26003 +-#define RTL8366RB_IB_BDTH_MASK 0x3fff
26004 +-#define RTL8366RB_IB_PREIFG BIT(14)
26005 +-
26006 +-/* Port egress bandwidth control */
26007 +-#define RTL8366RB_EB_BASE 0x02d1
26008 +-#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
26009 +-#define RTL8366RB_EB_BDTH_MASK 0x3fff
26010 +-#define RTL8366RB_EB_PREIFG_REG 0x02f8
26011 +-#define RTL8366RB_EB_PREIFG BIT(9)
26012 +-
26013 +-#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
26014 +-#define RTL8366RB_BDTH_UNIT 64
26015 +-#define RTL8366RB_BDTH_REG_DEFAULT 16383
26016 +-
26017 +-/* QOS */
26018 +-#define RTL8366RB_QOS BIT(15)
26019 +-/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
26020 +-#define RTL8366RB_QOS_DEFAULT_PREIFG 1
26021 +-
26022 +-/* Interrupt handling */
26023 +-#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
26024 +-#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
26025 +-#define RTL8366RB_P4_RGMII_LED BIT(2)
26026 +-#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
26027 +-#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
26028 +-#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
26029 +-#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
26030 +-#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
26031 +-#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
26032 +-#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
26033 +- RTL8366RB_INTERRUPT_ACLEXCEED | \
26034 +- RTL8366RB_INTERRUPT_STORMEXCEED | \
26035 +- RTL8366RB_INTERRUPT_P4_FIBER | \
26036 +- RTL8366RB_INTERRUPT_P4_UTP)
26037 +-#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
26038 +-#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
26039 +-
26040 +-/* Port isolation registers */
26041 +-#define RTL8366RB_PORT_ISO_BASE 0x0F08
26042 +-#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
26043 +-#define RTL8366RB_PORT_ISO_EN BIT(0)
26044 +-#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
26045 +-#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
26046 +-
26047 +-/* bits 0..5 enable force when cleared */
26048 +-#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
26049 +-
26050 +-#define RTL8366RB_OAM_PARSER_REG 0x0F14
26051 +-#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
26052 +-
26053 +-#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
26054 +-#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
26055 +-#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
26056 +-#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
26057 +-
26058 +-/**
26059 +- * struct rtl8366rb - RTL8366RB-specific data
26060 +- * @max_mtu: per-port max MTU setting
26061 +- * @pvid_enabled: if PVID is set for respective port
26062 +- */
26063 +-struct rtl8366rb {
26064 +- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
26065 +- bool pvid_enabled[RTL8366RB_NUM_PORTS];
26066 +-};
26067 +-
26068 +-static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
26069 +- { 0, 0, 4, "IfInOctets" },
26070 +- { 0, 4, 4, "EtherStatsOctets" },
26071 +- { 0, 8, 2, "EtherStatsUnderSizePkts" },
26072 +- { 0, 10, 2, "EtherFragments" },
26073 +- { 0, 12, 2, "EtherStatsPkts64Octets" },
26074 +- { 0, 14, 2, "EtherStatsPkts65to127Octets" },
26075 +- { 0, 16, 2, "EtherStatsPkts128to255Octets" },
26076 +- { 0, 18, 2, "EtherStatsPkts256to511Octets" },
26077 +- { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
26078 +- { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
26079 +- { 0, 24, 2, "EtherOversizeStats" },
26080 +- { 0, 26, 2, "EtherStatsJabbers" },
26081 +- { 0, 28, 2, "IfInUcastPkts" },
26082 +- { 0, 30, 2, "EtherStatsMulticastPkts" },
26083 +- { 0, 32, 2, "EtherStatsBroadcastPkts" },
26084 +- { 0, 34, 2, "EtherStatsDropEvents" },
26085 +- { 0, 36, 2, "Dot3StatsFCSErrors" },
26086 +- { 0, 38, 2, "Dot3StatsSymbolErrors" },
26087 +- { 0, 40, 2, "Dot3InPauseFrames" },
26088 +- { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
26089 +- { 0, 44, 4, "IfOutOctets" },
26090 +- { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
26091 +- { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
26092 +- { 0, 52, 2, "Dot3sDeferredTransmissions" },
26093 +- { 0, 54, 2, "Dot3StatsLateCollisions" },
26094 +- { 0, 56, 2, "EtherStatsCollisions" },
26095 +- { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
26096 +- { 0, 60, 2, "Dot3OutPauseFrames" },
26097 +- { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
26098 +- { 0, 64, 2, "Dot1dTpPortInDiscards" },
26099 +- { 0, 66, 2, "IfOutUcastPkts" },
26100 +- { 0, 68, 2, "IfOutMulticastPkts" },
26101 +- { 0, 70, 2, "IfOutBroadcastPkts" },
26102 +-};
26103 +-
26104 +-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
26105 +- int port,
26106 +- struct rtl8366_mib_counter *mib,
26107 +- u64 *mibvalue)
26108 +-{
26109 +- u32 addr, val;
26110 +- int ret;
26111 +- int i;
26112 +-
26113 +- addr = RTL8366RB_MIB_COUNTER_BASE +
26114 +- RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
26115 +- mib->offset;
26116 +-
26117 +- /* Writing access counter address first
26118 +- * then ASIC will prepare 64bits counter wait for being retrived
26119 +- */
26120 +- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
26121 +- if (ret)
26122 +- return ret;
26123 +-
26124 +- /* Read MIB control register */
26125 +- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
26126 +- if (ret)
26127 +- return -EIO;
26128 +-
26129 +- if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
26130 +- return -EBUSY;
26131 +-
26132 +- if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
26133 +- return -EIO;
26134 +-
26135 +- /* Read each individual MIB 16 bits at the time */
26136 +- *mibvalue = 0;
26137 +- for (i = mib->length; i > 0; i--) {
26138 +- ret = regmap_read(smi->map, addr + (i - 1), &val);
26139 +- if (ret)
26140 +- return ret;
26141 +- *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
26142 +- }
26143 +- return 0;
26144 +-}
26145 +-
26146 +-static u32 rtl8366rb_get_irqmask(struct irq_data *d)
26147 +-{
26148 +- int line = irqd_to_hwirq(d);
26149 +- u32 val;
26150 +-
26151 +- /* For line interrupts we combine link down in bits
26152 +- * 6..11 with link up in bits 0..5 into one interrupt.
26153 +- */
26154 +- if (line < 12)
26155 +- val = BIT(line) | BIT(line + 6);
26156 +- else
26157 +- val = BIT(line);
26158 +- return val;
26159 +-}
26160 +-
26161 +-static void rtl8366rb_mask_irq(struct irq_data *d)
26162 +-{
26163 +- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
26164 +- int ret;
26165 +-
26166 +- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
26167 +- rtl8366rb_get_irqmask(d), 0);
26168 +- if (ret)
26169 +- dev_err(smi->dev, "could not mask IRQ\n");
26170 +-}
26171 +-
26172 +-static void rtl8366rb_unmask_irq(struct irq_data *d)
26173 +-{
26174 +- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
26175 +- int ret;
26176 +-
26177 +- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
26178 +- rtl8366rb_get_irqmask(d),
26179 +- rtl8366rb_get_irqmask(d));
26180 +- if (ret)
26181 +- dev_err(smi->dev, "could not unmask IRQ\n");
26182 +-}
26183 +-
26184 +-static irqreturn_t rtl8366rb_irq(int irq, void *data)
26185 +-{
26186 +- struct realtek_smi *smi = data;
26187 +- u32 stat;
26188 +- int ret;
26189 +-
26190 +- /* This clears the IRQ status register */
26191 +- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
26192 +- &stat);
26193 +- if (ret) {
26194 +- dev_err(smi->dev, "can't read interrupt status\n");
26195 +- return IRQ_NONE;
26196 +- }
26197 +- stat &= RTL8366RB_INTERRUPT_VALID;
26198 +- if (!stat)
26199 +- return IRQ_NONE;
26200 +- while (stat) {
26201 +- int line = __ffs(stat);
26202 +- int child_irq;
26203 +-
26204 +- stat &= ~BIT(line);
26205 +- /* For line interrupts we combine link down in bits
26206 +- * 6..11 with link up in bits 0..5 into one interrupt.
26207 +- */
26208 +- if (line < 12 && line > 5)
26209 +- line -= 5;
26210 +- child_irq = irq_find_mapping(smi->irqdomain, line);
26211 +- handle_nested_irq(child_irq);
26212 +- }
26213 +- return IRQ_HANDLED;
26214 +-}
26215 +-
26216 +-static struct irq_chip rtl8366rb_irq_chip = {
26217 +- .name = "RTL8366RB",
26218 +- .irq_mask = rtl8366rb_mask_irq,
26219 +- .irq_unmask = rtl8366rb_unmask_irq,
26220 +-};
26221 +-
26222 +-static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
26223 +- irq_hw_number_t hwirq)
26224 +-{
26225 +- irq_set_chip_data(irq, domain->host_data);
26226 +- irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
26227 +- irq_set_nested_thread(irq, 1);
26228 +- irq_set_noprobe(irq);
26229 +-
26230 +- return 0;
26231 +-}
26232 +-
26233 +-static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
26234 +-{
26235 +- irq_set_nested_thread(irq, 0);
26236 +- irq_set_chip_and_handler(irq, NULL, NULL);
26237 +- irq_set_chip_data(irq, NULL);
26238 +-}
26239 +-
26240 +-static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
26241 +- .map = rtl8366rb_irq_map,
26242 +- .unmap = rtl8366rb_irq_unmap,
26243 +- .xlate = irq_domain_xlate_onecell,
26244 +-};
26245 +-
26246 +-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
26247 +-{
26248 +- struct device_node *intc;
26249 +- unsigned long irq_trig;
26250 +- int irq;
26251 +- int ret;
26252 +- u32 val;
26253 +- int i;
26254 +-
26255 +- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
26256 +- if (!intc) {
26257 +- dev_err(smi->dev, "missing child interrupt-controller node\n");
26258 +- return -EINVAL;
26259 +- }
26260 +- /* RB8366RB IRQs cascade off this one */
26261 +- irq = of_irq_get(intc, 0);
26262 +- if (irq <= 0) {
26263 +- dev_err(smi->dev, "failed to get parent IRQ\n");
26264 +- ret = irq ? irq : -EINVAL;
26265 +- goto out_put_node;
26266 +- }
26267 +-
26268 +- /* This clears the IRQ status register */
26269 +- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
26270 +- &val);
26271 +- if (ret) {
26272 +- dev_err(smi->dev, "can't read interrupt status\n");
26273 +- goto out_put_node;
26274 +- }
26275 +-
26276 +- /* Fetch IRQ edge information from the descriptor */
26277 +- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
26278 +- switch (irq_trig) {
26279 +- case IRQF_TRIGGER_RISING:
26280 +- case IRQF_TRIGGER_HIGH:
26281 +- dev_info(smi->dev, "active high/rising IRQ\n");
26282 +- val = 0;
26283 +- break;
26284 +- case IRQF_TRIGGER_FALLING:
26285 +- case IRQF_TRIGGER_LOW:
26286 +- dev_info(smi->dev, "active low/falling IRQ\n");
26287 +- val = RTL8366RB_INTERRUPT_POLARITY;
26288 +- break;
26289 +- }
26290 +- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
26291 +- RTL8366RB_INTERRUPT_POLARITY,
26292 +- val);
26293 +- if (ret) {
26294 +- dev_err(smi->dev, "could not configure IRQ polarity\n");
26295 +- goto out_put_node;
26296 +- }
26297 +-
26298 +- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
26299 +- rtl8366rb_irq, IRQF_ONESHOT,
26300 +- "RTL8366RB", smi);
26301 +- if (ret) {
26302 +- dev_err(smi->dev, "unable to request irq: %d\n", ret);
26303 +- goto out_put_node;
26304 +- }
26305 +- smi->irqdomain = irq_domain_add_linear(intc,
26306 +- RTL8366RB_NUM_INTERRUPT,
26307 +- &rtl8366rb_irqdomain_ops,
26308 +- smi);
26309 +- if (!smi->irqdomain) {
26310 +- dev_err(smi->dev, "failed to create IRQ domain\n");
26311 +- ret = -EINVAL;
26312 +- goto out_put_node;
26313 +- }
26314 +- for (i = 0; i < smi->num_ports; i++)
26315 +- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
26316 +-
26317 +-out_put_node:
26318 +- of_node_put(intc);
26319 +- return ret;
26320 +-}
26321 +-
26322 +-static int rtl8366rb_set_addr(struct realtek_smi *smi)
26323 +-{
26324 +- u8 addr[ETH_ALEN];
26325 +- u16 val;
26326 +- int ret;
26327 +-
26328 +- eth_random_addr(addr);
26329 +-
26330 +- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
26331 +- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
26332 +- val = addr[0] << 8 | addr[1];
26333 +- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
26334 +- if (ret)
26335 +- return ret;
26336 +- val = addr[2] << 8 | addr[3];
26337 +- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
26338 +- if (ret)
26339 +- return ret;
26340 +- val = addr[4] << 8 | addr[5];
26341 +- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
26342 +- if (ret)
26343 +- return ret;
26344 +-
26345 +- return 0;
26346 +-}
26347 +-
26348 +-/* Found in a vendor driver */
26349 +-
26350 +-/* Struct for handling the jam tables' entries */
26351 +-struct rtl8366rb_jam_tbl_entry {
26352 +- u16 reg;
26353 +- u16 val;
26354 +-};
26355 +-
26356 +-/* For the "version 0" early silicon, appear in most source releases */
26357 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
26358 +- {0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
26359 +- {0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
26360 +- {0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
26361 +- {0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
26362 +- {0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
26363 +- {0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
26364 +- {0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
26365 +- {0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
26366 +- {0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
26367 +- {0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
26368 +- {0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
26369 +- {0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
26370 +- {0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
26371 +- {0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
26372 +- {0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
26373 +- {0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
26374 +- {0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
26375 +- {0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
26376 +- {0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
26377 +- {0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
26378 +- {0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
26379 +- {0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
26380 +- {0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
26381 +- {0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
26382 +- {0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
26383 +- {0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
26384 +- {0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
26385 +- {0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
26386 +- {0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
26387 +-};
26388 +-
26389 +-/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
26390 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
26391 +- {0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
26392 +- {0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
26393 +- {0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
26394 +- {0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
26395 +- {0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
26396 +- {0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
26397 +- {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
26398 +- {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
26399 +- {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
26400 +- {0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
26401 +- {0xBE79, 0x3C3C}, {0xBE00, 0x1340},
26402 +-};
26403 +-
26404 +-/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
26405 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
26406 +- {0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
26407 +- {0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
26408 +- {0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
26409 +- {0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
26410 +- {0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
26411 +- {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
26412 +- {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
26413 +- {0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
26414 +- {0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
26415 +- {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
26416 +- {0xBE00, 0x1340}, {0x0F51, 0x0010},
26417 +-};
26418 +-
26419 +-/* Appears in a DDWRT code dump */
26420 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
26421 +- {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
26422 +- {0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
26423 +- {0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
26424 +- {0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
26425 +- {0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
26426 +- {0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
26427 +- {0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
26428 +- {0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
26429 +- {0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
26430 +- {0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
26431 +- {0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
26432 +- {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
26433 +- {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
26434 +- {0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
26435 +- {0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
26436 +- {0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
26437 +- {0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
26438 +-};
26439 +-
26440 +-/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
26441 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
26442 +- {0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
26443 +- {0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
26444 +- {0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
26445 +- {0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
26446 +- {0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
26447 +- {0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
26448 +- {0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
26449 +- {0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
26450 +- {0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
26451 +-};
26452 +-
26453 +-/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
26454 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
26455 +- {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
26456 +- {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
26457 +- {0x0401, 0x0000}, {0x0431, 0x0960},
26458 +-};
26459 +-
26460 +-/* This jam table activates "green ethernet", which means low power mode
26461 +- * and is claimed to detect the cable length and not use more power than
26462 +- * necessary, and the ports should enter power saving mode 10 seconds after
26463 +- * a cable is disconnected. Seems to always be the same.
26464 +- */
26465 +-static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
26466 +- {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
26467 +- {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
26468 +- {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
26469 +-};
26470 +-
26471 +-/* Function that jams the tables in the proper registers */
26472 +-static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
26473 +- int jam_size, struct realtek_smi *smi,
26474 +- bool write_dbg)
26475 +-{
26476 +- u32 val;
26477 +- int ret;
26478 +- int i;
26479 +-
26480 +- for (i = 0; i < jam_size; i++) {
26481 +- if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
26482 +- ret = regmap_read(smi->map,
26483 +- RTL8366RB_PHY_ACCESS_BUSY_REG,
26484 +- &val);
26485 +- if (ret)
26486 +- return ret;
26487 +- if (!(val & RTL8366RB_PHY_INT_BUSY)) {
26488 +- ret = regmap_write(smi->map,
26489 +- RTL8366RB_PHY_ACCESS_CTRL_REG,
26490 +- RTL8366RB_PHY_CTRL_WRITE);
26491 +- if (ret)
26492 +- return ret;
26493 +- }
26494 +- }
26495 +- if (write_dbg)
26496 +- dev_dbg(smi->dev, "jam %04x into register %04x\n",
26497 +- jam_table[i].val,
26498 +- jam_table[i].reg);
26499 +- ret = regmap_write(smi->map,
26500 +- jam_table[i].reg,
26501 +- jam_table[i].val);
26502 +- if (ret)
26503 +- return ret;
26504 +- }
26505 +- return 0;
26506 +-}
26507 +-
26508 +-static int rtl8366rb_setup(struct dsa_switch *ds)
26509 +-{
26510 +- struct realtek_smi *smi = ds->priv;
26511 +- const struct rtl8366rb_jam_tbl_entry *jam_table;
26512 +- struct rtl8366rb *rb;
26513 +- u32 chip_ver = 0;
26514 +- u32 chip_id = 0;
26515 +- int jam_size;
26516 +- u32 val;
26517 +- int ret;
26518 +- int i;
26519 +-
26520 +- rb = smi->chip_data;
26521 +-
26522 +- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
26523 +- if (ret) {
26524 +- dev_err(smi->dev, "unable to read chip id\n");
26525 +- return ret;
26526 +- }
26527 +-
26528 +- switch (chip_id) {
26529 +- case RTL8366RB_CHIP_ID_8366:
26530 +- break;
26531 +- default:
26532 +- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
26533 +- return -ENODEV;
26534 +- }
26535 +-
26536 +- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
26537 +- &chip_ver);
26538 +- if (ret) {
26539 +- dev_err(smi->dev, "unable to read chip version\n");
26540 +- return ret;
26541 +- }
26542 +-
26543 +- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
26544 +- chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
26545 +-
26546 +- /* Do the init dance using the right jam table */
26547 +- switch (chip_ver) {
26548 +- case 0:
26549 +- jam_table = rtl8366rb_init_jam_ver_0;
26550 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
26551 +- break;
26552 +- case 1:
26553 +- jam_table = rtl8366rb_init_jam_ver_1;
26554 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
26555 +- break;
26556 +- case 2:
26557 +- jam_table = rtl8366rb_init_jam_ver_2;
26558 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
26559 +- break;
26560 +- default:
26561 +- jam_table = rtl8366rb_init_jam_ver_3;
26562 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
26563 +- break;
26564 +- }
26565 +-
26566 +- /* Special jam tables for special routers
26567 +- * TODO: are these necessary? Maintainers, please test
26568 +- * without them, using just the off-the-shelf tables.
26569 +- */
26570 +- if (of_machine_is_compatible("belkin,f5d8235-v1")) {
26571 +- jam_table = rtl8366rb_init_jam_f5d8235;
26572 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
26573 +- }
26574 +- if (of_machine_is_compatible("netgear,dgn3500") ||
26575 +- of_machine_is_compatible("netgear,dgn3500b")) {
26576 +- jam_table = rtl8366rb_init_jam_dgn3500;
26577 +- jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
26578 +- }
26579 +-
26580 +- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
26581 +- if (ret)
26582 +- return ret;
26583 +-
26584 +- /* Isolate all user ports so they can only send packets to itself and the CPU port */
26585 +- for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
26586 +- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
26587 +- RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
26588 +- RTL8366RB_PORT_ISO_EN);
26589 +- if (ret)
26590 +- return ret;
26591 +- }
26592 +- /* CPU port can send packets to all ports */
26593 +- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
26594 +- RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
26595 +- RTL8366RB_PORT_ISO_EN);
26596 +- if (ret)
26597 +- return ret;
26598 +-
26599 +- /* Set up the "green ethernet" feature */
26600 +- ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
26601 +- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
26602 +- if (ret)
26603 +- return ret;
26604 +-
26605 +- ret = regmap_write(smi->map,
26606 +- RTL8366RB_GREEN_FEATURE_REG,
26607 +- (chip_ver == 1) ? 0x0007 : 0x0003);
26608 +- if (ret)
26609 +- return ret;
26610 +-
26611 +- /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
26612 +- ret = regmap_write(smi->map, 0x0c, 0x240);
26613 +- if (ret)
26614 +- return ret;
26615 +- ret = regmap_write(smi->map, 0x0d, 0x240);
26616 +- if (ret)
26617 +- return ret;
26618 +-
26619 +- /* Set some random MAC address */
26620 +- ret = rtl8366rb_set_addr(smi);
26621 +- if (ret)
26622 +- return ret;
26623 +-
26624 +- /* Enable CPU port with custom DSA tag 8899.
26625 +- *
26626 +- * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this registers
26627 +- * the custom tag is turned off.
26628 +- */
26629 +- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
26630 +- 0xFFFF,
26631 +- BIT(smi->cpu_port));
26632 +- if (ret)
26633 +- return ret;
26634 +-
26635 +- /* Make sure we default-enable the fixed CPU port */
26636 +- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
26637 +- BIT(smi->cpu_port),
26638 +- 0);
26639 +- if (ret)
26640 +- return ret;
26641 +-
26642 +- /* Set maximum packet length to 1536 bytes */
26643 +- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
26644 +- RTL8366RB_SGCR_MAX_LENGTH_MASK,
26645 +- RTL8366RB_SGCR_MAX_LENGTH_1536);
26646 +- if (ret)
26647 +- return ret;
26648 +- for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
26649 +- /* layer 2 size, see rtl8366rb_change_mtu() */
26650 +- rb->max_mtu[i] = 1532;
26651 +-
26652 +- /* Disable learning for all ports */
26653 +- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
26654 +- RTL8366RB_PORT_ALL);
26655 +- if (ret)
26656 +- return ret;
26657 +-
26658 +- /* Enable auto ageing for all ports */
26659 +- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
26660 +- if (ret)
26661 +- return ret;
26662 +-
26663 +- /* Port 4 setup: this enables Port 4, usually the WAN port,
26664 +- * common PHY IO mode is apparently mode 0, and this is not what
26665 +- * the port is initialized to. There is no explanation of the
26666 +- * IO modes in the Realtek source code, if your WAN port is
26667 +- * connected to something exotic such as fiber, then this might
26668 +- * be worth experimenting with.
26669 +- */
26670 +- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
26671 +- RTL8366RB_PMC0_P4_IOMODE_MASK,
26672 +- 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
26673 +- if (ret)
26674 +- return ret;
26675 +-
26676 +- /* Accept all packets by default, we enable filtering on-demand */
26677 +- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
26678 +- 0);
26679 +- if (ret)
26680 +- return ret;
26681 +- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
26682 +- 0);
26683 +- if (ret)
26684 +- return ret;
26685 +-
26686 +- /* Don't drop packets whose DA has not been learned */
26687 +- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
26688 +- RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
26689 +- if (ret)
26690 +- return ret;
26691 +-
26692 +- /* Set blinking, TODO: make this configurable */
26693 +- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
26694 +- RTL8366RB_LED_BLINKRATE_MASK,
26695 +- RTL8366RB_LED_BLINKRATE_56MS);
26696 +- if (ret)
26697 +- return ret;
26698 +-
26699 +- /* Set up LED activity:
26700 +- * Each port has 4 LEDs, we configure all ports to the same
26701 +- * behaviour (no individual config) but we can set up each
26702 +- * LED separately.
26703 +- */
26704 +- if (smi->leds_disabled) {
26705 +- /* Turn everything off */
26706 +- regmap_update_bits(smi->map,
26707 +- RTL8366RB_LED_0_1_CTRL_REG,
26708 +- 0x0FFF, 0);
26709 +- regmap_update_bits(smi->map,
26710 +- RTL8366RB_LED_2_3_CTRL_REG,
26711 +- 0x0FFF, 0);
26712 +- regmap_update_bits(smi->map,
26713 +- RTL8366RB_INTERRUPT_CONTROL_REG,
26714 +- RTL8366RB_P4_RGMII_LED,
26715 +- 0);
26716 +- val = RTL8366RB_LED_OFF;
26717 +- } else {
26718 +- /* TODO: make this configurable per LED */
26719 +- val = RTL8366RB_LED_FORCE;
26720 +- }
26721 +- for (i = 0; i < 4; i++) {
26722 +- ret = regmap_update_bits(smi->map,
26723 +- RTL8366RB_LED_CTRL_REG,
26724 +- 0xf << (i * 4),
26725 +- val << (i * 4));
26726 +- if (ret)
26727 +- return ret;
26728 +- }
26729 +-
26730 +- ret = rtl8366_reset_vlan(smi);
26731 +- if (ret)
26732 +- return ret;
26733 +-
26734 +- ret = rtl8366rb_setup_cascaded_irq(smi);
26735 +- if (ret)
26736 +- dev_info(smi->dev, "no interrupt support\n");
26737 +-
26738 +- ret = realtek_smi_setup_mdio(smi);
26739 +- if (ret) {
26740 +- dev_info(smi->dev, "could not set up MDIO bus\n");
26741 +- return -ENODEV;
26742 +- }
26743 +-
26744 +- return 0;
26745 +-}
26746 +-
26747 +-static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
26748 +- int port,
26749 +- enum dsa_tag_protocol mp)
26750 +-{
26751 +- /* This switch uses the 4 byte protocol A Realtek DSA tag */
26752 +- return DSA_TAG_PROTO_RTL4_A;
26753 +-}
26754 +-
26755 +-static void
26756 +-rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
26757 +- phy_interface_t interface, struct phy_device *phydev,
26758 +- int speed, int duplex, bool tx_pause, bool rx_pause)
26759 +-{
26760 +- struct realtek_smi *smi = ds->priv;
26761 +- int ret;
26762 +-
26763 +- if (port != smi->cpu_port)
26764 +- return;
26765 +-
26766 +- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
26767 +-
26768 +- /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
26769 +- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
26770 +- BIT(port), BIT(port));
26771 +- if (ret) {
26772 +- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
26773 +- return;
26774 +- }
26775 +-
26776 +- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
26777 +- 0xFF00U,
26778 +- RTL8366RB_PAACR_CPU_PORT << 8);
26779 +- if (ret) {
26780 +- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
26781 +- return;
26782 +- }
26783 +-
26784 +- /* Enable the CPU port */
26785 +- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
26786 +- 0);
26787 +- if (ret) {
26788 +- dev_err(smi->dev, "failed to enable the CPU port\n");
26789 +- return;
26790 +- }
26791 +-}
26792 +-
26793 +-static void
26794 +-rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
26795 +- phy_interface_t interface)
26796 +-{
26797 +- struct realtek_smi *smi = ds->priv;
26798 +- int ret;
26799 +-
26800 +- if (port != smi->cpu_port)
26801 +- return;
26802 +-
26803 +- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
26804 +-
26805 +- /* Disable the CPU port */
26806 +- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
26807 +- BIT(port));
26808 +- if (ret) {
26809 +- dev_err(smi->dev, "failed to disable the CPU port\n");
26810 +- return;
26811 +- }
26812 +-}
26813 +-
26814 +-static void rb8366rb_set_port_led(struct realtek_smi *smi,
26815 +- int port, bool enable)
26816 +-{
26817 +- u16 val = enable ? 0x3f : 0;
26818 +- int ret;
26819 +-
26820 +- if (smi->leds_disabled)
26821 +- return;
26822 +-
26823 +- switch (port) {
26824 +- case 0:
26825 +- ret = regmap_update_bits(smi->map,
26826 +- RTL8366RB_LED_0_1_CTRL_REG,
26827 +- 0x3F, val);
26828 +- break;
26829 +- case 1:
26830 +- ret = regmap_update_bits(smi->map,
26831 +- RTL8366RB_LED_0_1_CTRL_REG,
26832 +- 0x3F << RTL8366RB_LED_1_OFFSET,
26833 +- val << RTL8366RB_LED_1_OFFSET);
26834 +- break;
26835 +- case 2:
26836 +- ret = regmap_update_bits(smi->map,
26837 +- RTL8366RB_LED_2_3_CTRL_REG,
26838 +- 0x3F, val);
26839 +- break;
26840 +- case 3:
26841 +- ret = regmap_update_bits(smi->map,
26842 +- RTL8366RB_LED_2_3_CTRL_REG,
26843 +- 0x3F << RTL8366RB_LED_3_OFFSET,
26844 +- val << RTL8366RB_LED_3_OFFSET);
26845 +- break;
26846 +- case 4:
26847 +- ret = regmap_update_bits(smi->map,
26848 +- RTL8366RB_INTERRUPT_CONTROL_REG,
26849 +- RTL8366RB_P4_RGMII_LED,
26850 +- enable ? RTL8366RB_P4_RGMII_LED : 0);
26851 +- break;
26852 +- default:
26853 +- dev_err(smi->dev, "no LED for port %d\n", port);
26854 +- return;
26855 +- }
26856 +- if (ret)
26857 +- dev_err(smi->dev, "error updating LED on port %d\n", port);
26858 +-}
26859 +-
26860 +-static int
26861 +-rtl8366rb_port_enable(struct dsa_switch *ds, int port,
26862 +- struct phy_device *phy)
26863 +-{
26864 +- struct realtek_smi *smi = ds->priv;
26865 +- int ret;
26866 +-
26867 +- dev_dbg(smi->dev, "enable port %d\n", port);
26868 +- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
26869 +- 0);
26870 +- if (ret)
26871 +- return ret;
26872 +-
26873 +- rb8366rb_set_port_led(smi, port, true);
26874 +- return 0;
26875 +-}
26876 +-
26877 +-static void
26878 +-rtl8366rb_port_disable(struct dsa_switch *ds, int port)
26879 +-{
26880 +- struct realtek_smi *smi = ds->priv;
26881 +- int ret;
26882 +-
26883 +- dev_dbg(smi->dev, "disable port %d\n", port);
26884 +- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
26885 +- BIT(port));
26886 +- if (ret)
26887 +- return;
26888 +-
26889 +- rb8366rb_set_port_led(smi, port, false);
26890 +-}
26891 +-
26892 +-static int
26893 +-rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
26894 +- struct dsa_bridge bridge,
26895 +- bool *tx_fwd_offload)
26896 +-{
26897 +- struct realtek_smi *smi = ds->priv;
26898 +- unsigned int port_bitmap = 0;
26899 +- int ret, i;
26900 +-
26901 +- /* Loop over all other ports than the current one */
26902 +- for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
26903 +- /* Current port handled last */
26904 +- if (i == port)
26905 +- continue;
26906 +- /* Not on this bridge */
26907 +- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
26908 +- continue;
26909 +- /* Join this port to each other port on the bridge */
26910 +- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
26911 +- RTL8366RB_PORT_ISO_PORTS(BIT(port)),
26912 +- RTL8366RB_PORT_ISO_PORTS(BIT(port)));
26913 +- if (ret)
26914 +- dev_err(smi->dev, "failed to join port %d\n", port);
26915 +-
26916 +- port_bitmap |= BIT(i);
26917 +- }
26918 +-
26919 +- /* Set the bits for the ports we can access */
26920 +- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
26921 +- RTL8366RB_PORT_ISO_PORTS(port_bitmap),
26922 +- RTL8366RB_PORT_ISO_PORTS(port_bitmap));
26923 +-}
26924 +-
26925 +-static void
26926 +-rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
26927 +- struct dsa_bridge bridge)
26928 +-{
26929 +- struct realtek_smi *smi = ds->priv;
26930 +- unsigned int port_bitmap = 0;
26931 +- int ret, i;
26932 +-
26933 +- /* Loop over all other ports than this one */
26934 +- for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
26935 +- /* Current port handled last */
26936 +- if (i == port)
26937 +- continue;
26938 +- /* Not on this bridge */
26939 +- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
26940 +- continue;
26941 +- /* Remove this port from any other port on the bridge */
26942 +- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
26943 +- RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
26944 +- if (ret)
26945 +- dev_err(smi->dev, "failed to leave port %d\n", port);
26946 +-
26947 +- port_bitmap |= BIT(i);
26948 +- }
26949 +-
26950 +- /* Clear the bits for the ports we can not access, leave ourselves */
26951 +- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
26952 +- RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
26953 +-}
26954 +-
26955 +-/**
26956 +- * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
26957 +- * @smi: SMI state container
26958 +- * @port: the port to drop untagged and C-tagged frames on
26959 +- * @drop: whether to drop or pass untagged and C-tagged frames
26960 +- */
26961 +-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
26962 +-{
26963 +- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
26964 +- RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
26965 +- drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
26966 +-}
26967 +-
26968 +-static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
26969 +- bool vlan_filtering,
26970 +- struct netlink_ext_ack *extack)
26971 +-{
26972 +- struct realtek_smi *smi = ds->priv;
26973 +- struct rtl8366rb *rb;
26974 +- int ret;
26975 +-
26976 +- rb = smi->chip_data;
26977 +-
26978 +- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
26979 +- vlan_filtering ? "enable" : "disable");
26980 +-
26981 +- /* If the port is not in the member set, the frame will be dropped */
26982 +- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
26983 +- BIT(port), vlan_filtering ? BIT(port) : 0);
26984 +- if (ret)
26985 +- return ret;
26986 +-
26987 +- /* If VLAN filtering is enabled and PVID is also enabled, we must
26988 +- * not drop any untagged or C-tagged frames. If we turn off VLAN
26989 +- * filtering on a port, we need to accept any frames.
26990 +- */
26991 +- if (vlan_filtering)
26992 +- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
26993 +- else
26994 +- ret = rtl8366rb_drop_untagged(smi, port, false);
26995 +-
26996 +- return ret;
26997 +-}
26998 +-
26999 +-static int
27000 +-rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
27001 +- struct switchdev_brport_flags flags,
27002 +- struct netlink_ext_ack *extack)
27003 +-{
27004 +- /* We support enabling/disabling learning */
27005 +- if (flags.mask & ~(BR_LEARNING))
27006 +- return -EINVAL;
27007 +-
27008 +- return 0;
27009 +-}
27010 +-
27011 +-static int
27012 +-rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
27013 +- struct switchdev_brport_flags flags,
27014 +- struct netlink_ext_ack *extack)
27015 +-{
27016 +- struct realtek_smi *smi = ds->priv;
27017 +- int ret;
27018 +-
27019 +- if (flags.mask & BR_LEARNING) {
27020 +- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
27021 +- BIT(port),
27022 +- (flags.val & BR_LEARNING) ? 0 : BIT(port));
27023 +- if (ret)
27024 +- return ret;
27025 +- }
27026 +-
27027 +- return 0;
27028 +-}
27029 +-
27030 +-static void
27031 +-rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
27032 +-{
27033 +- struct realtek_smi *smi = ds->priv;
27034 +- u32 val;
27035 +- int i;
27036 +-
27037 +- switch (state) {
27038 +- case BR_STATE_DISABLED:
27039 +- val = RTL8366RB_STP_STATE_DISABLED;
27040 +- break;
27041 +- case BR_STATE_BLOCKING:
27042 +- case BR_STATE_LISTENING:
27043 +- val = RTL8366RB_STP_STATE_BLOCKING;
27044 +- break;
27045 +- case BR_STATE_LEARNING:
27046 +- val = RTL8366RB_STP_STATE_LEARNING;
27047 +- break;
27048 +- case BR_STATE_FORWARDING:
27049 +- val = RTL8366RB_STP_STATE_FORWARDING;
27050 +- break;
27051 +- default:
27052 +- dev_err(smi->dev, "unknown bridge state requested\n");
27053 +- return;
27054 +- }
27055 +-
27056 +- /* Set the same status for the port on all the FIDs */
27057 +- for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
27058 +- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
27059 +- RTL8366RB_STP_STATE_MASK(port),
27060 +- RTL8366RB_STP_STATE(port, val));
27061 +- }
27062 +-}
27063 +-
27064 +-static void
27065 +-rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
27066 +-{
27067 +- struct realtek_smi *smi = ds->priv;
27068 +-
27069 +- /* This will age out any learned L2 entries */
27070 +- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
27071 +- BIT(port), BIT(port));
27072 +- /* Restore the normal state of things */
27073 +- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
27074 +- BIT(port), 0);
27075 +-}
27076 +-
27077 +-static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
27078 +-{
27079 +- struct realtek_smi *smi = ds->priv;
27080 +- struct rtl8366rb *rb;
27081 +- unsigned int max_mtu;
27082 +- u32 len;
27083 +- int i;
27084 +-
27085 +- /* Cache the per-port MTU setting */
27086 +- rb = smi->chip_data;
27087 +- rb->max_mtu[port] = new_mtu;
27088 +-
27089 +- /* Roof out the MTU for the entire switch to the greatest
27090 +- * common denominator: the biggest set for any one port will
27091 +- * be the biggest MTU for the switch.
27092 +- *
27093 +- * The first setting, 1522 bytes, is max IP packet 1500 bytes,
27094 +- * plus ethernet header, 1518 bytes, plus CPU tag, 4 bytes.
27095 +- * This function should consider the parameter an SDU, so the
27096 +- * MTU passed for this setting is 1518 bytes. The same logic
27097 +- * of subtracting the DSA tag of 4 bytes apply to the other
27098 +- * settings.
27099 +- */
27100 +- max_mtu = 1518;
27101 +- for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
27102 +- if (rb->max_mtu[i] > max_mtu)
27103 +- max_mtu = rb->max_mtu[i];
27104 +- }
27105 +- if (max_mtu <= 1518)
27106 +- len = RTL8366RB_SGCR_MAX_LENGTH_1522;
27107 +- else if (max_mtu > 1518 && max_mtu <= 1532)
27108 +- len = RTL8366RB_SGCR_MAX_LENGTH_1536;
27109 +- else if (max_mtu > 1532 && max_mtu <= 1548)
27110 +- len = RTL8366RB_SGCR_MAX_LENGTH_1552;
27111 +- else
27112 +- len = RTL8366RB_SGCR_MAX_LENGTH_16000;
27113 +-
27114 +- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
27115 +- RTL8366RB_SGCR_MAX_LENGTH_MASK,
27116 +- len);
27117 +-}
27118 +-
27119 +-static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
27120 +-{
27121 +- /* The max MTU is 16000 bytes, so we subtract the CPU tag
27122 +- * and the max presented to the system is 15996 bytes.
27123 +- */
27124 +- return 15996;
27125 +-}
27126 +-
27127 +-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
27128 +- struct rtl8366_vlan_4k *vlan4k)
27129 +-{
27130 +- u32 data[3];
27131 +- int ret;
27132 +- int i;
27133 +-
27134 +- memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
27135 +-
27136 +- if (vid >= RTL8366RB_NUM_VIDS)
27137 +- return -EINVAL;
27138 +-
27139 +- /* write VID */
27140 +- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
27141 +- vid & RTL8366RB_VLAN_VID_MASK);
27142 +- if (ret)
27143 +- return ret;
27144 +-
27145 +- /* write table access control word */
27146 +- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
27147 +- RTL8366RB_TABLE_VLAN_READ_CTRL);
27148 +- if (ret)
27149 +- return ret;
27150 +-
27151 +- for (i = 0; i < 3; i++) {
27152 +- ret = regmap_read(smi->map,
27153 +- RTL8366RB_VLAN_TABLE_READ_BASE + i,
27154 +- &data[i]);
27155 +- if (ret)
27156 +- return ret;
27157 +- }
27158 +-
27159 +- vlan4k->vid = vid;
27160 +- vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
27161 +- RTL8366RB_VLAN_UNTAG_MASK;
27162 +- vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
27163 +- vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
27164 +-
27165 +- return 0;
27166 +-}
27167 +-
27168 +-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
27169 +- const struct rtl8366_vlan_4k *vlan4k)
27170 +-{
27171 +- u32 data[3];
27172 +- int ret;
27173 +- int i;
27174 +-
27175 +- if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
27176 +- vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
27177 +- vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
27178 +- vlan4k->fid > RTL8366RB_FIDMAX)
27179 +- return -EINVAL;
27180 +-
27181 +- data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
27182 +- data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
27183 +- ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
27184 +- RTL8366RB_VLAN_UNTAG_SHIFT);
27185 +- data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
27186 +-
27187 +- for (i = 0; i < 3; i++) {
27188 +- ret = regmap_write(smi->map,
27189 +- RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
27190 +- data[i]);
27191 +- if (ret)
27192 +- return ret;
27193 +- }
27194 +-
27195 +- /* write table access control word */
27196 +- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
27197 +- RTL8366RB_TABLE_VLAN_WRITE_CTRL);
27198 +-
27199 +- return ret;
27200 +-}
27201 +-
27202 +-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
27203 +- struct rtl8366_vlan_mc *vlanmc)
27204 +-{
27205 +- u32 data[3];
27206 +- int ret;
27207 +- int i;
27208 +-
27209 +- memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
27210 +-
27211 +- if (index >= RTL8366RB_NUM_VLANS)
27212 +- return -EINVAL;
27213 +-
27214 +- for (i = 0; i < 3; i++) {
27215 +- ret = regmap_read(smi->map,
27216 +- RTL8366RB_VLAN_MC_BASE(index) + i,
27217 +- &data[i]);
27218 +- if (ret)
27219 +- return ret;
27220 +- }
27221 +-
27222 +- vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
27223 +- vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
27224 +- RTL8366RB_VLAN_PRIORITY_MASK;
27225 +- vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
27226 +- RTL8366RB_VLAN_UNTAG_MASK;
27227 +- vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
27228 +- vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
27229 +-
27230 +- return 0;
27231 +-}
27232 +-
27233 +-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
27234 +- const struct rtl8366_vlan_mc *vlanmc)
27235 +-{
27236 +- u32 data[3];
27237 +- int ret;
27238 +- int i;
27239 +-
27240 +- if (index >= RTL8366RB_NUM_VLANS ||
27241 +- vlanmc->vid >= RTL8366RB_NUM_VIDS ||
27242 +- vlanmc->priority > RTL8366RB_PRIORITYMAX ||
27243 +- vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
27244 +- vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
27245 +- vlanmc->fid > RTL8366RB_FIDMAX)
27246 +- return -EINVAL;
27247 +-
27248 +- data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
27249 +- ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
27250 +- RTL8366RB_VLAN_PRIORITY_SHIFT);
27251 +- data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
27252 +- ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
27253 +- RTL8366RB_VLAN_UNTAG_SHIFT);
27254 +- data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
27255 +-
27256 +- for (i = 0; i < 3; i++) {
27257 +- ret = regmap_write(smi->map,
27258 +- RTL8366RB_VLAN_MC_BASE(index) + i,
27259 +- data[i]);
27260 +- if (ret)
27261 +- return ret;
27262 +- }
27263 +-
27264 +- return 0;
27265 +-}
27266 +-
27267 +-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
27268 +-{
27269 +- u32 data;
27270 +- int ret;
27271 +-
27272 +- if (port >= smi->num_ports)
27273 +- return -EINVAL;
27274 +-
27275 +- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
27276 +- &data);
27277 +- if (ret)
27278 +- return ret;
27279 +-
27280 +- *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
27281 +- RTL8366RB_PORT_VLAN_CTRL_MASK;
27282 +-
27283 +- return 0;
27284 +-}
27285 +-
27286 +-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
27287 +-{
27288 +- struct rtl8366rb *rb;
27289 +- bool pvid_enabled;
27290 +- int ret;
27291 +-
27292 +- rb = smi->chip_data;
27293 +- pvid_enabled = !!index;
27294 +-
27295 +- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
27296 +- return -EINVAL;
27297 +-
27298 +- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
27299 +- RTL8366RB_PORT_VLAN_CTRL_MASK <<
27300 +- RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
27301 +- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
27302 +- RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
27303 +- if (ret)
27304 +- return ret;
27305 +-
27306 +- rb->pvid_enabled[port] = pvid_enabled;
27307 +-
27308 +- /* If VLAN filtering is enabled and PVID is also enabled, we must
27309 +- * not drop any untagged or C-tagged frames. Make sure to update the
27310 +- * filtering setting.
27311 +- */
27312 +- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
27313 +- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
27314 +-
27315 +- return ret;
27316 +-}
27317 +-
27318 +-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
27319 +-{
27320 +- unsigned int max = RTL8366RB_NUM_VLANS - 1;
27321 +-
27322 +- if (smi->vlan4k_enabled)
27323 +- max = RTL8366RB_NUM_VIDS - 1;
27324 +-
27325 +- if (vlan > max)
27326 +- return false;
27327 +-
27328 +- return true;
27329 +-}
27330 +-
27331 +-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
27332 +-{
27333 +- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
27334 +- return regmap_update_bits(smi->map,
27335 +- RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
27336 +- enable ? RTL8366RB_SGCR_EN_VLAN : 0);
27337 +-}
27338 +-
27339 +-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
27340 +-{
27341 +- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
27342 +- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
27343 +- RTL8366RB_SGCR_EN_VLAN_4KTB,
27344 +- enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
27345 +-}
27346 +-
27347 +-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
27348 +-{
27349 +- u32 val;
27350 +- u32 reg;
27351 +- int ret;
27352 +-
27353 +- if (phy > RTL8366RB_PHY_NO_MAX)
27354 +- return -EINVAL;
27355 +-
27356 +- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
27357 +- RTL8366RB_PHY_CTRL_READ);
27358 +- if (ret)
27359 +- return ret;
27360 +-
27361 +- reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
27362 +-
27363 +- ret = regmap_write(smi->map, reg, 0);
27364 +- if (ret) {
27365 +- dev_err(smi->dev,
27366 +- "failed to write PHY%d reg %04x @ %04x, ret %d\n",
27367 +- phy, regnum, reg, ret);
27368 +- return ret;
27369 +- }
27370 +-
27371 +- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
27372 +- if (ret)
27373 +- return ret;
27374 +-
27375 +- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
27376 +- phy, regnum, reg, val);
27377 +-
27378 +- return val;
27379 +-}
27380 +-
27381 +-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
27382 +- u16 val)
27383 +-{
27384 +- u32 reg;
27385 +- int ret;
27386 +-
27387 +- if (phy > RTL8366RB_PHY_NO_MAX)
27388 +- return -EINVAL;
27389 +-
27390 +- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
27391 +- RTL8366RB_PHY_CTRL_WRITE);
27392 +- if (ret)
27393 +- return ret;
27394 +-
27395 +- reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
27396 +-
27397 +- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
27398 +- phy, regnum, reg, val);
27399 +-
27400 +- ret = regmap_write(smi->map, reg, val);
27401 +- if (ret)
27402 +- return ret;
27403 +-
27404 +- return 0;
27405 +-}
27406 +-
27407 +-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
27408 +-{
27409 +- int timeout = 10;
27410 +- u32 val;
27411 +- int ret;
27412 +-
27413 +- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
27414 +- RTL8366RB_CHIP_CTRL_RESET_HW);
27415 +- do {
27416 +- usleep_range(20000, 25000);
27417 +- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
27418 +- if (ret)
27419 +- return ret;
27420 +-
27421 +- if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
27422 +- break;
27423 +- } while (--timeout);
27424 +-
27425 +- if (!timeout) {
27426 +- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
27427 +- return -EIO;
27428 +- }
27429 +-
27430 +- return 0;
27431 +-}
27432 +-
27433 +-static int rtl8366rb_detect(struct realtek_smi *smi)
27434 +-{
27435 +- struct device *dev = smi->dev;
27436 +- int ret;
27437 +- u32 val;
27438 +-
27439 +- /* Detect device */
27440 +- ret = regmap_read(smi->map, 0x5c, &val);
27441 +- if (ret) {
27442 +- dev_err(dev, "can't get chip ID (%d)\n", ret);
27443 +- return ret;
27444 +- }
27445 +-
27446 +- switch (val) {
27447 +- case 0x6027:
27448 +- dev_info(dev, "found an RTL8366S switch\n");
27449 +- dev_err(dev, "this switch is not yet supported, submit patches!\n");
27450 +- return -ENODEV;
27451 +- case 0x5937:
27452 +- dev_info(dev, "found an RTL8366RB switch\n");
27453 +- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
27454 +- smi->num_ports = RTL8366RB_NUM_PORTS;
27455 +- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
27456 +- smi->mib_counters = rtl8366rb_mib_counters;
27457 +- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
27458 +- break;
27459 +- default:
27460 +- dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
27461 +- val);
27462 +- break;
27463 +- }
27464 +-
27465 +- ret = rtl8366rb_reset_chip(smi);
27466 +- if (ret)
27467 +- return ret;
27468 +-
27469 +- return 0;
27470 +-}
27471 +-
27472 +-static const struct dsa_switch_ops rtl8366rb_switch_ops = {
27473 +- .get_tag_protocol = rtl8366_get_tag_protocol,
27474 +- .setup = rtl8366rb_setup,
27475 +- .phylink_mac_link_up = rtl8366rb_mac_link_up,
27476 +- .phylink_mac_link_down = rtl8366rb_mac_link_down,
27477 +- .get_strings = rtl8366_get_strings,
27478 +- .get_ethtool_stats = rtl8366_get_ethtool_stats,
27479 +- .get_sset_count = rtl8366_get_sset_count,
27480 +- .port_bridge_join = rtl8366rb_port_bridge_join,
27481 +- .port_bridge_leave = rtl8366rb_port_bridge_leave,
27482 +- .port_vlan_filtering = rtl8366rb_vlan_filtering,
27483 +- .port_vlan_add = rtl8366_vlan_add,
27484 +- .port_vlan_del = rtl8366_vlan_del,
27485 +- .port_enable = rtl8366rb_port_enable,
27486 +- .port_disable = rtl8366rb_port_disable,
27487 +- .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
27488 +- .port_bridge_flags = rtl8366rb_port_bridge_flags,
27489 +- .port_stp_state_set = rtl8366rb_port_stp_state_set,
27490 +- .port_fast_age = rtl8366rb_port_fast_age,
27491 +- .port_change_mtu = rtl8366rb_change_mtu,
27492 +- .port_max_mtu = rtl8366rb_max_mtu,
27493 +-};
27494 +-
27495 +-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
27496 +- .detect = rtl8366rb_detect,
27497 +- .get_vlan_mc = rtl8366rb_get_vlan_mc,
27498 +- .set_vlan_mc = rtl8366rb_set_vlan_mc,
27499 +- .get_vlan_4k = rtl8366rb_get_vlan_4k,
27500 +- .set_vlan_4k = rtl8366rb_set_vlan_4k,
27501 +- .get_mc_index = rtl8366rb_get_mc_index,
27502 +- .set_mc_index = rtl8366rb_set_mc_index,
27503 +- .get_mib_counter = rtl8366rb_get_mib_counter,
27504 +- .is_vlan_valid = rtl8366rb_is_vlan_valid,
27505 +- .enable_vlan = rtl8366rb_enable_vlan,
27506 +- .enable_vlan4k = rtl8366rb_enable_vlan4k,
27507 +- .phy_read = rtl8366rb_phy_read,
27508 +- .phy_write = rtl8366rb_phy_write,
27509 +-};
27510 +-
27511 +-const struct realtek_smi_variant rtl8366rb_variant = {
27512 +- .ds_ops = &rtl8366rb_switch_ops,
27513 +- .ops = &rtl8366rb_smi_ops,
27514 +- .clk_delay = 10,
27515 +- .cmd_read = 0xa9,
27516 +- .cmd_write = 0xa8,
27517 +- .chip_data_sz = sizeof(struct rtl8366rb),
27518 +-};
27519 +-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
27520 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
27521 +index 48520967746ff..c75c5ae64d5d8 100644
27522 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
27523 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
27524 +@@ -329,7 +329,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
27525 + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
27526 + ptp_info);
27527 + struct bnxt *bp = ptp->bp;
27528 +- u8 pin_id;
27529 ++ int pin_id;
27530 + int rc;
27531 +
27532 + switch (rq->type) {
27533 +@@ -337,6 +337,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
27534 + /* Configure an External PPS IN */
27535 + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
27536 + rq->extts.index);
27537 ++ if (!TSIO_PIN_VALID(pin_id))
27538 ++ return -EOPNOTSUPP;
27539 + if (!on)
27540 + break;
27541 + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
27542 +@@ -350,6 +352,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
27543 + /* Configure a Periodic PPS OUT */
27544 + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
27545 + rq->perout.index);
27546 ++ if (!TSIO_PIN_VALID(pin_id))
27547 ++ return -EOPNOTSUPP;
27548 + if (!on)
27549 + break;
27550 +
27551 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
27552 +index 7c528e1f8713e..8205140db829e 100644
27553 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
27554 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
27555 +@@ -31,7 +31,7 @@ struct pps_pin {
27556 + u8 state;
27557 + };
27558 +
27559 +-#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
27560 ++#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
27561 +
27562 + #define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
27563 + ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
27564 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
27565 +index 2da804f84b480..bd5998012a876 100644
27566 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
27567 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
27568 +@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
27569 + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
27570 + __raw_writel(value, offset);
27571 + else
27572 +- writel_relaxed(value, offset);
27573 ++ writel(value, offset);
27574 + }
27575 +
27576 + static inline u32 bcmgenet_readl(void __iomem *offset)
27577 +@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
27578 + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
27579 + return __raw_readl(offset);
27580 + else
27581 +- return readl_relaxed(offset);
27582 ++ return readl(offset);
27583 + }
27584 +
27585 + static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
27586 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
27587 +index fa5b4f885b177..60ec64bfb3f0b 100644
27588 +--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
27589 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
27590 +@@ -674,7 +674,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
27591 + #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
27592 + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
27593 + SOF_TIMESTAMPING_RX_HARDWARE |
27594 +- SOF_TIMESTAMPING_RAW_HARDWARE;
27595 ++ SOF_TIMESTAMPING_RAW_HARDWARE |
27596 ++ SOF_TIMESTAMPING_TX_SOFTWARE |
27597 ++ SOF_TIMESTAMPING_RX_SOFTWARE |
27598 ++ SOF_TIMESTAMPING_SOFTWARE;
27599 +
27600 + info->tx_types = (1 << HWTSTAMP_TX_OFF) |
27601 + (1 << HWTSTAMP_TX_ON) |
27602 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
27603 +index 3555c12edb45a..d3d7172e0fcc5 100644
27604 +--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
27605 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
27606 +@@ -45,6 +45,7 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
27607 + | pspeed);
27608 + }
27609 +
27610 ++#define ENETC_QOS_ALIGN 64
27611 + static int enetc_setup_taprio(struct net_device *ndev,
27612 + struct tc_taprio_qopt_offload *admin_conf)
27613 + {
27614 +@@ -52,10 +53,11 @@ static int enetc_setup_taprio(struct net_device *ndev,
27615 + struct enetc_cbd cbd = {.cmd = 0};
27616 + struct tgs_gcl_conf *gcl_config;
27617 + struct tgs_gcl_data *gcl_data;
27618 ++ dma_addr_t dma, dma_align;
27619 + struct gce *gce;
27620 +- dma_addr_t dma;
27621 + u16 data_size;
27622 + u16 gcl_len;
27623 ++ void *tmp;
27624 + u32 tge;
27625 + int err;
27626 + int i;
27627 +@@ -82,9 +84,16 @@ static int enetc_setup_taprio(struct net_device *ndev,
27628 + gcl_config = &cbd.gcl_conf;
27629 +
27630 + data_size = struct_size(gcl_data, entry, gcl_len);
27631 +- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
27632 +- if (!gcl_data)
27633 ++ tmp = dma_alloc_coherent(&priv->si->pdev->dev,
27634 ++ data_size + ENETC_QOS_ALIGN,
27635 ++ &dma, GFP_KERNEL);
27636 ++ if (!tmp) {
27637 ++ dev_err(&priv->si->pdev->dev,
27638 ++ "DMA mapping of taprio gate list failed!\n");
27639 + return -ENOMEM;
27640 ++ }
27641 ++ dma_align = ALIGN(dma, ENETC_QOS_ALIGN);
27642 ++ gcl_data = (struct tgs_gcl_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN);
27643 +
27644 + gce = (struct gce *)(gcl_data + 1);
27645 +
27646 +@@ -110,16 +119,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
27647 + cbd.length = cpu_to_le16(data_size);
27648 + cbd.status_flags = 0;
27649 +
27650 +- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
27651 +- data_size, DMA_TO_DEVICE);
27652 +- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
27653 +- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
27654 +- kfree(gcl_data);
27655 +- return -ENOMEM;
27656 +- }
27657 +-
27658 +- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
27659 +- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
27660 ++ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
27661 ++ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
27662 + cbd.cls = BDCR_CMD_PORT_GCL;
27663 + cbd.status_flags = 0;
27664 +
27665 +@@ -132,8 +133,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
27666 + ENETC_QBV_PTGCR_OFFSET,
27667 + tge & (~ENETC_QBV_TGE));
27668 +
27669 +- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
27670 +- kfree(gcl_data);
27671 ++ dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN,
27672 ++ tmp, dma);
27673 +
27674 + return err;
27675 + }
27676 +@@ -463,8 +464,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
27677 + struct enetc_cbd cbd = {.cmd = 0};
27678 + struct streamid_data *si_data;
27679 + struct streamid_conf *si_conf;
27680 ++ dma_addr_t dma, dma_align;
27681 + u16 data_size;
27682 +- dma_addr_t dma;
27683 ++ void *tmp;
27684 + int port;
27685 + int err;
27686 +
27687 +@@ -485,21 +487,20 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
27688 + cbd.status_flags = 0;
27689 +
27690 + data_size = sizeof(struct streamid_data);
27691 +- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
27692 +- if (!si_data)
27693 ++ tmp = dma_alloc_coherent(&priv->si->pdev->dev,
27694 ++ data_size + ENETC_QOS_ALIGN,
27695 ++ &dma, GFP_KERNEL);
27696 ++ if (!tmp) {
27697 ++ dev_err(&priv->si->pdev->dev,
27698 ++ "DMA mapping of stream identify failed!\n");
27699 + return -ENOMEM;
27700 +- cbd.length = cpu_to_le16(data_size);
27701 +-
27702 +- dma = dma_map_single(&priv->si->pdev->dev, si_data,
27703 +- data_size, DMA_FROM_DEVICE);
27704 +- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
27705 +- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
27706 +- err = -ENOMEM;
27707 +- goto out;
27708 + }
27709 ++ dma_align = ALIGN(dma, ENETC_QOS_ALIGN);
27710 ++ si_data = (struct streamid_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN);
27711 +
27712 +- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
27713 +- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
27714 ++ cbd.length = cpu_to_le16(data_size);
27715 ++ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
27716 ++ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
27717 + eth_broadcast_addr(si_data->dmac);
27718 + si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
27719 + + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
27720 +@@ -539,8 +540,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
27721 +
27722 + cbd.length = cpu_to_le16(data_size);
27723 +
27724 +- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
27725 +- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
27726 ++ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
27727 ++ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
27728 +
27729 + /* VIDM default to be 1.
27730 + * VID Match. If set (b1) then the VID must match, otherwise
27731 +@@ -561,10 +562,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
27732 +
27733 + err = enetc_send_cmd(priv->si, &cbd);
27734 + out:
27735 +- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
27736 +- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
27737 +-
27738 +- kfree(si_data);
27739 ++ dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN,
27740 ++ tmp, dma);
27741 +
27742 + return err;
27743 + }
27744 +@@ -633,8 +632,9 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
27745 + {
27746 + struct enetc_cbd cbd = { .cmd = 2 };
27747 + struct sfi_counter_data *data_buf;
27748 +- dma_addr_t dma;
27749 ++ dma_addr_t dma, dma_align;
27750 + u16 data_size;
27751 ++ void *tmp;
27752 + int err;
27753 +
27754 + cbd.index = cpu_to_le16((u16)index);
27755 +@@ -643,19 +643,19 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
27756 + cbd.status_flags = 0;
27757 +
27758 + data_size = sizeof(struct sfi_counter_data);
27759 +- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
27760 +- if (!data_buf)
27761 ++ tmp = dma_alloc_coherent(&priv->si->pdev->dev,
27762 ++ data_size + ENETC_QOS_ALIGN,
27763 ++ &dma, GFP_KERNEL);
27764 ++ if (!tmp) {
27765 ++ dev_err(&priv->si->pdev->dev,
27766 ++ "DMA mapping of stream counter failed!\n");
27767 + return -ENOMEM;
27768 +-
27769 +- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
27770 +- data_size, DMA_FROM_DEVICE);
27771 +- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
27772 +- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
27773 +- err = -ENOMEM;
27774 +- goto exit;
27775 + }
27776 +- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
27777 +- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
27778 ++ dma_align = ALIGN(dma, ENETC_QOS_ALIGN);
27779 ++ data_buf = (struct sfi_counter_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN);
27780 ++
27781 ++ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
27782 ++ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
27783 +
27784 + cbd.length = cpu_to_le16(data_size);
27785 +
27786 +@@ -684,7 +684,9 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
27787 + data_buf->flow_meter_dropl;
27788 +
27789 + exit:
27790 +- kfree(data_buf);
27791 ++ dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN,
27792 ++ tmp, dma);
27793 ++
27794 + return err;
27795 + }
27796 +
27797 +@@ -723,9 +725,10 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
27798 + struct sgcl_conf *sgcl_config;
27799 + struct sgcl_data *sgcl_data;
27800 + struct sgce *sgce;
27801 +- dma_addr_t dma;
27802 ++ dma_addr_t dma, dma_align;
27803 + u16 data_size;
27804 + int err, i;
27805 ++ void *tmp;
27806 + u64 now;
27807 +
27808 + cbd.index = cpu_to_le16(sgi->index);
27809 +@@ -772,24 +775,20 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
27810 + sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
27811 +
27812 + data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
27813 +-
27814 +- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
27815 +- if (!sgcl_data)
27816 +- return -ENOMEM;
27817 +-
27818 +- cbd.length = cpu_to_le16(data_size);
27819 +-
27820 +- dma = dma_map_single(&priv->si->pdev->dev,
27821 +- sgcl_data, data_size,
27822 +- DMA_FROM_DEVICE);
27823 +- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
27824 +- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
27825 +- kfree(sgcl_data);
27826 ++ tmp = dma_alloc_coherent(&priv->si->pdev->dev,
27827 ++ data_size + ENETC_QOS_ALIGN,
27828 ++ &dma, GFP_KERNEL);
27829 ++ if (!tmp) {
27830 ++ dev_err(&priv->si->pdev->dev,
27831 ++ "DMA mapping of stream counter failed!\n");
27832 + return -ENOMEM;
27833 + }
27834 ++ dma_align = ALIGN(dma, ENETC_QOS_ALIGN);
27835 ++ sgcl_data = (struct sgcl_data *)PTR_ALIGN(tmp, ENETC_QOS_ALIGN);
27836 +
27837 +- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
27838 +- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
27839 ++ cbd.length = cpu_to_le16(data_size);
27840 ++ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
27841 ++ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
27842 +
27843 + sgce = &sgcl_data->sgcl[0];
27844 +
27845 +@@ -844,7 +843,8 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
27846 + err = enetc_send_cmd(priv->si, &cbd);
27847 +
27848 + exit:
27849 +- kfree(sgcl_data);
27850 ++ dma_free_coherent(&priv->si->pdev->dev, data_size + ENETC_QOS_ALIGN,
27851 ++ tmp, dma);
27852 +
27853 + return err;
27854 + }
27855 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
27856 +index 9298fbecb31ac..8184a954f6481 100644
27857 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
27858 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
27859 +@@ -536,6 +536,8 @@ struct hnae3_ae_dev {
27860 + * Get 1588 rx hwstamp
27861 + * get_ts_info
27862 + * Get phc info
27863 ++ * clean_vf_config
27864 ++ * Clean residual vf info after disable sriov
27865 + */
27866 + struct hnae3_ae_ops {
27867 + int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
27868 +@@ -729,6 +731,7 @@ struct hnae3_ae_ops {
27869 + struct ethtool_ts_info *info);
27870 + int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
27871 + u32 *status_code);
27872 ++ void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
27873 + };
27874 +
27875 + struct hnae3_dcb_ops {
27876 +@@ -841,6 +844,7 @@ struct hnae3_handle {
27877 + struct dentry *hnae3_dbgfs;
27878 + /* protects concurrent contention between debugfs commands */
27879 + struct mutex dbgfs_lock;
27880 ++ char **dbgfs_buf;
27881 +
27882 + /* Network interface message level enabled bits */
27883 + u32 msg_enable;
27884 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
27885 +index f726a5b70f9e2..44d9b560b3374 100644
27886 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
27887 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
27888 +@@ -1227,7 +1227,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
27889 + return ret;
27890 +
27891 + mutex_lock(&handle->dbgfs_lock);
27892 +- save_buf = &hns3_dbg_cmd[index].buf;
27893 ++ save_buf = &handle->dbgfs_buf[index];
27894 +
27895 + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
27896 + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
27897 +@@ -1332,6 +1332,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
27898 + int ret;
27899 + u32 i;
27900 +
27901 ++ handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
27902 ++ ARRAY_SIZE(hns3_dbg_cmd),
27903 ++ sizeof(*handle->dbgfs_buf),
27904 ++ GFP_KERNEL);
27905 ++ if (!handle->dbgfs_buf)
27906 ++ return -ENOMEM;
27907 ++
27908 + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
27909 + debugfs_create_dir(name, hns3_dbgfs_root);
27910 + handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
27911 +@@ -1380,9 +1387,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
27912 + u32 i;
27913 +
27914 + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
27915 +- if (hns3_dbg_cmd[i].buf) {
27916 +- kvfree(hns3_dbg_cmd[i].buf);
27917 +- hns3_dbg_cmd[i].buf = NULL;
27918 ++ if (handle->dbgfs_buf[i]) {
27919 ++ kvfree(handle->dbgfs_buf[i]);
27920 ++ handle->dbgfs_buf[i] = NULL;
27921 + }
27922 +
27923 + mutex_destroy(&handle->dbgfs_lock);
27924 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
27925 +index 83aa1450ab9fe..97578eabb7d8b 100644
27926 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
27927 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
27928 +@@ -49,7 +49,6 @@ struct hns3_dbg_cmd_info {
27929 + enum hnae3_dbg_cmd cmd;
27930 + enum hns3_dbg_dentry_type dentry;
27931 + u32 buf_len;
27932 +- char *buf;
27933 + int (*init)(struct hnae3_handle *handle, unsigned int cmd);
27934 + };
27935 +
27936 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
27937 +index babc5d7a3b526..f6082be7481c1 100644
27938 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
27939 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
27940 +@@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
27941 +
27942 + static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
27943 + {
27944 ++ u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
27945 + struct hns3_tx_spare *tx_spare;
27946 + struct page *page;
27947 +- u32 alloc_size;
27948 + dma_addr_t dma;
27949 + int order;
27950 +
27951 +- alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
27952 + if (!alloc_size)
27953 + return;
27954 +
27955 + order = get_order(alloc_size);
27956 ++ if (order >= MAX_ORDER) {
27957 ++ if (net_ratelimit())
27958 ++ dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
27959 ++ return;
27960 ++ }
27961 ++
27962 + tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
27963 + GFP_KERNEL);
27964 + if (!tx_spare) {
27965 + /* The driver still work without the tx spare buffer */
27966 + dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
27967 +- return;
27968 ++ goto devm_kzalloc_error;
27969 + }
27970 +
27971 + page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
27972 + GFP_KERNEL, order);
27973 + if (!page) {
27974 + dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
27975 +- devm_kfree(ring_to_dev(ring), tx_spare);
27976 +- return;
27977 ++ goto alloc_pages_error;
27978 + }
27979 +
27980 + dma = dma_map_page(ring_to_dev(ring), page, 0,
27981 + PAGE_SIZE << order, DMA_TO_DEVICE);
27982 + if (dma_mapping_error(ring_to_dev(ring), dma)) {
27983 + dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
27984 +- put_page(page);
27985 +- devm_kfree(ring_to_dev(ring), tx_spare);
27986 +- return;
27987 ++ goto dma_mapping_error;
27988 + }
27989 +
27990 + tx_spare->dma = dma;
27991 + tx_spare->buf = page_address(page);
27992 + tx_spare->len = PAGE_SIZE << order;
27993 + ring->tx_spare = tx_spare;
27994 ++ return;
27995 ++
27996 ++dma_mapping_error:
27997 ++ put_page(page);
27998 ++alloc_pages_error:
27999 ++ devm_kfree(ring_to_dev(ring), tx_spare);
28000 ++devm_kzalloc_error:
28001 ++ ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
28002 + }
28003 +
28004 + /* Use hns3_tx_spare_space() to make sure there is enough buffer
28005 +@@ -2982,6 +2992,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
28006 + return ret;
28007 + }
28008 +
28009 ++/**
28010 ++ * hns3_clean_vf_config
28011 ++ * @pdev: pointer to a pci_dev structure
28012 ++ * @num_vfs: number of VFs allocated
28013 ++ *
28014 ++ * Clean residual vf config after disable sriov
28015 ++ **/
28016 ++static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
28017 ++{
28018 ++ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
28019 ++
28020 ++ if (ae_dev->ops->clean_vf_config)
28021 ++ ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
28022 ++}
28023 ++
28024 + /* hns3_remove - Device removal routine
28025 + * @pdev: PCI device information struct
28026 + */
28027 +@@ -3020,7 +3045,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
28028 + else
28029 + return num_vfs;
28030 + } else if (!pci_vfs_assigned(pdev)) {
28031 ++ int num_vfs_pre = pci_num_vf(pdev);
28032 ++
28033 + pci_disable_sriov(pdev);
28034 ++ hns3_clean_vf_config(pdev, num_vfs_pre);
28035 + } else {
28036 + dev_warn(&pdev->dev,
28037 + "Unable to free VFs because some are assigned to VMs.\n");
28038 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
28039 +index c06c39ece80da..cbf36cc86803a 100644
28040 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
28041 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
28042 +@@ -651,8 +651,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
28043 + struct hnae3_handle *h = priv->ae_handle;
28044 + int rx_queue_index = h->kinfo.num_tqps;
28045 +
28046 +- if (hns3_nic_resetting(netdev)) {
28047 +- netdev_err(netdev, "dev resetting!");
28048 ++ if (hns3_nic_resetting(netdev) || !priv->ring) {
28049 ++ netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n");
28050 + return;
28051 + }
28052 +
28053 +@@ -1072,8 +1072,14 @@ static int hns3_check_ringparam(struct net_device *ndev,
28054 + {
28055 + #define RX_BUF_LEN_2K 2048
28056 + #define RX_BUF_LEN_4K 4096
28057 +- if (hns3_nic_resetting(ndev))
28058 ++
28059 ++ struct hns3_nic_priv *priv = netdev_priv(ndev);
28060 ++
28061 ++ if (hns3_nic_resetting(ndev) || !priv->ring) {
28062 ++ netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
28063 + return -EBUSY;
28064 ++ }
28065 ++
28066 +
28067 + if (param->rx_mini_pending || param->rx_jumbo_pending)
28068 + return -EINVAL;
28069 +@@ -1764,9 +1770,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
28070 + struct hnae3_handle *h = priv->ae_handle;
28071 + int ret;
28072 +
28073 +- if (hns3_nic_resetting(netdev))
28074 +- return -EBUSY;
28075 +-
28076 + h->kinfo.tx_spare_buf_size = data;
28077 +
28078 + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
28079 +@@ -1797,6 +1800,11 @@ static int hns3_set_tunable(struct net_device *netdev,
28080 + struct hnae3_handle *h = priv->ae_handle;
28081 + int i, ret = 0;
28082 +
28083 ++ if (hns3_nic_resetting(netdev) || !priv->ring) {
28084 ++ netdev_err(netdev, "failed to set tunable value, dev resetting!");
28085 ++ return -EBUSY;
28086 ++ }
28087 ++
28088 + switch (tuna->id) {
28089 + case ETHTOOL_TX_COPYBREAK:
28090 + priv->tx_copybreak = *(u32 *)data;
28091 +@@ -1816,7 +1824,8 @@ static int hns3_set_tunable(struct net_device *netdev,
28092 + old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
28093 + new_tx_spare_buf_size = *(u32 *)data;
28094 + ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
28095 +- if (ret) {
28096 ++ if (ret ||
28097 ++ (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
28098 + int ret1;
28099 +
28100 + netdev_warn(netdev,
28101 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
28102 +index 24f7afacae028..e96bc61a0a877 100644
28103 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
28104 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
28105 +@@ -1863,6 +1863,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
28106 + vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
28107 + vport->mps = HCLGE_MAC_DEFAULT_FRAME;
28108 + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
28109 ++ vport->port_base_vlan_cfg.tbl_sta = true;
28110 + vport->rxvlan_cfg.rx_vlan_offload_en = true;
28111 + vport->req_vlan_fltr_en = true;
28112 + INIT_LIST_HEAD(&vport->vlan_list);
28113 +@@ -8429,12 +8430,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
28114 + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
28115 + hclge_prepare_mac_addr(&req, addr, false);
28116 + ret = hclge_remove_mac_vlan_tbl(vport, &req);
28117 +- if (!ret) {
28118 ++ if (!ret || ret == -ENOENT) {
28119 + mutex_lock(&hdev->vport_lock);
28120 + hclge_update_umv_space(vport, true);
28121 + mutex_unlock(&hdev->vport_lock);
28122 +- } else if (ret == -ENOENT) {
28123 +- ret = 0;
28124 ++ return 0;
28125 + }
28126 +
28127 + return ret;
28128 +@@ -8984,11 +8984,16 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
28129 +
28130 + ether_addr_copy(vport->vf_info.mac, mac_addr);
28131 +
28132 ++ /* there is a timewindow for PF to know VF unalive, it may
28133 ++ * cause send mailbox fail, but it doesn't matter, VF will
28134 ++ * query it when reinit.
28135 ++ */
28136 + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
28137 + dev_info(&hdev->pdev->dev,
28138 + "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
28139 + vf, format_mac_addr);
28140 +- return hclge_inform_reset_assert_to_vf(vport);
28141 ++ (void)hclge_inform_reset_assert_to_vf(vport);
28142 ++ return 0;
28143 + }
28144 +
28145 + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
28146 +@@ -9809,19 +9814,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
28147 + bool writen_to_tbl)
28148 + {
28149 + struct hclge_vport_vlan_cfg *vlan, *tmp;
28150 ++ struct hclge_dev *hdev = vport->back;
28151 +
28152 +- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
28153 +- if (vlan->vlan_id == vlan_id)
28154 ++ mutex_lock(&hdev->vport_lock);
28155 ++
28156 ++ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28157 ++ if (vlan->vlan_id == vlan_id) {
28158 ++ mutex_unlock(&hdev->vport_lock);
28159 + return;
28160 ++ }
28161 ++ }
28162 +
28163 + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
28164 +- if (!vlan)
28165 ++ if (!vlan) {
28166 ++ mutex_unlock(&hdev->vport_lock);
28167 + return;
28168 ++ }
28169 +
28170 + vlan->hd_tbl_status = writen_to_tbl;
28171 + vlan->vlan_id = vlan_id;
28172 +
28173 + list_add_tail(&vlan->node, &vport->vlan_list);
28174 ++ mutex_unlock(&hdev->vport_lock);
28175 + }
28176 +
28177 + static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
28178 +@@ -9830,6 +9844,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
28179 + struct hclge_dev *hdev = vport->back;
28180 + int ret;
28181 +
28182 ++ mutex_lock(&hdev->vport_lock);
28183 ++
28184 + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28185 + if (!vlan->hd_tbl_status) {
28186 + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
28187 +@@ -9839,12 +9855,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
28188 + dev_err(&hdev->pdev->dev,
28189 + "restore vport vlan list failed, ret=%d\n",
28190 + ret);
28191 ++
28192 ++ mutex_unlock(&hdev->vport_lock);
28193 + return ret;
28194 + }
28195 + }
28196 + vlan->hd_tbl_status = true;
28197 + }
28198 +
28199 ++ mutex_unlock(&hdev->vport_lock);
28200 ++
28201 + return 0;
28202 + }
28203 +
28204 +@@ -9854,6 +9874,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
28205 + struct hclge_vport_vlan_cfg *vlan, *tmp;
28206 + struct hclge_dev *hdev = vport->back;
28207 +
28208 ++ mutex_lock(&hdev->vport_lock);
28209 ++
28210 + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28211 + if (vlan->vlan_id == vlan_id) {
28212 + if (is_write_tbl && vlan->hd_tbl_status)
28213 +@@ -9868,6 +9890,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
28214 + break;
28215 + }
28216 + }
28217 ++
28218 ++ mutex_unlock(&hdev->vport_lock);
28219 + }
28220 +
28221 + void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
28222 +@@ -9875,6 +9899,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
28223 + struct hclge_vport_vlan_cfg *vlan, *tmp;
28224 + struct hclge_dev *hdev = vport->back;
28225 +
28226 ++ mutex_lock(&hdev->vport_lock);
28227 ++
28228 + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28229 + if (vlan->hd_tbl_status)
28230 + hclge_set_vlan_filter_hw(hdev,
28231 +@@ -9890,6 +9916,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
28232 + }
28233 + }
28234 + clear_bit(vport->vport_id, hdev->vf_vlan_full);
28235 ++ mutex_unlock(&hdev->vport_lock);
28236 + }
28237 +
28238 + void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
28239 +@@ -9898,6 +9925,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
28240 + struct hclge_vport *vport;
28241 + int i;
28242 +
28243 ++ mutex_lock(&hdev->vport_lock);
28244 ++
28245 + for (i = 0; i < hdev->num_alloc_vport; i++) {
28246 + vport = &hdev->vport[i];
28247 + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28248 +@@ -9905,37 +9934,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
28249 + kfree(vlan);
28250 + }
28251 + }
28252 ++
28253 ++ mutex_unlock(&hdev->vport_lock);
28254 + }
28255 +
28256 +-void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
28257 ++void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
28258 + {
28259 +- struct hclge_vport_vlan_cfg *vlan, *tmp;
28260 +- struct hclge_dev *hdev = vport->back;
28261 ++ struct hclge_vlan_info *vlan_info;
28262 ++ struct hclge_vport *vport;
28263 + u16 vlan_proto;
28264 + u16 vlan_id;
28265 + u16 state;
28266 ++ int vf_id;
28267 + int ret;
28268 +
28269 +- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
28270 +- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
28271 +- state = vport->port_base_vlan_cfg.state;
28272 ++ /* PF should restore all vfs port base vlan */
28273 ++ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
28274 ++ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
28275 ++ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
28276 ++ &vport->port_base_vlan_cfg.vlan_info :
28277 ++ &vport->port_base_vlan_cfg.old_vlan_info;
28278 +
28279 +- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
28280 +- clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
28281 +- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
28282 +- vport->vport_id, vlan_id,
28283 +- false);
28284 +- return;
28285 ++ vlan_id = vlan_info->vlan_tag;
28286 ++ vlan_proto = vlan_info->vlan_proto;
28287 ++ state = vport->port_base_vlan_cfg.state;
28288 ++
28289 ++ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
28290 ++ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
28291 ++ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
28292 ++ vport->vport_id,
28293 ++ vlan_id, false);
28294 ++ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
28295 ++ }
28296 + }
28297 ++}
28298 +
28299 +- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28300 +- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
28301 +- vport->vport_id,
28302 +- vlan->vlan_id, false);
28303 +- if (ret)
28304 +- break;
28305 +- vlan->hd_tbl_status = true;
28306 ++void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
28307 ++{
28308 ++ struct hclge_vport_vlan_cfg *vlan, *tmp;
28309 ++ struct hclge_dev *hdev = vport->back;
28310 ++ int ret;
28311 ++
28312 ++ mutex_lock(&hdev->vport_lock);
28313 ++
28314 ++ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
28315 ++ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
28316 ++ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
28317 ++ vport->vport_id,
28318 ++ vlan->vlan_id, false);
28319 ++ if (ret)
28320 ++ break;
28321 ++ vlan->hd_tbl_status = true;
28322 ++ }
28323 + }
28324 ++
28325 ++ mutex_unlock(&hdev->vport_lock);
28326 + }
28327 +
28328 + /* For global reset and imp reset, hardware will clear the mac table,
28329 +@@ -9975,6 +10028,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
28330 + struct hnae3_handle *handle = &vport->nic;
28331 +
28332 + hclge_restore_mac_table_common(vport);
28333 ++ hclge_restore_vport_port_base_vlan_config(hdev);
28334 + hclge_restore_vport_vlan_table(vport);
28335 + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
28336 + hclge_restore_fd_entries(handle);
28337 +@@ -10031,6 +10085,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
28338 + false);
28339 + }
28340 +
28341 ++ vport->port_base_vlan_cfg.tbl_sta = false;
28342 ++
28343 + /* force add VLAN 0 */
28344 + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
28345 + if (ret)
28346 +@@ -10120,7 +10176,9 @@ out:
28347 + else
28348 + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
28349 +
28350 ++ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
28351 + vport->port_base_vlan_cfg.vlan_info = *vlan_info;
28352 ++ vport->port_base_vlan_cfg.tbl_sta = true;
28353 + hclge_set_vport_vlan_fltr_change(vport);
28354 +
28355 + return 0;
28356 +@@ -10188,14 +10246,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
28357 + return ret;
28358 + }
28359 +
28360 +- /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
28361 ++ /* there is a timewindow for PF to know VF unalive, it may
28362 ++ * cause send mailbox fail, but it doesn't matter, VF will
28363 ++ * query it when reinit.
28364 ++ * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
28365 + * VLAN state.
28366 + */
28367 + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
28368 + test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
28369 +- hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
28370 +- vport->vport_id, state,
28371 +- &vlan_info);
28372 ++ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
28373 ++ vport->vport_id,
28374 ++ state, &vlan_info);
28375 +
28376 + return 0;
28377 + }
28378 +@@ -10253,11 +10314,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
28379 + }
28380 +
28381 + if (!ret) {
28382 +- if (is_kill)
28383 +- hclge_rm_vport_vlan_table(vport, vlan_id, false);
28384 +- else
28385 ++ if (!is_kill)
28386 + hclge_add_vport_vlan_table(vport, vlan_id,
28387 + writen_to_tbl);
28388 ++ else if (is_kill && vlan_id != 0)
28389 ++ hclge_rm_vport_vlan_table(vport, vlan_id, false);
28390 + } else if (is_kill) {
28391 + /* when remove hw vlan filter failed, record the vlan id,
28392 + * and try to remove it from hw later, to be consistence
28393 +@@ -11831,8 +11892,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
28394 + hclge_misc_irq_uninit(hdev);
28395 + hclge_devlink_uninit(hdev);
28396 + hclge_pci_uninit(hdev);
28397 +- mutex_destroy(&hdev->vport_lock);
28398 + hclge_uninit_vport_vlan_table(hdev);
28399 ++ mutex_destroy(&hdev->vport_lock);
28400 + ae_dev->priv = NULL;
28401 + }
28402 +
28403 +@@ -12656,6 +12717,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
28404 + return 0;
28405 + }
28406 +
28407 ++/* After disable sriov, VF still has some config and info need clean,
28408 ++ * which configed by PF.
28409 ++ */
28410 ++static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
28411 ++{
28412 ++ struct hclge_dev *hdev = vport->back;
28413 ++ struct hclge_vlan_info vlan_info;
28414 ++ int ret;
28415 ++
28416 ++ /* after disable sriov, clean VF rate configured by PF */
28417 ++ ret = hclge_tm_qs_shaper_cfg(vport, 0);
28418 ++ if (ret)
28419 ++ dev_err(&hdev->pdev->dev,
28420 ++ "failed to clean vf%d rate config, ret = %d\n",
28421 ++ vfid, ret);
28422 ++
28423 ++ vlan_info.vlan_tag = 0;
28424 ++ vlan_info.qos = 0;
28425 ++ vlan_info.vlan_proto = ETH_P_8021Q;
28426 ++ ret = hclge_update_port_base_vlan_cfg(vport,
28427 ++ HNAE3_PORT_BASE_VLAN_DISABLE,
28428 ++ &vlan_info);
28429 ++ if (ret)
28430 ++ dev_err(&hdev->pdev->dev,
28431 ++ "failed to clean vf%d port base vlan, ret = %d\n",
28432 ++ vfid, ret);
28433 ++
28434 ++ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
28435 ++ if (ret)
28436 ++ dev_err(&hdev->pdev->dev,
28437 ++ "failed to clean vf%d spoof config, ret = %d\n",
28438 ++ vfid, ret);
28439 ++
28440 ++ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
28441 ++}
28442 ++
28443 ++static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
28444 ++{
28445 ++ struct hclge_dev *hdev = ae_dev->priv;
28446 ++ struct hclge_vport *vport;
28447 ++ int i;
28448 ++
28449 ++ for (i = 0; i < num_vfs; i++) {
28450 ++ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
28451 ++
28452 ++ hclge_clear_vport_vf_info(vport, i);
28453 ++ }
28454 ++}
28455 ++
28456 + static const struct hnae3_ae_ops hclge_ops = {
28457 + .init_ae_dev = hclge_init_ae_dev,
28458 + .uninit_ae_dev = hclge_uninit_ae_dev,
28459 +@@ -12757,6 +12867,7 @@ static const struct hnae3_ae_ops hclge_ops = {
28460 + .get_rx_hwts = hclge_ptp_get_rx_hwts,
28461 + .get_ts_info = hclge_ptp_get_ts_info,
28462 + .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
28463 ++ .clean_vf_config = hclge_clean_vport_config,
28464 + };
28465 +
28466 + static struct hnae3_ae_algo ae_algo = {
28467 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
28468 +index adfb26e792621..63197257dd4e4 100644
28469 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
28470 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
28471 +@@ -977,7 +977,9 @@ struct hclge_vlan_info {
28472 +
28473 + struct hclge_port_base_vlan_config {
28474 + u16 state;
28475 ++ bool tbl_sta;
28476 + struct hclge_vlan_info vlan_info;
28477 ++ struct hclge_vlan_info old_vlan_info;
28478 + };
28479 +
28480 + struct hclge_vf_info {
28481 +@@ -1023,6 +1025,7 @@ struct hclge_vport {
28482 + spinlock_t mac_list_lock; /* protect mac address need to add/detele */
28483 + struct list_head uc_mac_list; /* Store VF unicast table */
28484 + struct list_head mc_mac_list; /* Store VF multicast table */
28485 ++
28486 + struct list_head vlan_list; /* Store VF vlan table */
28487 + };
28488 +
28489 +@@ -1097,6 +1100,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
28490 + void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
28491 + void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
28492 + void hclge_restore_mac_table_common(struct hclge_vport *vport);
28493 ++void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
28494 + void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
28495 + int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
28496 + struct hclge_vlan_info *vlan_info);
28497 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
28498 +index 63d2be4349e3e..03d63b6a9b2bc 100644
28499 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
28500 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
28501 +@@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
28502 + int ret;
28503 +
28504 + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
28505 +- return 0;
28506 ++ return -EBUSY;
28507 +
28508 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
28509 +
28510 +@@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
28511 + int ret;
28512 +
28513 + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
28514 +- return 0;
28515 ++ return -EBUSY;
28516 +
28517 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
28518 +
28519 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
28520 +index 21442a9bb9961..90c6197d9374c 100644
28521 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
28522 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
28523 +@@ -2855,6 +2855,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
28524 + return ret;
28525 + }
28526 +
28527 ++ /* get current port based vlan state from PF */
28528 ++ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
28529 ++ if (ret)
28530 ++ return ret;
28531 ++
28532 + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
28533 +
28534 + hclgevf_init_rxd_adv_layout(hdev);
28535 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
28536 +index b423e94956f10..b4804ce63151f 100644
28537 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
28538 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
28539 +@@ -1429,6 +1429,15 @@ static int __ibmvnic_open(struct net_device *netdev)
28540 + return rc;
28541 + }
28542 +
28543 ++ adapter->tx_queues_active = true;
28544 ++
28545 ++ /* Since queues were stopped until now, there shouldn't be any
28546 ++ * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
28547 ++ * don't need the synchronize_rcu()? Leaving it for consistency
28548 ++ * with setting ->tx_queues_active = false.
28549 ++ */
28550 ++ synchronize_rcu();
28551 ++
28552 + netif_tx_start_all_queues(netdev);
28553 +
28554 + if (prev_state == VNIC_CLOSED) {
28555 +@@ -1603,6 +1612,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
28556 + struct ibmvnic_adapter *adapter = netdev_priv(netdev);
28557 +
28558 + /* ensure that transmissions are stopped if called by do_reset */
28559 ++
28560 ++ adapter->tx_queues_active = false;
28561 ++
28562 ++ /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
28563 ++ * update so they don't restart a queue after we stop it below.
28564 ++ */
28565 ++ synchronize_rcu();
28566 ++
28567 + if (test_bit(0, &adapter->resetting))
28568 + netif_tx_disable(netdev);
28569 + else
28570 +@@ -1842,14 +1859,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
28571 + tx_buff->skb = NULL;
28572 + adapter->netdev->stats.tx_dropped++;
28573 + }
28574 ++
28575 + ind_bufp->index = 0;
28576 ++
28577 + if (atomic_sub_return(entries, &tx_scrq->used) <=
28578 + (adapter->req_tx_entries_per_subcrq / 2) &&
28579 +- __netif_subqueue_stopped(adapter->netdev, queue_num) &&
28580 +- !test_bit(0, &adapter->resetting)) {
28581 +- netif_wake_subqueue(adapter->netdev, queue_num);
28582 +- netdev_dbg(adapter->netdev, "Started queue %d\n",
28583 +- queue_num);
28584 ++ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
28585 ++ rcu_read_lock();
28586 ++
28587 ++ if (adapter->tx_queues_active) {
28588 ++ netif_wake_subqueue(adapter->netdev, queue_num);
28589 ++ netdev_dbg(adapter->netdev, "Started queue %d\n",
28590 ++ queue_num);
28591 ++ }
28592 ++
28593 ++ rcu_read_unlock();
28594 + }
28595 + }
28596 +
28597 +@@ -1904,11 +1928,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
28598 + int index = 0;
28599 + u8 proto = 0;
28600 +
28601 +- tx_scrq = adapter->tx_scrq[queue_num];
28602 +- txq = netdev_get_tx_queue(netdev, queue_num);
28603 +- ind_bufp = &tx_scrq->ind_buf;
28604 +-
28605 +- if (test_bit(0, &adapter->resetting)) {
28606 ++ /* If a reset is in progress, drop the packet since
28607 ++ * the scrqs may get torn down. Otherwise use the
28608 ++ * rcu to ensure reset waits for us to complete.
28609 ++ */
28610 ++ rcu_read_lock();
28611 ++ if (!adapter->tx_queues_active) {
28612 + dev_kfree_skb_any(skb);
28613 +
28614 + tx_send_failed++;
28615 +@@ -1917,6 +1942,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
28616 + goto out;
28617 + }
28618 +
28619 ++ tx_scrq = adapter->tx_scrq[queue_num];
28620 ++ txq = netdev_get_tx_queue(netdev, queue_num);
28621 ++ ind_bufp = &tx_scrq->ind_buf;
28622 ++
28623 + if (ibmvnic_xmit_workarounds(skb, netdev)) {
28624 + tx_dropped++;
28625 + tx_send_failed++;
28626 +@@ -1924,6 +1953,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
28627 + ibmvnic_tx_scrq_flush(adapter, tx_scrq);
28628 + goto out;
28629 + }
28630 ++
28631 + if (skb_is_gso(skb))
28632 + tx_pool = &adapter->tso_pool[queue_num];
28633 + else
28634 +@@ -2078,6 +2108,7 @@ tx_err:
28635 + netif_carrier_off(netdev);
28636 + }
28637 + out:
28638 ++ rcu_read_unlock();
28639 + netdev->stats.tx_dropped += tx_dropped;
28640 + netdev->stats.tx_bytes += tx_bytes;
28641 + netdev->stats.tx_packets += tx_packets;
28642 +@@ -3732,9 +3763,15 @@ restart_loop:
28643 + (adapter->req_tx_entries_per_subcrq / 2) &&
28644 + __netif_subqueue_stopped(adapter->netdev,
28645 + scrq->pool_index)) {
28646 +- netif_wake_subqueue(adapter->netdev, scrq->pool_index);
28647 +- netdev_dbg(adapter->netdev, "Started queue %d\n",
28648 +- scrq->pool_index);
28649 ++ rcu_read_lock();
28650 ++ if (adapter->tx_queues_active) {
28651 ++ netif_wake_subqueue(adapter->netdev,
28652 ++ scrq->pool_index);
28653 ++ netdev_dbg(adapter->netdev,
28654 ++ "Started queue %d\n",
28655 ++ scrq->pool_index);
28656 ++ }
28657 ++ rcu_read_unlock();
28658 + }
28659 + }
28660 +
28661 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
28662 +index fa2d607a7b1b9..8f5cefb932dd1 100644
28663 +--- a/drivers/net/ethernet/ibm/ibmvnic.h
28664 ++++ b/drivers/net/ethernet/ibm/ibmvnic.h
28665 +@@ -1006,11 +1006,14 @@ struct ibmvnic_adapter {
28666 + struct work_struct ibmvnic_reset;
28667 + struct delayed_work ibmvnic_delayed_reset;
28668 + unsigned long resetting;
28669 +- bool napi_enabled, from_passive_init;
28670 +- bool login_pending;
28671 + /* last device reset time */
28672 + unsigned long last_reset_time;
28673 +
28674 ++ bool napi_enabled;
28675 ++ bool from_passive_init;
28676 ++ bool login_pending;
28677 ++ /* protected by rcu */
28678 ++ bool tx_queues_active;
28679 + bool failover_pending;
28680 + bool force_reset_recovery;
28681 +
28682 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
28683 +index 945b1bb9c6f40..e5e72b5bb6196 100644
28684 +--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
28685 ++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
28686 +@@ -218,7 +218,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
28687 + ntu += nb_buffs;
28688 + if (ntu == rx_ring->count) {
28689 + rx_desc = I40E_RX_DESC(rx_ring, 0);
28690 +- xdp = i40e_rx_bi(rx_ring, 0);
28691 + ntu = 0;
28692 + }
28693 +
28694 +@@ -241,21 +240,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
28695 + static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
28696 + struct xdp_buff *xdp)
28697 + {
28698 ++ unsigned int totalsize = xdp->data_end - xdp->data_meta;
28699 + unsigned int metasize = xdp->data - xdp->data_meta;
28700 +- unsigned int datasize = xdp->data_end - xdp->data;
28701 + struct sk_buff *skb;
28702 +
28703 ++ net_prefetch(xdp->data_meta);
28704 ++
28705 + /* allocate a skb to store the frags */
28706 +- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
28707 +- xdp->data_end - xdp->data_hard_start,
28708 ++ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
28709 + GFP_ATOMIC | __GFP_NOWARN);
28710 + if (unlikely(!skb))
28711 + goto out;
28712 +
28713 +- skb_reserve(skb, xdp->data - xdp->data_hard_start);
28714 +- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
28715 +- if (metasize)
28716 ++ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
28717 ++ ALIGN(totalsize, sizeof(long)));
28718 ++
28719 ++ if (metasize) {
28720 + skb_metadata_set(skb, metasize);
28721 ++ __skb_pull(skb, metasize);
28722 ++ }
28723 +
28724 + out:
28725 + xsk_buff_free(xdp);
28726 +@@ -324,11 +327,11 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
28727 + int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
28728 + {
28729 + unsigned int total_rx_bytes = 0, total_rx_packets = 0;
28730 +- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
28731 + u16 next_to_clean = rx_ring->next_to_clean;
28732 + u16 count_mask = rx_ring->count - 1;
28733 + unsigned int xdp_res, xdp_xmit = 0;
28734 + bool failure = false;
28735 ++ u16 cleaned_count;
28736 +
28737 + while (likely(total_rx_packets < (unsigned int)budget)) {
28738 + union i40e_rx_desc *rx_desc;
28739 +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
28740 +index bea1d1e39fa27..2f60230d332a6 100644
28741 +--- a/drivers/net/ethernet/intel/ice/ice.h
28742 ++++ b/drivers/net/ethernet/intel/ice/ice.h
28743 +@@ -290,6 +290,7 @@ enum ice_pf_state {
28744 + ICE_LINK_DEFAULT_OVERRIDE_PENDING,
28745 + ICE_PHY_INIT_COMPLETE,
28746 + ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
28747 ++ ICE_AUX_ERR_PENDING,
28748 + ICE_STATE_NBITS /* must be last */
28749 + };
28750 +
28751 +@@ -559,6 +560,7 @@ struct ice_pf {
28752 + wait_queue_head_t reset_wait_queue;
28753 +
28754 + u32 hw_csum_rx_error;
28755 ++ u32 oicr_err_reg;
28756 + u16 oicr_idx; /* Other interrupt cause MSIX vector index */
28757 + u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
28758 + u16 max_pf_txqs; /* Total Tx queues PF wide */
28759 +@@ -710,7 +712,7 @@ static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
28760 + struct ice_vsi *vsi = ring->vsi;
28761 + u16 qid;
28762 +
28763 +- qid = ring->q_index - vsi->num_xdp_txq;
28764 ++ qid = ring->q_index - vsi->alloc_txq;
28765 +
28766 + if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
28767 + return NULL;
28768 +diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
28769 +index fc3580167e7b5..5559230eff8b5 100644
28770 +--- a/drivers/net/ethernet/intel/ice/ice_idc.c
28771 ++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
28772 +@@ -34,6 +34,9 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
28773 + {
28774 + struct iidc_auxiliary_drv *iadrv;
28775 +
28776 ++ if (WARN_ON_ONCE(!in_task()))
28777 ++ return;
28778 ++
28779 + if (!pf->adev)
28780 + return;
28781 +
28782 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
28783 +index b7e8744b0c0a6..296f9d5f74084 100644
28784 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
28785 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
28786 +@@ -2255,6 +2255,19 @@ static void ice_service_task(struct work_struct *work)
28787 + return;
28788 + }
28789 +
28790 ++ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
28791 ++ struct iidc_event *event;
28792 ++
28793 ++ event = kzalloc(sizeof(*event), GFP_KERNEL);
28794 ++ if (event) {
28795 ++ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
28796 ++ /* report the entire OICR value to AUX driver */
28797 ++ swap(event->reg, pf->oicr_err_reg);
28798 ++ ice_send_event_to_aux(pf, event);
28799 ++ kfree(event);
28800 ++ }
28801 ++ }
28802 ++
28803 + if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
28804 + /* Plug aux device per request */
28805 + ice_plug_aux_dev(pf);
28806 +@@ -3041,17 +3054,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
28807 +
28808 + #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
28809 + if (oicr & ICE_AUX_CRIT_ERR) {
28810 +- struct iidc_event *event;
28811 +-
28812 ++ pf->oicr_err_reg |= oicr;
28813 ++ set_bit(ICE_AUX_ERR_PENDING, pf->state);
28814 + ena_mask &= ~ICE_AUX_CRIT_ERR;
28815 +- event = kzalloc(sizeof(*event), GFP_ATOMIC);
28816 +- if (event) {
28817 +- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
28818 +- /* report the entire OICR value to AUX driver */
28819 +- event->reg = oicr;
28820 +- ice_send_event_to_aux(pf, event);
28821 +- kfree(event);
28822 +- }
28823 + }
28824 +
28825 + /* Report any remaining unexpected interrupts */
28826 +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
28827 +index 2388837d6d6c9..feb874bde171f 100644
28828 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
28829 ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
28830 +@@ -428,20 +428,24 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
28831 + static struct sk_buff *
28832 + ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
28833 + {
28834 +- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
28835 ++ unsigned int totalsize = xdp->data_end - xdp->data_meta;
28836 + unsigned int metasize = xdp->data - xdp->data_meta;
28837 +- unsigned int datasize = xdp->data_end - xdp->data;
28838 + struct sk_buff *skb;
28839 +
28840 +- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
28841 ++ net_prefetch(xdp->data_meta);
28842 ++
28843 ++ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
28844 + GFP_ATOMIC | __GFP_NOWARN);
28845 + if (unlikely(!skb))
28846 + return NULL;
28847 +
28848 +- skb_reserve(skb, xdp->data - xdp->data_hard_start);
28849 +- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
28850 +- if (metasize)
28851 ++ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
28852 ++ ALIGN(totalsize, sizeof(long)));
28853 ++
28854 ++ if (metasize) {
28855 + skb_metadata_set(skb, metasize);
28856 ++ __skb_pull(skb, metasize);
28857 ++ }
28858 +
28859 + xsk_buff_free(xdp);
28860 + return skb;
28861 +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
28862 +index 51a2dcaf553de..2a5782063f4c8 100644
28863 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
28864 ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
28865 +@@ -965,10 +965,6 @@ static int igb_set_ringparam(struct net_device *netdev,
28866 + memcpy(&temp_ring[i], adapter->rx_ring[i],
28867 + sizeof(struct igb_ring));
28868 +
28869 +- /* Clear copied XDP RX-queue info */
28870 +- memset(&temp_ring[i].xdp_rxq, 0,
28871 +- sizeof(temp_ring[i].xdp_rxq));
28872 +-
28873 + temp_ring[i].count = new_rx_count;
28874 + err = igb_setup_rx_resources(&temp_ring[i]);
28875 + if (err) {
28876 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
28877 +index 38ba92022cd45..c1e4ad65b02de 100644
28878 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
28879 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
28880 +@@ -4352,7 +4352,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
28881 + {
28882 + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
28883 + struct device *dev = rx_ring->dev;
28884 +- int size;
28885 ++ int size, res;
28886 ++
28887 ++ /* XDP RX-queue info */
28888 ++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
28889 ++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
28890 ++ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
28891 ++ rx_ring->queue_index, 0);
28892 ++ if (res < 0) {
28893 ++ dev_err(dev, "Failed to register xdp_rxq index %u\n",
28894 ++ rx_ring->queue_index);
28895 ++ return res;
28896 ++ }
28897 +
28898 + size = sizeof(struct igb_rx_buffer) * rx_ring->count;
28899 +
28900 +@@ -4375,14 +4386,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
28901 +
28902 + rx_ring->xdp_prog = adapter->xdp_prog;
28903 +
28904 +- /* XDP RX-queue info */
28905 +- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
28906 +- rx_ring->queue_index, 0) < 0)
28907 +- goto err;
28908 +-
28909 + return 0;
28910 +
28911 + err:
28912 ++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
28913 + vfree(rx_ring->rx_buffer_info);
28914 + rx_ring->rx_buffer_info = NULL;
28915 + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
28916 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
28917 +index 2f17f36e94fde..2a9ae53238f73 100644
28918 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
28919 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
28920 +@@ -505,6 +505,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
28921 + u8 index = rx_ring->queue_index;
28922 + int size, desc_len, res;
28923 +
28924 ++ /* XDP RX-queue info */
28925 ++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
28926 ++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
28927 + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
28928 + rx_ring->q_vector->napi.napi_id);
28929 + if (res < 0) {
28930 +@@ -2446,19 +2449,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
28931 + static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
28932 + struct xdp_buff *xdp)
28933 + {
28934 ++ unsigned int totalsize = xdp->data_end - xdp->data_meta;
28935 + unsigned int metasize = xdp->data - xdp->data_meta;
28936 +- unsigned int datasize = xdp->data_end - xdp->data;
28937 +- unsigned int totalsize = metasize + datasize;
28938 + struct sk_buff *skb;
28939 +
28940 +- skb = __napi_alloc_skb(&ring->q_vector->napi,
28941 +- xdp->data_end - xdp->data_hard_start,
28942 ++ net_prefetch(xdp->data_meta);
28943 ++
28944 ++ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
28945 + GFP_ATOMIC | __GFP_NOWARN);
28946 + if (unlikely(!skb))
28947 + return NULL;
28948 +
28949 +- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
28950 +- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
28951 ++ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
28952 ++ ALIGN(totalsize, sizeof(long)));
28953 ++
28954 + if (metasize) {
28955 + skb_metadata_set(skb, metasize);
28956 + __skb_pull(skb, metasize);
28957 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
28958 +index 6a5e9cf6b5dac..dd7ff66d422f0 100644
28959 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
28960 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
28961 +@@ -207,26 +207,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
28962 + }
28963 +
28964 + static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
28965 +- struct ixgbe_rx_buffer *bi)
28966 ++ const struct xdp_buff *xdp)
28967 + {
28968 +- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
28969 +- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
28970 ++ unsigned int totalsize = xdp->data_end - xdp->data_meta;
28971 ++ unsigned int metasize = xdp->data - xdp->data_meta;
28972 + struct sk_buff *skb;
28973 +
28974 ++ net_prefetch(xdp->data_meta);
28975 ++
28976 + /* allocate a skb to store the frags */
28977 +- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
28978 +- bi->xdp->data_end - bi->xdp->data_hard_start,
28979 ++ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
28980 + GFP_ATOMIC | __GFP_NOWARN);
28981 + if (unlikely(!skb))
28982 + return NULL;
28983 +
28984 +- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
28985 +- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
28986 +- if (metasize)
28987 ++ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
28988 ++ ALIGN(totalsize, sizeof(long)));
28989 ++
28990 ++ if (metasize) {
28991 + skb_metadata_set(skb, metasize);
28992 ++ __skb_pull(skb, metasize);
28993 ++ }
28994 +
28995 +- xsk_buff_free(bi->xdp);
28996 +- bi->xdp = NULL;
28997 + return skb;
28998 + }
28999 +
29000 +@@ -317,12 +319,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
29001 + }
29002 +
29003 + /* XDP_PASS path */
29004 +- skb = ixgbe_construct_skb_zc(rx_ring, bi);
29005 ++ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
29006 + if (!skb) {
29007 + rx_ring->rx_stats.alloc_rx_buff_failed++;
29008 + break;
29009 + }
29010 +
29011 ++ xsk_buff_free(bi->xdp);
29012 ++ bi->xdp = NULL;
29013 ++
29014 + cleaned_count++;
29015 + ixgbe_inc_ntc(rx_ring);
29016 +
29017 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
29018 +index 91f86d77cd41b..3a31fb8cc1554 100644
29019 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
29020 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
29021 +@@ -605,7 +605,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
29022 + struct npc_install_flow_req req = { 0 };
29023 + struct npc_install_flow_rsp rsp = { 0 };
29024 + struct npc_mcam *mcam = &rvu->hw->mcam;
29025 +- struct nix_rx_action action;
29026 ++ struct nix_rx_action action = { 0 };
29027 + int blkaddr, index;
29028 +
29029 + /* AF's and SDP VFs work in promiscuous mode */
29030 +@@ -626,7 +626,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
29031 + *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
29032 + blkaddr, index);
29033 + } else {
29034 +- *(u64 *)&action = 0x00;
29035 + action.op = NIX_RX_ACTIONOP_UCAST;
29036 + action.pf_func = pcifunc;
29037 + }
29038 +@@ -657,7 +656,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
29039 + struct npc_mcam *mcam = &rvu->hw->mcam;
29040 + struct rvu_hwinfo *hw = rvu->hw;
29041 + int blkaddr, ucast_idx, index;
29042 +- struct nix_rx_action action;
29043 ++ struct nix_rx_action action = { 0 };
29044 + u64 relaxed_mask;
29045 +
29046 + if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
29047 +@@ -685,14 +684,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
29048 + blkaddr, ucast_idx);
29049 +
29050 + if (action.op != NIX_RX_ACTIONOP_RSS) {
29051 +- *(u64 *)&action = 0x00;
29052 ++ *(u64 *)&action = 0;
29053 + action.op = NIX_RX_ACTIONOP_UCAST;
29054 + }
29055 +
29056 + /* RX_ACTION set to MCAST for CGX PF's */
29057 + if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
29058 + is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
29059 +- *(u64 *)&action = 0x00;
29060 ++ *(u64 *)&action = 0;
29061 + action.op = NIX_RX_ACTIONOP_MCAST;
29062 + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
29063 + action.index = pfvf->promisc_mce_idx;
29064 +@@ -832,7 +831,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
29065 + struct rvu_hwinfo *hw = rvu->hw;
29066 + int blkaddr, ucast_idx, index;
29067 + u8 mac_addr[ETH_ALEN] = { 0 };
29068 +- struct nix_rx_action action;
29069 ++ struct nix_rx_action action = { 0 };
29070 + struct rvu_pfvf *pfvf;
29071 + u16 vf_func;
29072 +
29073 +@@ -861,14 +860,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
29074 + blkaddr, ucast_idx);
29075 +
29076 + if (action.op != NIX_RX_ACTIONOP_RSS) {
29077 +- *(u64 *)&action = 0x00;
29078 ++ *(u64 *)&action = 0;
29079 + action.op = NIX_RX_ACTIONOP_UCAST;
29080 + action.pf_func = pcifunc;
29081 + }
29082 +
29083 + /* RX_ACTION set to MCAST for CGX PF's */
29084 + if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
29085 +- *(u64 *)&action = 0x00;
29086 ++ *(u64 *)&action = 0;
29087 + action.op = NIX_RX_ACTIONOP_MCAST;
29088 + action.index = pfvf->mcast_mce_idx;
29089 + }
29090 +diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
29091 +index 7bdbb2d09a148..cc5e48e1bb4c3 100644
29092 +--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
29093 ++++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
29094 +@@ -4,6 +4,8 @@ config SPARX5_SWITCH
29095 + depends on HAS_IOMEM
29096 + depends on OF
29097 + depends on ARCH_SPARX5 || COMPILE_TEST
29098 ++ depends on PTP_1588_CLOCK_OPTIONAL
29099 ++ depends on BRIDGE || BRIDGE=n
29100 + select PHYLINK
29101 + select PHY_SPARX5_SERDES
29102 + select RESET_CONTROLLER
29103 +diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
29104 +index 7436f62fa1525..174ad95e746a3 100644
29105 +--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
29106 ++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
29107 +@@ -420,6 +420,8 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
29108 + db_hw->dataptr = phys;
29109 + db_hw->status = 0;
29110 + db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
29111 ++ if (!db)
29112 ++ return -ENOMEM;
29113 + db->cpu_addr = cpu_addr;
29114 + list_add_tail(&db->list, &tx->db_list);
29115 + }
29116 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
29117 +index 7e296fa71b368..40fa5bce2ac2c 100644
29118 +--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
29119 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
29120 +@@ -331,6 +331,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
29121 + goto err_out_deregister_lifs;
29122 + }
29123 +
29124 ++ mod_timer(&ionic->watchdog_timer,
29125 ++ round_jiffies(jiffies + ionic->watchdog_period));
29126 ++
29127 + return 0;
29128 +
29129 + err_out_deregister_lifs:
29130 +@@ -348,7 +351,6 @@ err_out_port_reset:
29131 + err_out_reset:
29132 + ionic_reset(ionic);
29133 + err_out_teardown:
29134 +- del_timer_sync(&ionic->watchdog_timer);
29135 + pci_clear_master(pdev);
29136 + /* Don't fail the probe for these errors, keep
29137 + * the hw interface around for inspection
29138 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
29139 +index d57e80d44c9df..2c7ce820a1fa7 100644
29140 +--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
29141 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
29142 +@@ -122,9 +122,6 @@ int ionic_dev_setup(struct ionic *ionic)
29143 + idev->fw_generation = IONIC_FW_STS_F_GENERATION &
29144 + ioread8(&idev->dev_info_regs->fw_status);
29145 +
29146 +- mod_timer(&ionic->watchdog_timer,
29147 +- round_jiffies(jiffies + ionic->watchdog_period));
29148 +-
29149 + idev->db_pages = bar->vaddr;
29150 + idev->phy_db_pages = bar->bus_addr;
29151 +
29152 +@@ -132,6 +129,16 @@ int ionic_dev_setup(struct ionic *ionic)
29153 + }
29154 +
29155 + /* Devcmd Interface */
29156 ++bool ionic_is_fw_running(struct ionic_dev *idev)
29157 ++{
29158 ++ u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
29159 ++
29160 ++ /* firmware is useful only if the running bit is set and
29161 ++ * fw_status != 0xff (bad PCI read)
29162 ++ */
29163 ++ return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
29164 ++}
29165 ++
29166 + int ionic_heartbeat_check(struct ionic *ionic)
29167 + {
29168 + struct ionic_dev *idev = &ionic->idev;
29169 +@@ -155,13 +162,10 @@ do_check_time:
29170 + goto do_check_time;
29171 + }
29172 +
29173 +- /* firmware is useful only if the running bit is set and
29174 +- * fw_status != 0xff (bad PCI read)
29175 +- * If fw_status is not ready don't bother with the generation.
29176 +- */
29177 + fw_status = ioread8(&idev->dev_info_regs->fw_status);
29178 +
29179 +- if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
29180 ++ /* If fw_status is not ready don't bother with the generation */
29181 ++ if (!ionic_is_fw_running(idev)) {
29182 + fw_status_ready = false;
29183 + } else {
29184 + fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
29185 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
29186 +index e5acf3bd62b2d..73b950ac12722 100644
29187 +--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
29188 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
29189 +@@ -353,5 +353,6 @@ void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
29190 + void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
29191 + unsigned int stop_index);
29192 + int ionic_heartbeat_check(struct ionic *ionic);
29193 ++bool ionic_is_fw_running(struct ionic_dev *idev);
29194 +
29195 + #endif /* _IONIC_DEV_H_ */
29196 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
29197 +index 875f4ec42efee..a0f9136b2d899 100644
29198 +--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
29199 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
29200 +@@ -215,9 +215,13 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
29201 + void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
29202 + u8 status, int err)
29203 + {
29204 ++ const char *stat_str;
29205 ++
29206 ++ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
29207 ++ ionic_error_to_str(status);
29208 ++
29209 + netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
29210 +- ionic_opcode_to_str(opcode), opcode,
29211 +- ionic_error_to_str(status), err);
29212 ++ ionic_opcode_to_str(opcode), opcode, stat_str, err);
29213 + }
29214 +
29215 + static int ionic_adminq_check_err(struct ionic_lif *lif,
29216 +@@ -318,6 +322,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
29217 + if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
29218 + netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
29219 + name, ctx->cmd.cmd.opcode, err);
29220 ++ ctx->comp.comp.status = IONIC_RC_ERROR;
29221 + return err;
29222 + }
29223 +
29224 +@@ -336,6 +341,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
29225 + if (do_msg)
29226 + netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
29227 + name, ctx->cmd.cmd.opcode);
29228 ++ ctx->comp.comp.status = IONIC_RC_ERROR;
29229 + return -ENXIO;
29230 + }
29231 +
29232 +@@ -370,10 +376,10 @@ int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *
29233 +
29234 + static void ionic_dev_cmd_clean(struct ionic *ionic)
29235 + {
29236 +- union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
29237 ++ struct ionic_dev *idev = &ionic->idev;
29238 +
29239 +- iowrite32(0, &regs->doorbell);
29240 +- memset_io(&regs->cmd, 0, sizeof(regs->cmd));
29241 ++ iowrite32(0, &idev->dev_cmd_regs->doorbell);
29242 ++ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
29243 + }
29244 +
29245 + int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
29246 +@@ -540,6 +546,9 @@ int ionic_reset(struct ionic *ionic)
29247 + struct ionic_dev *idev = &ionic->idev;
29248 + int err;
29249 +
29250 ++ if (!ionic_is_fw_running(idev))
29251 ++ return 0;
29252 ++
29253 + mutex_lock(&ionic->dev_cmd_lock);
29254 + ionic_dev_cmd_reset(idev);
29255 + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
29256 +@@ -612,15 +621,17 @@ int ionic_port_init(struct ionic *ionic)
29257 + int ionic_port_reset(struct ionic *ionic)
29258 + {
29259 + struct ionic_dev *idev = &ionic->idev;
29260 +- int err;
29261 ++ int err = 0;
29262 +
29263 + if (!idev->port_info)
29264 + return 0;
29265 +
29266 +- mutex_lock(&ionic->dev_cmd_lock);
29267 +- ionic_dev_cmd_port_reset(idev);
29268 +- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
29269 +- mutex_unlock(&ionic->dev_cmd_lock);
29270 ++ if (ionic_is_fw_running(idev)) {
29271 ++ mutex_lock(&ionic->dev_cmd_lock);
29272 ++ ionic_dev_cmd_port_reset(idev);
29273 ++ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
29274 ++ mutex_unlock(&ionic->dev_cmd_lock);
29275 ++ }
29276 +
29277 + dma_free_coherent(ionic->dev, idev->port_info_sz,
29278 + idev->port_info, idev->port_info_pa);
29279 +@@ -628,9 +639,6 @@ int ionic_port_reset(struct ionic *ionic)
29280 + idev->port_info = NULL;
29281 + idev->port_info_pa = 0;
29282 +
29283 +- if (err)
29284 +- dev_err(ionic->dev, "Failed to reset port\n");
29285 +-
29286 + return err;
29287 + }
29288 +
29289 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
29290 +index 48cf4355bc47a..0848b5529d48a 100644
29291 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
29292 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
29293 +@@ -2984,12 +2984,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
29294 + u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
29295 + struct qed_filter_accept_flags *flags = &params->accept_flags;
29296 + struct qed_public_vf_info *vf_info;
29297 ++ u16 tlv_mask;
29298 ++
29299 ++ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
29300 ++ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
29301 +
29302 + /* Untrusted VFs can't even be trusted to know that fact.
29303 + * Simply indicate everything is configured fine, and trace
29304 + * configuration 'behind their back'.
29305 + */
29306 +- if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
29307 ++ if (!(*tlvs & tlv_mask))
29308 + return 0;
29309 +
29310 + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
29311 +@@ -3006,6 +3010,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
29312 + flags->tx_accept_filter &= ~mask;
29313 + }
29314 +
29315 ++ if (params->update_accept_any_vlan_flg) {
29316 ++ vf_info->accept_any_vlan = params->accept_any_vlan;
29317 ++
29318 ++ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
29319 ++ params->accept_any_vlan = false;
29320 ++ }
29321 ++
29322 + return 0;
29323 + }
29324 +
29325 +@@ -4719,6 +4730,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
29326 + tx_rate = vf_info->tx_rate;
29327 + ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
29328 + ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
29329 ++ ivi->trusted = vf_info->is_trusted_request;
29330 +
29331 + return 0;
29332 + }
29333 +@@ -5149,6 +5161,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
29334 +
29335 + params.update_ctl_frame_check = 1;
29336 + params.mac_chk_en = !vf_info->is_trusted_configured;
29337 ++ params.update_accept_any_vlan_flg = 0;
29338 ++
29339 ++ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
29340 ++ params.update_accept_any_vlan_flg = 1;
29341 ++ params.accept_any_vlan = vf_info->accept_any_vlan;
29342 ++ }
29343 +
29344 + if (vf_info->rx_accept_mode & mask) {
29345 + flags->update_rx_mode_config = 1;
29346 +@@ -5164,13 +5182,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
29347 + if (!vf_info->is_trusted_configured) {
29348 + flags->rx_accept_filter &= ~mask;
29349 + flags->tx_accept_filter &= ~mask;
29350 ++ params.accept_any_vlan = false;
29351 + }
29352 +
29353 + if (flags->update_rx_mode_config ||
29354 + flags->update_tx_mode_config ||
29355 +- params.update_ctl_frame_check)
29356 ++ params.update_ctl_frame_check ||
29357 ++ params.update_accept_any_vlan_flg) {
29358 ++ DP_VERBOSE(hwfn, QED_MSG_IOV,
29359 ++ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
29360 ++ vf_info->is_trusted_configured ? "trusted" : "untrusted",
29361 ++ vf->abs_vf_id, vf->relative_vf_id);
29362 + qed_sp_vport_update(hwfn, &params,
29363 + QED_SPQ_MODE_EBLOCK, NULL);
29364 ++ }
29365 + }
29366 + }
29367 +
29368 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
29369 +index f448e3dd6c8ba..6ee2493de1642 100644
29370 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
29371 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
29372 +@@ -62,6 +62,7 @@ struct qed_public_vf_info {
29373 + bool is_trusted_request;
29374 + u8 rx_accept_mode;
29375 + u8 tx_accept_mode;
29376 ++ bool accept_any_vlan;
29377 + };
29378 +
29379 + struct qed_iov_vf_init_params {
29380 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
29381 +index 5d79ee4370bcd..7519773eaca6e 100644
29382 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
29383 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
29384 +@@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
29385 + if (dcb && dcb->ops->get_hw_capability)
29386 + return dcb->ops->get_hw_capability(dcb);
29387 +
29388 +- return 0;
29389 ++ return -EOPNOTSUPP;
29390 + }
29391 +
29392 + static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
29393 +@@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
29394 + if (dcb && dcb->ops->attach)
29395 + return dcb->ops->attach(dcb);
29396 +
29397 +- return 0;
29398 ++ return -EOPNOTSUPP;
29399 + }
29400 +
29401 + static inline int
29402 +@@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
29403 + if (dcb && dcb->ops->query_hw_capability)
29404 + return dcb->ops->query_hw_capability(dcb, buf);
29405 +
29406 +- return 0;
29407 ++ return -EOPNOTSUPP;
29408 + }
29409 +
29410 + static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
29411 +@@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
29412 + if (dcb && dcb->ops->query_cee_param)
29413 + return dcb->ops->query_cee_param(dcb, buf, type);
29414 +
29415 +- return 0;
29416 ++ return -EOPNOTSUPP;
29417 + }
29418 +
29419 + static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
29420 +@@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
29421 + if (dcb && dcb->ops->get_cee_cfg)
29422 + return dcb->ops->get_cee_cfg(dcb);
29423 +
29424 +- return 0;
29425 ++ return -EOPNOTSUPP;
29426 + }
29427 +
29428 + static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
29429 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
29430 +index 2ffa0a11eea56..569683f33804c 100644
29431 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
29432 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
29433 +@@ -460,6 +460,13 @@ static int ethqos_clks_config(void *priv, bool enabled)
29434 + dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
29435 + return ret;
29436 + }
29437 ++
29438 ++ /* Enable functional clock to prevent DMA reset to timeout due
29439 ++ * to lacking PHY clock after the hardware block has been power
29440 ++ * cycled. The actual configuration will be adjusted once
29441 ++ * ethqos_fix_mac_speed() is invoked.
29442 ++ */
29443 ++ ethqos_set_func_clk_en(ethqos);
29444 + } else {
29445 + clk_disable_unprepare(ethqos->rgmii_clk);
29446 + }
29447 +diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
29448 +index aa42141be3c0e..a557a477d0393 100644
29449 +--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
29450 ++++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
29451 +@@ -364,11 +364,9 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
29452 + struct cpsw_common *cpsw = priv->cpsw;
29453 + int ret;
29454 +
29455 +- ret = pm_runtime_get_sync(cpsw->dev);
29456 +- if (ret < 0) {
29457 ++ ret = pm_runtime_resume_and_get(cpsw->dev);
29458 ++ if (ret < 0)
29459 + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
29460 +- pm_runtime_put_noidle(cpsw->dev);
29461 +- }
29462 +
29463 + return ret;
29464 + }
29465 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
29466 +index 377c94ec24869..90d96eb79984e 100644
29467 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
29468 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
29469 +@@ -857,46 +857,53 @@ static void axienet_recv(struct net_device *ndev)
29470 + while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
29471 + dma_addr_t phys;
29472 +
29473 +- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
29474 +-
29475 + /* Ensure we see complete descriptor update */
29476 + dma_rmb();
29477 +- phys = desc_get_phys_addr(lp, cur_p);
29478 +- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
29479 +- DMA_FROM_DEVICE);
29480 +
29481 + skb = cur_p->skb;
29482 + cur_p->skb = NULL;
29483 +- length = cur_p->app4 & 0x0000FFFF;
29484 +-
29485 +- skb_put(skb, length);
29486 +- skb->protocol = eth_type_trans(skb, ndev);
29487 +- /*skb_checksum_none_assert(skb);*/
29488 +- skb->ip_summed = CHECKSUM_NONE;
29489 +-
29490 +- /* if we're doing Rx csum offload, set it up */
29491 +- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
29492 +- csumstatus = (cur_p->app2 &
29493 +- XAE_FULL_CSUM_STATUS_MASK) >> 3;
29494 +- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
29495 +- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
29496 +- skb->ip_summed = CHECKSUM_UNNECESSARY;
29497 ++
29498 ++ /* skb could be NULL if a previous pass already received the
29499 ++ * packet for this slot in the ring, but failed to refill it
29500 ++ * with a newly allocated buffer. In this case, don't try to
29501 ++ * receive it again.
29502 ++ */
29503 ++ if (likely(skb)) {
29504 ++ length = cur_p->app4 & 0x0000FFFF;
29505 ++
29506 ++ phys = desc_get_phys_addr(lp, cur_p);
29507 ++ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
29508 ++ DMA_FROM_DEVICE);
29509 ++
29510 ++ skb_put(skb, length);
29511 ++ skb->protocol = eth_type_trans(skb, ndev);
29512 ++ /*skb_checksum_none_assert(skb);*/
29513 ++ skb->ip_summed = CHECKSUM_NONE;
29514 ++
29515 ++ /* if we're doing Rx csum offload, set it up */
29516 ++ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
29517 ++ csumstatus = (cur_p->app2 &
29518 ++ XAE_FULL_CSUM_STATUS_MASK) >> 3;
29519 ++ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
29520 ++ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
29521 ++ skb->ip_summed = CHECKSUM_UNNECESSARY;
29522 ++ }
29523 ++ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
29524 ++ skb->protocol == htons(ETH_P_IP) &&
29525 ++ skb->len > 64) {
29526 ++ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
29527 ++ skb->ip_summed = CHECKSUM_COMPLETE;
29528 + }
29529 +- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
29530 +- skb->protocol == htons(ETH_P_IP) &&
29531 +- skb->len > 64) {
29532 +- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
29533 +- skb->ip_summed = CHECKSUM_COMPLETE;
29534 +- }
29535 +
29536 +- netif_rx(skb);
29537 ++ netif_rx(skb);
29538 +
29539 +- size += length;
29540 +- packets++;
29541 ++ size += length;
29542 ++ packets++;
29543 ++ }
29544 +
29545 + new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
29546 + if (!new_skb)
29547 +- return;
29548 ++ break;
29549 +
29550 + phys = dma_map_single(ndev->dev.parent, new_skb->data,
29551 + lp->max_frm_size,
29552 +@@ -905,7 +912,7 @@ static void axienet_recv(struct net_device *ndev)
29553 + if (net_ratelimit())
29554 + netdev_err(ndev, "RX DMA mapping error\n");
29555 + dev_kfree_skb(new_skb);
29556 +- return;
29557 ++ break;
29558 + }
29559 + desc_set_phys_addr(lp, phys, cur_p);
29560 +
29561 +@@ -913,6 +920,11 @@ static void axienet_recv(struct net_device *ndev)
29562 + cur_p->status = 0;
29563 + cur_p->skb = new_skb;
29564 +
29565 ++ /* Only update tail_p to mark this slot as usable after it has
29566 ++ * been successfully refilled.
29567 ++ */
29568 ++ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
29569 ++
29570 + if (++lp->rx_bd_ci >= lp->rx_bd_num)
29571 + lp->rx_bd_ci = 0;
29572 + cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
29573 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
29574 +index afa81a9480ccd..e675d1016c3c8 100644
29575 +--- a/drivers/net/hyperv/netvsc.c
29576 ++++ b/drivers/net/hyperv/netvsc.c
29577 +@@ -154,19 +154,15 @@ static void free_netvsc_device(struct rcu_head *head)
29578 +
29579 + kfree(nvdev->extension);
29580 +
29581 +- if (nvdev->recv_original_buf) {
29582 +- hv_unmap_memory(nvdev->recv_buf);
29583 ++ if (nvdev->recv_original_buf)
29584 + vfree(nvdev->recv_original_buf);
29585 +- } else {
29586 ++ else
29587 + vfree(nvdev->recv_buf);
29588 +- }
29589 +
29590 +- if (nvdev->send_original_buf) {
29591 +- hv_unmap_memory(nvdev->send_buf);
29592 ++ if (nvdev->send_original_buf)
29593 + vfree(nvdev->send_original_buf);
29594 +- } else {
29595 ++ else
29596 + vfree(nvdev->send_buf);
29597 +- }
29598 +
29599 + bitmap_free(nvdev->send_section_map);
29600 +
29601 +@@ -765,6 +761,12 @@ void netvsc_device_remove(struct hv_device *device)
29602 + netvsc_teardown_send_gpadl(device, net_device, ndev);
29603 + }
29604 +
29605 ++ if (net_device->recv_original_buf)
29606 ++ hv_unmap_memory(net_device->recv_buf);
29607 ++
29608 ++ if (net_device->send_original_buf)
29609 ++ hv_unmap_memory(net_device->send_buf);
29610 ++
29611 + /* Release all resources */
29612 + free_netvsc_device_rcu(net_device);
29613 + }
29614 +@@ -1821,6 +1823,12 @@ cleanup:
29615 + netif_napi_del(&net_device->chan_table[0].napi);
29616 +
29617 + cleanup2:
29618 ++ if (net_device->recv_original_buf)
29619 ++ hv_unmap_memory(net_device->recv_buf);
29620 ++
29621 ++ if (net_device->send_original_buf)
29622 ++ hv_unmap_memory(net_device->send_buf);
29623 ++
29624 + free_netvsc_device(&net_device->rcu);
29625 +
29626 + return ERR_PTR(ret);
29627 +diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
29628 +index 29aa811af430f..a8794065b250b 100644
29629 +--- a/drivers/net/phy/at803x.c
29630 ++++ b/drivers/net/phy/at803x.c
29631 +@@ -784,25 +784,7 @@ static int at803x_probe(struct phy_device *phydev)
29632 + return ret;
29633 + }
29634 +
29635 +- /* Some bootloaders leave the fiber page selected.
29636 +- * Switch to the copper page, as otherwise we read
29637 +- * the PHY capabilities from the fiber side.
29638 +- */
29639 +- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
29640 +- phy_lock_mdio_bus(phydev);
29641 +- ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
29642 +- phy_unlock_mdio_bus(phydev);
29643 +- if (ret)
29644 +- goto err;
29645 +- }
29646 +-
29647 + return 0;
29648 +-
29649 +-err:
29650 +- if (priv->vddio)
29651 +- regulator_disable(priv->vddio);
29652 +-
29653 +- return ret;
29654 + }
29655 +
29656 + static void at803x_remove(struct phy_device *phydev)
29657 +@@ -912,6 +894,22 @@ static int at803x_config_init(struct phy_device *phydev)
29658 + {
29659 + int ret;
29660 +
29661 ++ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
29662 ++ /* Some bootloaders leave the fiber page selected.
29663 ++ * Switch to the copper page, as otherwise we read
29664 ++ * the PHY capabilities from the fiber side.
29665 ++ */
29666 ++ phy_lock_mdio_bus(phydev);
29667 ++ ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
29668 ++ phy_unlock_mdio_bus(phydev);
29669 ++ if (ret)
29670 ++ return ret;
29671 ++
29672 ++ ret = at8031_pll_config(phydev);
29673 ++ if (ret < 0)
29674 ++ return ret;
29675 ++ }
29676 ++
29677 + /* The RX and TX delay default is:
29678 + * after HW reset: RX delay enabled and TX delay disabled
29679 + * after SW reset: RX delay enabled, while TX delay retains the
29680 +@@ -941,12 +939,6 @@ static int at803x_config_init(struct phy_device *phydev)
29681 + if (ret < 0)
29682 + return ret;
29683 +
29684 +- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
29685 +- ret = at8031_pll_config(phydev);
29686 +- if (ret < 0)
29687 +- return ret;
29688 +- }
29689 +-
29690 + /* Ar803x extended next page bit is enabled by default. Cisco
29691 + * multigig switches read this bit and attempt to negotiate 10Gbps
29692 + * rates even if the next page bit is disabled. This is incorrect
29693 +diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
29694 +index 3c683e0e40e9e..e36809aa6d300 100644
29695 +--- a/drivers/net/phy/broadcom.c
29696 ++++ b/drivers/net/phy/broadcom.c
29697 +@@ -11,6 +11,7 @@
29698 + */
29699 +
29700 + #include "bcm-phy-lib.h"
29701 ++#include <linux/delay.h>
29702 + #include <linux/module.h>
29703 + #include <linux/phy.h>
29704 + #include <linux/brcmphy.h>
29705 +@@ -602,6 +603,26 @@ static int brcm_fet_config_init(struct phy_device *phydev)
29706 + if (err < 0)
29707 + return err;
29708 +
29709 ++ /* The datasheet indicates the PHY needs up to 1us to complete a reset,
29710 ++ * build some slack here.
29711 ++ */
29712 ++ usleep_range(1000, 2000);
29713 ++
29714 ++ /* The PHY requires 65 MDC clock cycles to complete a write operation
29715 ++ * and turnaround the line properly.
29716 ++ *
29717 ++ * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac)
29718 ++ * may flag the lack of turn-around as a read failure. This is
29719 ++ * particularly true with this combination since the MDIO controller
29720 ++ * only used 64 MDC cycles. This is not a critical failure in this
29721 ++ * specific case and it has no functional impact otherwise, so we let
29722 ++ * that one go through. If there is a genuine bus error, the next read
29723 ++ * of MII_BRCM_FET_INTREG will error out.
29724 ++ */
29725 ++ err = phy_read(phydev, MII_BMCR);
29726 ++ if (err < 0 && err != -EIO)
29727 ++ return err;
29728 ++
29729 + reg = phy_read(phydev, MII_BRCM_FET_INTREG);
29730 + if (reg < 0)
29731 + return reg;
29732 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
29733 +index a7ebcdab415b5..281cebc3d00cc 100644
29734 +--- a/drivers/net/phy/micrel.c
29735 ++++ b/drivers/net/phy/micrel.c
29736 +@@ -1596,11 +1596,13 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
29737 + {
29738 + u32 data;
29739 +
29740 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
29741 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
29742 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
29743 +- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
29744 +- data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
29745 ++ phy_lock_mdio_bus(phydev);
29746 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
29747 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
29748 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
29749 ++ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
29750 ++ data = __phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
29751 ++ phy_unlock_mdio_bus(phydev);
29752 +
29753 + return data;
29754 + }
29755 +@@ -1608,18 +1610,18 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
29756 + static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
29757 + u16 val)
29758 + {
29759 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
29760 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
29761 +- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
29762 +- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
29763 ++ phy_lock_mdio_bus(phydev);
29764 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
29765 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
29766 ++ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
29767 ++ page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC);
29768 +
29769 +- val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
29770 +- if (val) {
29771 ++ val = __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
29772 ++ if (val != 0)
29773 + phydev_err(phydev, "Error: phy_write has returned error %d\n",
29774 + val);
29775 +- return val;
29776 +- }
29777 +- return 0;
29778 ++ phy_unlock_mdio_bus(phydev);
29779 ++ return val;
29780 + }
29781 +
29782 + static int lan8814_config_init(struct phy_device *phydev)
29783 +diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
29784 +index 2a1e31defe718..4334aafab59a4 100644
29785 +--- a/drivers/net/usb/asix.h
29786 ++++ b/drivers/net/usb/asix.h
29787 +@@ -192,8 +192,8 @@ extern const struct driver_info ax88172a_info;
29788 + /* ASIX specific flags */
29789 + #define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
29790 +
29791 +-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29792 +- u16 size, void *data, int in_pm);
29793 ++int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29794 ++ u16 size, void *data, int in_pm);
29795 +
29796 + int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29797 + u16 size, void *data, int in_pm);
29798 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
29799 +index 71682970be584..524805285019a 100644
29800 +--- a/drivers/net/usb/asix_common.c
29801 ++++ b/drivers/net/usb/asix_common.c
29802 +@@ -11,8 +11,8 @@
29803 +
29804 + #define AX_HOST_EN_RETRIES 30
29805 +
29806 +-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29807 +- u16 size, void *data, int in_pm)
29808 ++int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29809 ++ u16 size, void *data, int in_pm)
29810 + {
29811 + int ret;
29812 + int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
29813 +@@ -27,9 +27,12 @@ int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29814 + ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
29815 + value, index, data, size);
29816 +
29817 +- if (unlikely(ret < 0))
29818 ++ if (unlikely(ret < size)) {
29819 ++ ret = ret < 0 ? ret : -ENODATA;
29820 ++
29821 + netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
29822 + index, ret);
29823 ++ }
29824 +
29825 + return ret;
29826 + }
29827 +@@ -79,7 +82,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
29828 + 0, 0, 1, &smsr, in_pm);
29829 + if (ret == -ENODEV)
29830 + break;
29831 +- else if (ret < sizeof(smsr))
29832 ++ else if (ret < 0)
29833 + continue;
29834 + else if (smsr & AX_HOST_EN)
29835 + break;
29836 +@@ -579,8 +582,12 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
29837 + return ret;
29838 + }
29839 +
29840 +- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
29841 +- (__u16)loc, 2, &res, 1);
29842 ++ ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
29843 ++ (__u16)loc, 2, &res, 1);
29844 ++ if (ret < 0) {
29845 ++ mutex_unlock(&dev->phy_mutex);
29846 ++ return ret;
29847 ++ }
29848 + asix_set_hw_mii(dev, 1);
29849 + mutex_unlock(&dev->phy_mutex);
29850 +
29851 +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
29852 +index 4514d35ef4c48..6b2fbdf4e0fde 100644
29853 +--- a/drivers/net/usb/asix_devices.c
29854 ++++ b/drivers/net/usb/asix_devices.c
29855 +@@ -755,7 +755,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
29856 + priv->phy_addr = ret;
29857 + priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
29858 +
29859 +- asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
29860 ++ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
29861 ++ if (ret < 0) {
29862 ++ netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret);
29863 ++ return ret;
29864 ++ }
29865 ++
29866 + chipcode &= AX_CHIPCODE_MASK;
29867 +
29868 + ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
29869 +@@ -920,11 +925,21 @@ static int ax88178_reset(struct usbnet *dev)
29870 + int gpio0 = 0;
29871 + u32 phyid;
29872 +
29873 +- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
29874 ++ ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
29875 ++ if (ret < 0) {
29876 ++ netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret);
29877 ++ return ret;
29878 ++ }
29879 ++
29880 + netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
29881 +
29882 + asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
29883 +- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
29884 ++ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
29885 ++ if (ret < 0) {
29886 ++ netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret);
29887 ++ return ret;
29888 ++ }
29889 ++
29890 + asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);
29891 +
29892 + netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
29893 +diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
29894 +index 1de413b19e342..8084e7408c0ae 100644
29895 +--- a/drivers/net/wireguard/queueing.c
29896 ++++ b/drivers/net/wireguard/queueing.c
29897 +@@ -4,6 +4,7 @@
29898 + */
29899 +
29900 + #include "queueing.h"
29901 ++#include <linux/skb_array.h>
29902 +
29903 + struct multicore_worker __percpu *
29904 + wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
29905 +@@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
29906 + {
29907 + free_percpu(queue->worker);
29908 + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
29909 +- ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
29910 ++ ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
29911 + }
29912 +
29913 + #define NEXT(skb) ((skb)->prev)
29914 +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
29915 +index 6f07b949cb81d..0414d7a6ce741 100644
29916 +--- a/drivers/net/wireguard/socket.c
29917 ++++ b/drivers/net/wireguard/socket.c
29918 +@@ -160,6 +160,7 @@ out:
29919 + rcu_read_unlock_bh();
29920 + return ret;
29921 + #else
29922 ++ kfree_skb(skb);
29923 + return -EAFNOSUPPORT;
29924 + #endif
29925 + }
29926 +@@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
29927 + endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
29928 + endpoint->src4.s_addr = ip_hdr(skb)->daddr;
29929 + endpoint->src_if4 = skb->skb_iif;
29930 +- } else if (skb->protocol == htons(ETH_P_IPV6)) {
29931 ++ } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
29932 + endpoint->addr6.sin6_family = AF_INET6;
29933 + endpoint->addr6.sin6_port = udp_hdr(skb)->source;
29934 + endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
29935 +@@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer,
29936 + peer->endpoint.addr4 = endpoint->addr4;
29937 + peer->endpoint.src4 = endpoint->src4;
29938 + peer->endpoint.src_if4 = endpoint->src_if4;
29939 +- } else if (endpoint->addr.sa_family == AF_INET6) {
29940 ++ } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
29941 + peer->endpoint.addr6 = endpoint->addr6;
29942 + peer->endpoint.src6 = endpoint->src6;
29943 + } else {
29944 +diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
29945 +index 9513ab696fff1..f79dd9a716906 100644
29946 +--- a/drivers/net/wireless/ath/ath10k/snoc.c
29947 ++++ b/drivers/net/wireless/ath/ath10k/snoc.c
29948 +@@ -1556,11 +1556,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
29949 + node = of_parse_phandle(dev->of_node, "memory-region", 0);
29950 + if (node) {
29951 + ret = of_address_to_resource(node, 0, &r);
29952 ++ of_node_put(node);
29953 + if (ret) {
29954 + dev_err(dev, "failed to resolve msa fixed region\n");
29955 + return ret;
29956 + }
29957 +- of_node_put(node);
29958 +
29959 + ar->msa.paddr = r.start;
29960 + ar->msa.mem_size = resource_size(&r);
29961 +diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
29962 +index 7d65c115669fe..20b9aa8ddf7d5 100644
29963 +--- a/drivers/net/wireless/ath/ath10k/wow.c
29964 ++++ b/drivers/net/wireless/ath/ath10k/wow.c
29965 +@@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
29966 + if (patterns[i].mask[j / 8] & BIT(j % 8))
29967 + bitmask[j] = 0xff;
29968 + old_pattern.mask = bitmask;
29969 +- new_pattern = old_pattern;
29970 +
29971 + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
29972 +- if (patterns[i].pkt_offset < ETH_HLEN)
29973 ++ if (patterns[i].pkt_offset < ETH_HLEN) {
29974 + ath10k_wow_convert_8023_to_80211(&new_pattern,
29975 + &old_pattern);
29976 +- else
29977 ++ } else {
29978 ++ new_pattern = old_pattern;
29979 + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
29980 ++ }
29981 + }
29982 +
29983 + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
29984 +diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
29985 +index c212a789421ee..e432f8dc05d61 100644
29986 +--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
29987 ++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
29988 +@@ -2642,9 +2642,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
29989 +
29990 + spin_lock_bh(&srng->lock);
29991 +
29992 ++try_again:
29993 + ath11k_hal_srng_access_begin(ab, srng);
29994 +
29995 +-try_again:
29996 + while (likely(desc =
29997 + (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
29998 + srng))) {
29999 +diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
30000 +index 91d6244b65435..8402961c66887 100644
30001 +--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
30002 ++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
30003 +@@ -426,7 +426,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
30004 + struct ath11k_sta *arsta;
30005 + struct ieee80211_sta *sta;
30006 + u16 rate, ru_tones;
30007 +- u8 mcs, rate_idx, ofdma;
30008 ++ u8 mcs, rate_idx = 0, ofdma;
30009 + int ret;
30010 +
30011 + spin_lock_bh(&ab->base_lock);
30012 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
30013 +index 07f499d5ec92b..08e33778f63b8 100644
30014 +--- a/drivers/net/wireless/ath/ath11k/mac.c
30015 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
30016 +@@ -2319,6 +2319,9 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
30017 + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
30018 + return;
30019 +
30020 ++ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
30021 ++ arg->bw_40 = true;
30022 ++
30023 + if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
30024 + arg->bw_80 = true;
30025 +
30026 +@@ -4504,24 +4507,30 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
30027 + sta->addr, arvif->vdev_id);
30028 + } else if ((old_state == IEEE80211_STA_NONE &&
30029 + new_state == IEEE80211_STA_NOTEXIST)) {
30030 +- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
30031 ++ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
30032 ++ vif->type == NL80211_IFTYPE_STATION;
30033 +
30034 +- if (ar->ab->hw_params.vdev_start_delay &&
30035 +- vif->type == NL80211_IFTYPE_STATION)
30036 +- goto free;
30037 ++ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
30038 +
30039 +- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
30040 +- if (ret)
30041 +- ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
30042 +- sta->addr, arvif->vdev_id);
30043 +- else
30044 +- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
30045 +- sta->addr, arvif->vdev_id);
30046 ++ if (!skip_peer_delete) {
30047 ++ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
30048 ++ if (ret)
30049 ++ ath11k_warn(ar->ab,
30050 ++ "Failed to delete peer: %pM for VDEV: %d\n",
30051 ++ sta->addr, arvif->vdev_id);
30052 ++ else
30053 ++ ath11k_dbg(ar->ab,
30054 ++ ATH11K_DBG_MAC,
30055 ++ "Removed peer: %pM for VDEV: %d\n",
30056 ++ sta->addr, arvif->vdev_id);
30057 ++ }
30058 +
30059 + ath11k_mac_dec_num_stations(arvif, sta);
30060 + spin_lock_bh(&ar->ab->base_lock);
30061 + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
30062 +- if (peer && peer->sta == sta) {
30063 ++ if (skip_peer_delete && peer) {
30064 ++ peer->sta = NULL;
30065 ++ } else if (peer && peer->sta == sta) {
30066 + ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
30067 + vif->addr, arvif->vdev_id);
30068 + peer->sta = NULL;
30069 +@@ -4531,7 +4540,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
30070 + }
30071 + spin_unlock_bh(&ar->ab->base_lock);
30072 +
30073 +-free:
30074 + kfree(arsta->tx_stats);
30075 + arsta->tx_stats = NULL;
30076 +
30077 +diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
30078 +index e4250ba8dfee2..cccaa348cf212 100644
30079 +--- a/drivers/net/wireless/ath/ath11k/mhi.c
30080 ++++ b/drivers/net/wireless/ath/ath11k/mhi.c
30081 +@@ -332,6 +332,7 @@ static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
30082 + return -ENOENT;
30083 +
30084 + ret = of_address_to_resource(np, 0, &res);
30085 ++ of_node_put(np);
30086 + if (ret)
30087 + return ret;
30088 +
30089 +diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
30090 +index 65d3c6ba35ae6..d0701e8eca9c0 100644
30091 +--- a/drivers/net/wireless/ath/ath11k/qmi.c
30092 ++++ b/drivers/net/wireless/ath/ath11k/qmi.c
30093 +@@ -1932,10 +1932,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
30094 + if (!hremote_node) {
30095 + ath11k_dbg(ab, ATH11K_DBG_QMI,
30096 + "qmi fail to get hremote_node\n");
30097 +- return ret;
30098 ++ return -ENODEV;
30099 + }
30100 +
30101 + ret = of_address_to_resource(hremote_node, 0, &res);
30102 ++ of_node_put(hremote_node);
30103 + if (ret) {
30104 + ath11k_dbg(ab, ATH11K_DBG_QMI,
30105 + "qmi fail to get reg from hremote\n");
30106 +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
30107 +index 510e61e97dbcb..994ec48b2f669 100644
30108 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
30109 ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
30110 +@@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
30111 + hdr->endpoint_id = epid;
30112 + hdr->flags = flags;
30113 + hdr->payload_len = cpu_to_be16(len);
30114 ++ memset(hdr->control, 0, sizeof(hdr->control));
30115 +
30116 + status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
30117 +
30118 +@@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target,
30119 + conn_msg->dl_pipeid = endpoint->dl_pipeid;
30120 + conn_msg->ul_pipeid = endpoint->ul_pipeid;
30121 +
30122 ++ /* To prevent infoleak */
30123 ++ conn_msg->svc_meta_len = 0;
30124 ++ conn_msg->pad = 0;
30125 ++
30126 + ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
30127 + if (ret)
30128 + goto err;
30129 +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
30130 +index 49f7ee1c912b8..2208ec8004821 100644
30131 +--- a/drivers/net/wireless/ath/carl9170/main.c
30132 ++++ b/drivers/net/wireless/ath/carl9170/main.c
30133 +@@ -1914,7 +1914,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
30134 + WARN_ON(!(tx_streams >= 1 && tx_streams <=
30135 + IEEE80211_HT_MCS_TX_MAX_STREAMS));
30136 +
30137 +- tx_params = (tx_streams - 1) <<
30138 ++ tx_params |= (tx_streams - 1) <<
30139 + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
30140 +
30141 + carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
30142 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
30143 +index d99140960a820..dcbe55b56e437 100644
30144 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
30145 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
30146 +@@ -207,6 +207,8 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
30147 + size = BRCMF_FW_MAX_NVRAM_SIZE;
30148 + else
30149 + size = data_len;
30150 ++ /* Add space for properties we may add */
30151 ++ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
30152 + /* Alloc for extra 0 byte + roundup by 4 + length field */
30153 + size += 1 + 3 + sizeof(u32);
30154 + nvp->nvram = kzalloc(size, GFP_KERNEL);
30155 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
30156 +index 8b149996fc000..3ff4997e1c97a 100644
30157 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
30158 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
30159 +@@ -12,6 +12,7 @@
30160 + #include <linux/interrupt.h>
30161 + #include <linux/bcma/bcma.h>
30162 + #include <linux/sched.h>
30163 ++#include <linux/io.h>
30164 + #include <asm/unaligned.h>
30165 +
30166 + #include <soc.h>
30167 +@@ -59,6 +60,13 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
30168 + BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
30169 + BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
30170 +
30171 ++/* firmware config files */
30172 ++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
30173 ++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
30174 ++
30175 ++/* per-board firmware binaries */
30176 ++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
30177 ++
30178 + static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
30179 + BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
30180 + BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
30181 +@@ -447,47 +455,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
30182 + }
30183 +
30184 +
30185 +-static void
30186 +-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
30187 +- void *srcaddr, u32 len)
30188 +-{
30189 +- void __iomem *address = devinfo->tcm + mem_offset;
30190 +- __le32 *src32;
30191 +- __le16 *src16;
30192 +- u8 *src8;
30193 +-
30194 +- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
30195 +- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
30196 +- src8 = (u8 *)srcaddr;
30197 +- while (len) {
30198 +- iowrite8(*src8, address);
30199 +- address++;
30200 +- src8++;
30201 +- len--;
30202 +- }
30203 +- } else {
30204 +- len = len / 2;
30205 +- src16 = (__le16 *)srcaddr;
30206 +- while (len) {
30207 +- iowrite16(le16_to_cpu(*src16), address);
30208 +- address += 2;
30209 +- src16++;
30210 +- len--;
30211 +- }
30212 +- }
30213 +- } else {
30214 +- len = len / 4;
30215 +- src32 = (__le32 *)srcaddr;
30216 +- while (len) {
30217 +- iowrite32(le32_to_cpu(*src32), address);
30218 +- address += 4;
30219 +- src32++;
30220 +- len--;
30221 +- }
30222 +- }
30223 +-}
30224 +-
30225 +-
30226 + static void
30227 + brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
30228 + void *dstaddr, u32 len)
30229 +@@ -1348,6 +1315,18 @@ static void brcmf_pcie_down(struct device *dev)
30230 + {
30231 + }
30232 +
30233 ++static int brcmf_pcie_preinit(struct device *dev)
30234 ++{
30235 ++ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
30236 ++ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
30237 ++
30238 ++ brcmf_dbg(PCIE, "Enter\n");
30239 ++
30240 ++ brcmf_pcie_intr_enable(buspub->devinfo);
30241 ++ brcmf_pcie_hostready(buspub->devinfo);
30242 ++
30243 ++ return 0;
30244 ++}
30245 +
30246 + static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
30247 + {
30248 +@@ -1456,6 +1435,7 @@ static int brcmf_pcie_reset(struct device *dev)
30249 + }
30250 +
30251 + static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
30252 ++ .preinit = brcmf_pcie_preinit,
30253 + .txdata = brcmf_pcie_tx,
30254 + .stop = brcmf_pcie_down,
30255 + .txctl = brcmf_pcie_tx_ctlpkt,
30256 +@@ -1563,8 +1543,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
30257 + return err;
30258 +
30259 + brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
30260 +- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
30261 +- (void *)fw->data, fw->size);
30262 ++ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
30263 ++ (void *)fw->data, fw->size);
30264 +
30265 + resetintr = get_unaligned_le32(fw->data);
30266 + release_firmware(fw);
30267 +@@ -1578,7 +1558,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
30268 + brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
30269 + address = devinfo->ci->rambase + devinfo->ci->ramsize -
30270 + nvram_len;
30271 +- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
30272 ++ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
30273 + brcmf_fw_nvram_free(nvram);
30274 + } else {
30275 + brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
30276 +@@ -1777,6 +1757,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
30277 + ret = brcmf_chip_get_raminfo(devinfo->ci);
30278 + if (ret) {
30279 + brcmf_err(bus, "Failed to get RAM info\n");
30280 ++ release_firmware(fw);
30281 ++ brcmf_fw_nvram_free(nvram);
30282 + goto fail;
30283 + }
30284 +
30285 +@@ -1826,9 +1808,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
30286 +
30287 + init_waitqueue_head(&devinfo->mbdata_resp_wait);
30288 +
30289 +- brcmf_pcie_intr_enable(devinfo);
30290 +- brcmf_pcie_hostready(devinfo);
30291 +-
30292 + ret = brcmf_attach(&devinfo->pdev->dev);
30293 + if (ret)
30294 + goto fail;
30295 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
30296 +index 8effeb7a7269b..5d156e591b35c 100644
30297 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
30298 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
30299 +@@ -629,7 +629,6 @@ BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio");
30300 +
30301 + /* firmware config files */
30302 + MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
30303 +-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
30304 +
30305 + /* per-board firmware binaries */
30306 + MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
30307 +diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
30308 +index 754876cd27ce8..e8bd4f0e3d2dc 100644
30309 +--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
30310 ++++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
30311 +@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
30312 +
30313 + priv->is_open = 1;
30314 + IWL_DEBUG_MAC80211(priv, "leave\n");
30315 +- return 0;
30316 ++ return ret;
30317 + }
30318 +
30319 + static void iwlagn_mac_stop(struct ieee80211_hw *hw)
30320 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
30321 +index 7ad9cee925da5..372cc950cc884 100644
30322 +--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
30323 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
30324 +@@ -1561,8 +1561,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
30325 + return -EBUSY;
30326 +
30327 + range->range_data_size = reg->dev_addr.size;
30328 +- iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
30329 +- DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
30330 + for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
30331 + prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
30332 + DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
30333 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
30334 +index c73672d613562..42f6f8bb83be9 100644
30335 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
30336 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
30337 +@@ -861,11 +861,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
30338 + case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
30339 + struct iwl_dbgc1_info dram_info = {};
30340 + struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
30341 +- __le64 dram_base_addr = cpu_to_le64(frags->physical);
30342 +- __le32 dram_size = cpu_to_le32(frags->size);
30343 +- u64 dram_addr = le64_to_cpu(dram_base_addr);
30344 ++ __le64 dram_base_addr;
30345 ++ __le32 dram_size;
30346 ++ u64 dram_addr;
30347 + u32 ret;
30348 +
30349 ++ if (!frags)
30350 ++ break;
30351 ++
30352 ++ dram_base_addr = cpu_to_le64(frags->physical);
30353 ++ dram_size = cpu_to_le32(frags->size);
30354 ++ dram_addr = le64_to_cpu(dram_base_addr);
30355 ++
30356 + IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
30357 + dram_base_addr, dram_size);
30358 + IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
30359 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
30360 +index 95b3dae7b504b..9331a6b6bf36c 100644
30361 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
30362 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
30363 +@@ -354,8 +354,6 @@
30364 + #define WFPM_GP2 0xA030B4
30365 +
30366 + /* DBGI SRAM Register details */
30367 +-#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
30368 +-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
30369 + #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
30370 + #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158
30371 +
30372 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
30373 +index b400867e94f0a..3f284836e7076 100644
30374 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
30375 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
30376 +@@ -2704,7 +2704,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
30377 +
30378 + /* start pseudo D3 */
30379 + rtnl_lock();
30380 ++ wiphy_lock(mvm->hw->wiphy);
30381 + err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
30382 ++ wiphy_unlock(mvm->hw->wiphy);
30383 + rtnl_unlock();
30384 + if (err > 0)
30385 + err = -EINVAL;
30386 +@@ -2760,7 +2762,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
30387 + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
30388 +
30389 + rtnl_lock();
30390 ++ wiphy_lock(mvm->hw->wiphy);
30391 + __iwl_mvm_resume(mvm, true);
30392 ++ wiphy_unlock(mvm->hw->wiphy);
30393 + rtnl_unlock();
30394 +
30395 + iwl_mvm_resume_tcm(mvm);
30396 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
30397 +index ae589b3b8c46e..ee031a5897140 100644
30398 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
30399 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
30400 +@@ -1658,8 +1658,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
30401 + while (!sband && i < NUM_NL80211_BANDS)
30402 + sband = mvm->hw->wiphy->bands[i++];
30403 +
30404 +- if (WARN_ON_ONCE(!sband))
30405 ++ if (WARN_ON_ONCE(!sband)) {
30406 ++ ret = -ENODEV;
30407 + goto error;
30408 ++ }
30409 +
30410 + chan = &sband->channels[0];
30411 +
30412 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
30413 +index 1f8b97995b943..069d54501e30e 100644
30414 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
30415 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
30416 +@@ -235,7 +235,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
30417 + */
30418 + mvm->fw_static_smps_request =
30419 + req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
30420 +- ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
30421 ++ ieee80211_iterate_interfaces(mvm->hw,
30422 ++ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
30423 + iwl_mvm_intf_dual_chain_req, NULL);
30424 + }
30425 +
30426 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
30427 +index 64446a11ef980..9a46468bd4345 100644
30428 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
30429 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
30430 +@@ -640,7 +640,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
30431 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
30432 + u16 vif_id = mvmvif->id;
30433 +
30434 +- if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
30435 ++ if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
30436 + return;
30437 +
30438 + if (vif->type != NL80211_IFTYPE_STATION)
30439 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
30440 +index 9213f8518f10d..40daced97b9e8 100644
30441 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
30442 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
30443 +@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
30444 +
30445 + /* info->control is only relevant for non HW rate control */
30446 + if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
30447 +- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
30448 +-
30449 + /* HT rate doesn't make sense for a non data frame */
30450 + WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
30451 + !ieee80211_is_data(fc),
30452 + "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
30453 + info->control.rates[0].flags,
30454 + info->control.rates[0].idx,
30455 +- le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
30456 ++ le16_to_cpu(fc),
30457 ++ sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
30458 +
30459 + rate_idx = info->control.rates[0].idx;
30460 + }
30461 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
30462 +index ef14584fc0a17..4b08eb46617c7 100644
30463 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
30464 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
30465 +@@ -1112,7 +1112,7 @@ static const struct iwl_causes_list causes_list_pre_bz[] = {
30466 + };
30467 +
30468 + static const struct iwl_causes_list causes_list_bz[] = {
30469 +- {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
30470 ++ {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15},
30471 + };
30472 +
30473 + static void iwl_pcie_map_list(struct iwl_trans *trans,
30474 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
30475 +index 404c3d1a70d69..1f6f7a44d3f00 100644
30476 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h
30477 ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
30478 +@@ -224,7 +224,7 @@ enum mt76_wcid_flags {
30479 + MT_WCID_FLAG_HDR_TRANS,
30480 + };
30481 +
30482 +-#define MT76_N_WCIDS 288
30483 ++#define MT76_N_WCIDS 544
30484 +
30485 + /* stored in ieee80211_tx_info::hw_queue */
30486 + #define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
30487 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
30488 +index 2b546bc05d822..83c5eec5b1633 100644
30489 +--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
30490 ++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
30491 +@@ -641,6 +641,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
30492 + struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
30493 + int i;
30494 +
30495 ++ if (!sta_rates)
30496 ++ return;
30497 ++
30498 + spin_lock_bh(&dev->mt76.lock);
30499 + for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
30500 + msta->rates[i].idx = sta_rates->rate[i].idx;
30501 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
30502 +index ec25e5a95d442..ba31bb7caaf90 100644
30503 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
30504 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
30505 +@@ -253,13 +253,13 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
30506 + static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
30507 + {
30508 + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
30509 ++ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
30510 + struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
30511 ++ __le32 *rxd = (__le32 *)skb->data;
30512 + struct ieee80211_sta *sta;
30513 + struct ieee80211_vif *vif;
30514 + struct ieee80211_hdr hdr;
30515 +- struct ethhdr eth_hdr;
30516 +- __le32 *rxd = (__le32 *)skb->data;
30517 +- __le32 qos_ctrl, ht_ctrl;
30518 ++ u16 frame_control;
30519 +
30520 + if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
30521 + MT_RXD1_NORMAL_U2M)
30522 +@@ -275,47 +275,53 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
30523 + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
30524 +
30525 + /* store the info from RXD and ethhdr to avoid being overridden */
30526 +- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
30527 +- hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
30528 +- hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
30529 +- qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
30530 +- ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
30531 +-
30532 ++ frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
30533 ++ hdr.frame_control = cpu_to_le16(frame_control);
30534 ++ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
30535 + hdr.duration_id = 0;
30536 ++
30537 + ether_addr_copy(hdr.addr1, vif->addr);
30538 + ether_addr_copy(hdr.addr2, sta->addr);
30539 +- switch (le16_to_cpu(hdr.frame_control) &
30540 +- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
30541 ++ switch (frame_control & (IEEE80211_FCTL_TODS |
30542 ++ IEEE80211_FCTL_FROMDS)) {
30543 + case 0:
30544 + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
30545 + break;
30546 + case IEEE80211_FCTL_FROMDS:
30547 +- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
30548 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
30549 + break;
30550 + case IEEE80211_FCTL_TODS:
30551 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
30552 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
30553 + break;
30554 + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
30555 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
30556 +- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
30557 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
30558 ++ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
30559 + break;
30560 + default:
30561 + break;
30562 + }
30563 +
30564 + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
30565 +- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
30566 +- eth_hdr.h_proto == htons(ETH_P_IPX))
30567 ++ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
30568 ++ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
30569 + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
30570 +- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
30571 ++ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
30572 + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
30573 + else
30574 + skb_pull(skb, 2);
30575 +
30576 + if (ieee80211_has_order(hdr.frame_control))
30577 +- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
30578 +- if (ieee80211_is_data_qos(hdr.frame_control))
30579 +- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
30580 ++ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
30581 ++ IEEE80211_HT_CTL_LEN);
30582 ++
30583 ++ if (ieee80211_is_data_qos(hdr.frame_control)) {
30584 ++ __le16 qos_ctrl;
30585 ++
30586 ++ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
30587 ++ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
30588 ++ IEEE80211_QOS_CTL_LEN);
30589 ++ }
30590 ++
30591 + if (ieee80211_has_a4(hdr.frame_control))
30592 + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
30593 + else
30594 +@@ -2103,6 +2109,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
30595 + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
30596 + goto out;
30597 +
30598 ++ if (mutex_is_locked(&dev->mt76.mutex))
30599 ++ /* if mt76 mutex is held we should not put the device
30600 ++ * to sleep since we are currently accessing device
30601 ++ * register map. We need to wait for the next power_save
30602 ++ * trigger.
30603 ++ */
30604 ++ goto out;
30605 ++
30606 + if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
30607 + delta = dev->pm.last_activity + delta - jiffies;
30608 + goto out;
30609 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
30610 +index 82d625a16a62c..ce902b107ce33 100644
30611 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
30612 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
30613 +@@ -683,6 +683,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
30614 + struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
30615 + int i;
30616 +
30617 ++ if (!sta_rates)
30618 ++ return;
30619 ++
30620 + spin_lock_bh(&dev->mt76.lock);
30621 + for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
30622 + msta->rates[i].idx = sta_rates->rate[i].idx;
30623 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
30624 +index f79e3d5084f39..5664f119447bc 100644
30625 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
30626 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
30627 +@@ -310,7 +310,7 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
30628 + }
30629 +
30630 + if (sta_hdr)
30631 +- sta_hdr->len = cpu_to_le16(sizeof(hdr));
30632 ++ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
30633 +
30634 + return skb_put_data(nskb, &hdr, sizeof(hdr));
30635 + }
30636 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
30637 +index 5baf8370b7bd8..93c783a3af7c5 100644
30638 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
30639 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
30640 +@@ -996,7 +996,8 @@ enum {
30641 + MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
30642 + MCU_CE_CMD_SET_BSS_ABORT = 0x17,
30643 + MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
30644 +- MCU_CE_CMD_SET_ROC = 0x1d,
30645 ++ MCU_CE_CMD_SET_ROC = 0x1c,
30646 ++ MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
30647 + MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
30648 + MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
30649 + MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
30650 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
30651 +index d054cdecd5f70..29517ca08de0c 100644
30652 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
30653 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
30654 +@@ -399,7 +399,7 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
30655 + /* enable hardware de-agg */
30656 + mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
30657 +
30658 +- for (i = 0; i < MT7915_WTBL_SIZE; i++)
30659 ++ for (i = 0; i < mt7915_wtbl_size(dev); i++)
30660 + mt7915_mac_wtbl_update(dev, i,
30661 + MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
30662 + for (i = 0; i < 2; i++)
30663 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
30664 +index 48f1155022823..db267642924d0 100644
30665 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
30666 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
30667 +@@ -391,13 +391,13 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
30668 + static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
30669 + {
30670 + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
30671 ++ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
30672 + struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
30673 ++ __le32 *rxd = (__le32 *)skb->data;
30674 + struct ieee80211_sta *sta;
30675 + struct ieee80211_vif *vif;
30676 + struct ieee80211_hdr hdr;
30677 +- struct ethhdr eth_hdr;
30678 +- __le32 *rxd = (__le32 *)skb->data;
30679 +- __le32 qos_ctrl, ht_ctrl;
30680 ++ u16 frame_control;
30681 +
30682 + if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
30683 + MT_RXD3_NORMAL_U2M)
30684 +@@ -413,47 +413,52 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
30685 + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
30686 +
30687 + /* store the info from RXD and ethhdr to avoid being overridden */
30688 +- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
30689 +- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
30690 +- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
30691 +- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
30692 +- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
30693 +-
30694 ++ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
30695 ++ hdr.frame_control = cpu_to_le16(frame_control);
30696 ++ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
30697 + hdr.duration_id = 0;
30698 ++
30699 + ether_addr_copy(hdr.addr1, vif->addr);
30700 + ether_addr_copy(hdr.addr2, sta->addr);
30701 +- switch (le16_to_cpu(hdr.frame_control) &
30702 +- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
30703 ++ switch (frame_control & (IEEE80211_FCTL_TODS |
30704 ++ IEEE80211_FCTL_FROMDS)) {
30705 + case 0:
30706 + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
30707 + break;
30708 + case IEEE80211_FCTL_FROMDS:
30709 +- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
30710 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
30711 + break;
30712 + case IEEE80211_FCTL_TODS:
30713 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
30714 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
30715 + break;
30716 + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
30717 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
30718 +- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
30719 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
30720 ++ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
30721 + break;
30722 + default:
30723 + break;
30724 + }
30725 +
30726 + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
30727 +- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
30728 +- eth_hdr.h_proto == htons(ETH_P_IPX))
30729 ++ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
30730 ++ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
30731 + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
30732 +- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
30733 ++ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
30734 + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
30735 + else
30736 + skb_pull(skb, 2);
30737 +
30738 + if (ieee80211_has_order(hdr.frame_control))
30739 +- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
30740 +- if (ieee80211_is_data_qos(hdr.frame_control))
30741 +- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
30742 ++ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
30743 ++ IEEE80211_HT_CTL_LEN);
30744 ++ if (ieee80211_is_data_qos(hdr.frame_control)) {
30745 ++ __le16 qos_ctrl;
30746 ++
30747 ++ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
30748 ++ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
30749 ++ IEEE80211_QOS_CTL_LEN);
30750 ++ }
30751 ++
30752 + if (ieee80211_has_a4(hdr.frame_control))
30753 + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
30754 + else
30755 +@@ -1512,7 +1517,6 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
30756 + break;
30757 + case MT_PHY_TYPE_HT:
30758 + case MT_PHY_TYPE_HT_GF:
30759 +- rate.mcs += (rate.nss - 1) * 8;
30760 + if (rate.mcs > 31)
30761 + goto out;
30762 +
30763 +@@ -1594,7 +1598,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
30764 + if (pid < MT_PACKET_ID_FIRST)
30765 + return;
30766 +
30767 +- if (wcidx >= MT7915_WTBL_SIZE)
30768 ++ if (wcidx >= mt7915_wtbl_size(dev))
30769 + return;
30770 +
30771 + rcu_read_lock();
30772 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
30773 +index 0911b6f973b5a..31634d7ed1737 100644
30774 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
30775 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
30776 +@@ -211,24 +211,12 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
30777 +
30778 + static void
30779 + mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
30780 +- const u16 *mask)
30781 ++ u16 mcs_map)
30782 + {
30783 + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
30784 +- struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
30785 ++ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
30786 ++ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
30787 + int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
30788 +- u16 mcs_map;
30789 +-
30790 +- switch (chandef->width) {
30791 +- case NL80211_CHAN_WIDTH_80P80:
30792 +- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
30793 +- break;
30794 +- case NL80211_CHAN_WIDTH_160:
30795 +- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
30796 +- break;
30797 +- default:
30798 +- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
30799 +- break;
30800 +- }
30801 +
30802 + for (nss = 0; nss < max_nss; nss++) {
30803 + int mcs;
30804 +@@ -1264,8 +1252,11 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
30805 + generic = (struct wtbl_generic *)tlv;
30806 +
30807 + if (sta) {
30808 ++ if (vif->type == NL80211_IFTYPE_STATION)
30809 ++ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
30810 ++ else
30811 ++ generic->partial_aid = cpu_to_le16(sta->aid);
30812 + memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
30813 +- generic->partial_aid = cpu_to_le16(sta->aid);
30814 + generic->muar_idx = mvif->mt76.omac_idx;
30815 + generic->qos = sta->wme;
30816 + } else {
30817 +@@ -1319,12 +1310,15 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
30818 + case NL80211_IFTYPE_MESH_POINT:
30819 + case NL80211_IFTYPE_AP:
30820 + basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
30821 ++ basic->aid = cpu_to_le16(sta->aid);
30822 + break;
30823 + case NL80211_IFTYPE_STATION:
30824 + basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
30825 ++ basic->aid = cpu_to_le16(vif->bss_conf.aid);
30826 + break;
30827 + case NL80211_IFTYPE_ADHOC:
30828 + basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
30829 ++ basic->aid = cpu_to_le16(sta->aid);
30830 + break;
30831 + default:
30832 + WARN_ON(1);
30833 +@@ -1332,7 +1326,6 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
30834 + }
30835 +
30836 + memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
30837 +- basic->aid = cpu_to_le16(sta->aid);
30838 + basic->qos = sta->wme;
30839 + }
30840 +
30841 +@@ -1340,11 +1333,9 @@ static void
30842 + mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
30843 + struct ieee80211_vif *vif)
30844 + {
30845 +- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
30846 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
30847 + struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
30848 +- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
30849 +- const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
30850 ++ struct ieee80211_he_mcs_nss_supp mcs_map;
30851 + struct sta_rec_he *he;
30852 + struct tlv *tlv;
30853 + u32 cap = 0;
30854 +@@ -1434,22 +1425,23 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
30855 +
30856 + he->he_cap = cpu_to_le32(cap);
30857 +
30858 ++ mcs_map = sta->he_cap.he_mcs_nss_supp;
30859 + switch (sta->bandwidth) {
30860 + case IEEE80211_STA_RX_BW_160:
30861 + if (elem->phy_cap_info[0] &
30862 + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
30863 + mt7915_mcu_set_sta_he_mcs(sta,
30864 + &he->max_nss_mcs[CMD_HE_MCS_BW8080],
30865 +- mcs_mask);
30866 ++ le16_to_cpu(mcs_map.rx_mcs_80p80));
30867 +
30868 + mt7915_mcu_set_sta_he_mcs(sta,
30869 + &he->max_nss_mcs[CMD_HE_MCS_BW160],
30870 +- mcs_mask);
30871 ++ le16_to_cpu(mcs_map.rx_mcs_160));
30872 + fallthrough;
30873 + default:
30874 + mt7915_mcu_set_sta_he_mcs(sta,
30875 + &he->max_nss_mcs[CMD_HE_MCS_BW80],
30876 +- mcs_mask);
30877 ++ le16_to_cpu(mcs_map.rx_mcs_80));
30878 + break;
30879 + }
30880 +
30881 +@@ -1524,9 +1516,6 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
30882 + vif->type != NL80211_IFTYPE_AP)
30883 + return;
30884 +
30885 +- if (!sta->vht_cap.vht_supported)
30886 +- return;
30887 +-
30888 + tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
30889 +
30890 + muru = (struct sta_rec_muru *)tlv;
30891 +@@ -1534,9 +1523,12 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
30892 + muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
30893 + mvif->cap.vht_mu_ebfer ||
30894 + mvif->cap.vht_mu_ebfee;
30895 ++ muru->cfg.mimo_ul_en = true;
30896 ++ muru->cfg.ofdma_dl_en = true;
30897 +
30898 +- muru->mimo_dl.vht_mu_bfee =
30899 +- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
30900 ++ if (sta->vht_cap.vht_supported)
30901 ++ muru->mimo_dl.vht_mu_bfee =
30902 ++ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
30903 +
30904 + if (!sta->he_cap.has_he)
30905 + return;
30906 +@@ -1544,13 +1536,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
30907 + muru->mimo_dl.partial_bw_dl_mimo =
30908 + HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
30909 +
30910 +- muru->cfg.mimo_ul_en = true;
30911 + muru->mimo_ul.full_ul_mimo =
30912 + HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
30913 + muru->mimo_ul.partial_ul_mimo =
30914 + HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
30915 +
30916 +- muru->cfg.ofdma_dl_en = true;
30917 + muru->ofdma_dl.punc_pream_rx =
30918 + HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
30919 + muru->ofdma_dl.he_20m_in_40m_2g =
30920 +@@ -2134,9 +2124,12 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
30921 + phy.sgi |= gi << (i << (_he)); \
30922 + phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
30923 + } \
30924 +- for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
30925 +- nrates += hweight16(mask->control[band]._mcs[i]); \
30926 +- phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
30927 ++ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
30928 ++ if (!mask->control[band]._mcs[i]) \
30929 ++ continue; \
30930 ++ nrates += hweight16(mask->control[band]._mcs[i]); \
30931 ++ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
30932 ++ } \
30933 + } while (0)
30934 +
30935 + if (sta->he_cap.has_he) {
30936 +@@ -2394,8 +2387,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
30937 + }
30938 +
30939 + ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
30940 +- if (ret)
30941 ++ if (ret) {
30942 ++ dev_kfree_skb(skb);
30943 + return ret;
30944 ++ }
30945 +
30946 + if (sta && sta->ht_cap.ht_supported) {
30947 + /* starec amsdu */
30948 +@@ -2409,8 +2404,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
30949 + }
30950 +
30951 + ret = mt7915_mcu_add_group(dev, vif, sta);
30952 +- if (ret)
30953 ++ if (ret) {
30954 ++ dev_kfree_skb(skb);
30955 + return ret;
30956 ++ }
30957 + out:
30958 + return mt76_mcu_skb_send_msg(&dev->mt76, skb,
30959 + MCU_EXT_CMD(STA_REC_UPDATE), true);
30960 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
30961 +index 42d887383e8d8..12ca545664614 100644
30962 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
30963 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
30964 +@@ -12,7 +12,8 @@
30965 + #define MT7915_MAX_INTERFACES 19
30966 + #define MT7915_MAX_WMM_SETS 4
30967 + #define MT7915_WTBL_SIZE 288
30968 +-#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
30969 ++#define MT7916_WTBL_SIZE 544
30970 ++#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
30971 + #define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
30972 + MT7915_MAX_INTERFACES)
30973 +
30974 +@@ -449,6 +450,11 @@ static inline bool is_mt7915(struct mt76_dev *dev)
30975 + return mt76_chip(dev) == 0x7915;
30976 + }
30977 +
30978 ++static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
30979 ++{
30980 ++ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
30981 ++}
30982 ++
30983 + void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
30984 + u32 clear, u32 set);
30985 +
30986 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
30987 +index 86fd7292b229f..196b50e616fe0 100644
30988 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
30989 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
30990 +@@ -129,23 +129,22 @@ mt7921_queues_acq(struct seq_file *s, void *data)
30991 +
30992 + mt7921_mutex_acquire(dev);
30993 +
30994 +- for (i = 0; i < 16; i++) {
30995 +- int j, acs = i / 4, index = i % 4;
30996 ++ for (i = 0; i < 4; i++) {
30997 + u32 ctrl, val, qlen = 0;
30998 ++ int j;
30999 +
31000 +- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
31001 +- ctrl = BIT(31) | BIT(15) | (acs << 8);
31002 ++ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i));
31003 ++ ctrl = BIT(31) | BIT(11) | (i << 24);
31004 +
31005 + for (j = 0; j < 32; j++) {
31006 + if (val & BIT(j))
31007 + continue;
31008 +
31009 +- mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
31010 +- ctrl | (j + (index << 5)));
31011 ++ mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j);
31012 + qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
31013 + GENMASK(11, 0));
31014 + }
31015 +- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
31016 ++ seq_printf(s, "AC%d: queued=%d\n", i, qlen);
31017 + }
31018 +
31019 + mt7921_mutex_release(dev);
31020 +@@ -291,13 +290,12 @@ mt7921_pm_set(void *data, u64 val)
31021 + pm->enable = false;
31022 + mt76_connac_pm_wake(&dev->mphy, pm);
31023 +
31024 ++ pm->enable = val;
31025 + ieee80211_iterate_active_interfaces(mt76_hw(dev),
31026 + IEEE80211_IFACE_ITER_RESUME_ALL,
31027 + mt7921_pm_interface_iter, dev);
31028 +
31029 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
31030 +-
31031 +- pm->enable = val;
31032 + mt76_connac_power_save_sched(&dev->mphy, pm);
31033 + out:
31034 + mutex_unlock(&dev->mt76.mutex);
31035 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
31036 +index cdff1fd52d93a..39d6ce4ecddd7 100644
31037 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
31038 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
31039 +@@ -78,110 +78,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
31040 + mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
31041 + }
31042 +
31043 +-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
31044 +-{
31045 +- static const struct {
31046 +- u32 phys;
31047 +- u32 mapped;
31048 +- u32 size;
31049 +- } fixed_map[] = {
31050 +- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
31051 +- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
31052 +- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
31053 +- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
31054 +- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
31055 +- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
31056 +- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
31057 +- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
31058 +- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
31059 +- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
31060 +- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
31061 +- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
31062 +- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
31063 +- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
31064 +- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
31065 +- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
31066 +- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
31067 +- { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
31068 +- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
31069 +- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
31070 +- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
31071 +- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
31072 +- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
31073 +- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
31074 +- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
31075 +- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
31076 +- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
31077 +- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
31078 +- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
31079 +- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
31080 +- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
31081 +- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
31082 +- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
31083 +- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
31084 +- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
31085 +- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
31086 +- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
31087 +- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
31088 +- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
31089 +- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
31090 +- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
31091 +- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
31092 +- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
31093 +- };
31094 +- int i;
31095 +-
31096 +- if (addr < 0x100000)
31097 +- return addr;
31098 +-
31099 +- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
31100 +- u32 ofs;
31101 +-
31102 +- if (addr < fixed_map[i].phys)
31103 +- continue;
31104 +-
31105 +- ofs = addr - fixed_map[i].phys;
31106 +- if (ofs > fixed_map[i].size)
31107 +- continue;
31108 +-
31109 +- return fixed_map[i].mapped + ofs;
31110 +- }
31111 +-
31112 +- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
31113 +- (addr >= 0x70000000 && addr < 0x78000000) ||
31114 +- (addr >= 0x7c000000 && addr < 0x7c400000))
31115 +- return mt7921_reg_map_l1(dev, addr);
31116 +-
31117 +- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
31118 +- addr);
31119 +-
31120 +- return 0;
31121 +-}
31122 +-
31123 +-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
31124 +-{
31125 +- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31126 +- u32 addr = __mt7921_reg_addr(dev, offset);
31127 +-
31128 +- return dev->bus_ops->rr(mdev, addr);
31129 +-}
31130 +-
31131 +-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
31132 +-{
31133 +- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31134 +- u32 addr = __mt7921_reg_addr(dev, offset);
31135 +-
31136 +- dev->bus_ops->wr(mdev, addr, val);
31137 +-}
31138 +-
31139 +-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
31140 +-{
31141 +- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31142 +- u32 addr = __mt7921_reg_addr(dev, offset);
31143 +-
31144 +- return dev->bus_ops->rmw(mdev, addr, mask, val);
31145 +-}
31146 +-
31147 + static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
31148 + {
31149 + if (force) {
31150 +@@ -341,23 +237,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
31151 +
31152 + int mt7921_dma_init(struct mt7921_dev *dev)
31153 + {
31154 +- struct mt76_bus_ops *bus_ops;
31155 + int ret;
31156 +
31157 +- dev->phy.dev = dev;
31158 +- dev->phy.mt76 = &dev->mt76.phy;
31159 +- dev->mt76.phy.priv = &dev->phy;
31160 +- dev->bus_ops = dev->mt76.bus;
31161 +- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
31162 +- GFP_KERNEL);
31163 +- if (!bus_ops)
31164 +- return -ENOMEM;
31165 +-
31166 +- bus_ops->rr = mt7921_rr;
31167 +- bus_ops->wr = mt7921_wr;
31168 +- bus_ops->rmw = mt7921_rmw;
31169 +- dev->mt76.bus = bus_ops;
31170 +-
31171 + mt76_dma_attach(&dev->mt76);
31172 +
31173 + ret = mt7921_dma_disable(dev, true);
31174 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
31175 +index ec10f95a46495..84f72dd1bf930 100644
31176 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
31177 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
31178 +@@ -402,13 +402,13 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
31179 + static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
31180 + {
31181 + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
31182 ++ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
31183 + struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
31184 ++ __le32 *rxd = (__le32 *)skb->data;
31185 + struct ieee80211_sta *sta;
31186 + struct ieee80211_vif *vif;
31187 + struct ieee80211_hdr hdr;
31188 +- struct ethhdr eth_hdr;
31189 +- __le32 *rxd = (__le32 *)skb->data;
31190 +- __le32 qos_ctrl, ht_ctrl;
31191 ++ u16 frame_control;
31192 +
31193 + if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
31194 + MT_RXD3_NORMAL_U2M)
31195 +@@ -424,47 +424,52 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
31196 + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
31197 +
31198 + /* store the info from RXD and ethhdr to avoid being overridden */
31199 +- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
31200 +- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
31201 +- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
31202 +- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
31203 +- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
31204 +-
31205 ++ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
31206 ++ hdr.frame_control = cpu_to_le16(frame_control);
31207 ++ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
31208 + hdr.duration_id = 0;
31209 ++
31210 + ether_addr_copy(hdr.addr1, vif->addr);
31211 + ether_addr_copy(hdr.addr2, sta->addr);
31212 +- switch (le16_to_cpu(hdr.frame_control) &
31213 +- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
31214 ++ switch (frame_control & (IEEE80211_FCTL_TODS |
31215 ++ IEEE80211_FCTL_FROMDS)) {
31216 + case 0:
31217 + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
31218 + break;
31219 + case IEEE80211_FCTL_FROMDS:
31220 +- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
31221 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
31222 + break;
31223 + case IEEE80211_FCTL_TODS:
31224 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
31225 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
31226 + break;
31227 + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
31228 +- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
31229 +- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
31230 ++ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
31231 ++ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
31232 + break;
31233 + default:
31234 + break;
31235 + }
31236 +
31237 + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
31238 +- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
31239 +- eth_hdr.h_proto == htons(ETH_P_IPX))
31240 ++ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
31241 ++ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
31242 + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
31243 +- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
31244 ++ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
31245 + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
31246 + else
31247 + skb_pull(skb, 2);
31248 +
31249 + if (ieee80211_has_order(hdr.frame_control))
31250 +- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
31251 +- if (ieee80211_is_data_qos(hdr.frame_control))
31252 +- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
31253 ++ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
31254 ++ IEEE80211_HT_CTL_LEN);
31255 ++ if (ieee80211_is_data_qos(hdr.frame_control)) {
31256 ++ __le16 qos_ctrl;
31257 ++
31258 ++ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
31259 ++ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
31260 ++ IEEE80211_QOS_CTL_LEN);
31261 ++ }
31262 ++
31263 + if (ieee80211_has_a4(hdr.frame_control))
31264 + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
31265 + else
31266 +@@ -914,9 +919,15 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
31267 + txwi[3] |= cpu_to_le32(val);
31268 + }
31269 +
31270 +- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
31271 +- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
31272 +- txwi[7] |= cpu_to_le32(val);
31273 ++ if (mt76_is_mmio(&dev->mt76)) {
31274 ++ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
31275 ++ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
31276 ++ txwi[7] |= cpu_to_le32(val);
31277 ++ } else {
31278 ++ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
31279 ++ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
31280 ++ txwi[8] |= cpu_to_le32(val);
31281 ++ }
31282 + }
31283 +
31284 + void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
31285 +@@ -1092,7 +1103,6 @@ mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
31286 + break;
31287 + case MT_PHY_TYPE_HT:
31288 + case MT_PHY_TYPE_HT_GF:
31289 +- rate.mcs += (rate.nss - 1) * 8;
31290 + if (rate.mcs > 31)
31291 + goto out;
31292 +
31293 +@@ -1551,6 +1561,14 @@ void mt7921_pm_power_save_work(struct work_struct *work)
31294 + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
31295 + goto out;
31296 +
31297 ++ if (mutex_is_locked(&dev->mt76.mutex))
31298 ++ /* if mt76 mutex is held we should not put the device
31299 ++ * to sleep since we are currently accessing device
31300 ++ * register map. We need to wait for the next power_save
31301 ++ * trigger.
31302 ++ */
31303 ++ goto out;
31304 ++
31305 + if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
31306 + delta = dev->pm.last_activity + delta - jiffies;
31307 + goto out;
31308 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
31309 +index 544a1c33126a4..12e1cf8abe6ea 100644
31310 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
31311 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
31312 +@@ -284,6 +284,9 @@ enum tx_mcu_port_q_idx {
31313 + #define MT_TXD7_HW_AMSDU BIT(10)
31314 + #define MT_TXD7_TX_TIME GENMASK(9, 0)
31315 +
31316 ++#define MT_TXD8_L_TYPE GENMASK(5, 4)
31317 ++#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
31318 ++
31319 + #define MT_TX_RATE_STBC BIT(13)
31320 + #define MT_TX_RATE_NSS GENMASK(12, 10)
31321 + #define MT_TX_RATE_MODE GENMASK(9, 6)
31322 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
31323 +index ef1e1ef91611b..e82545a7fcc11 100644
31324 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
31325 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
31326 +@@ -707,12 +707,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
31327 + if (mt76_is_sdio(&dev->mt76)) {
31328 + /* activate again */
31329 + ret = __mt7921_mcu_fw_pmctrl(dev);
31330 +- if (ret)
31331 +- return ret;
31332 +-
31333 +- ret = __mt7921_mcu_drv_pmctrl(dev);
31334 +- if (ret)
31335 +- return ret;
31336 ++ if (!ret)
31337 ++ ret = __mt7921_mcu_drv_pmctrl(dev);
31338 + }
31339 +
31340 + out:
31341 +@@ -920,33 +916,28 @@ EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
31342 +
31343 + int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
31344 + {
31345 +-#define WMM_AIFS_SET BIT(0)
31346 +-#define WMM_CW_MIN_SET BIT(1)
31347 +-#define WMM_CW_MAX_SET BIT(2)
31348 +-#define WMM_TXOP_SET BIT(3)
31349 +-#define WMM_PARAM_SET GENMASK(3, 0)
31350 +-#define TX_CMD_MODE 1
31351 ++ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
31352 ++
31353 + struct edca {
31354 +- u8 queue;
31355 +- u8 set;
31356 +- u8 aifs;
31357 +- u8 cw_min;
31358 ++ __le16 cw_min;
31359 + __le16 cw_max;
31360 + __le16 txop;
31361 +- };
31362 ++ __le16 aifs;
31363 ++ u8 guardtime;
31364 ++ u8 acm;
31365 ++ } __packed;
31366 + struct mt7921_mcu_tx {
31367 +- u8 total;
31368 +- u8 action;
31369 +- u8 valid;
31370 +- u8 mode;
31371 +-
31372 + struct edca edca[IEEE80211_NUM_ACS];
31373 ++ u8 bss_idx;
31374 ++ u8 qos;
31375 ++ u8 wmm_idx;
31376 ++ u8 pad;
31377 + } __packed req = {
31378 +- .valid = true,
31379 +- .mode = TX_CMD_MODE,
31380 +- .total = IEEE80211_NUM_ACS,
31381 ++ .bss_idx = mvif->mt76.idx,
31382 ++ .qos = vif->bss_conf.qos,
31383 ++ .wmm_idx = mvif->mt76.wmm_idx,
31384 + };
31385 +- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
31386 ++
31387 + struct mu_edca {
31388 + u8 cw_min;
31389 + u8 cw_max;
31390 +@@ -970,30 +961,29 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
31391 + .qos = vif->bss_conf.qos,
31392 + .wmm_idx = mvif->mt76.wmm_idx,
31393 + };
31394 ++ int to_aci[] = {1, 0, 2, 3};
31395 + int ac, ret;
31396 +
31397 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
31398 + struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
31399 +- struct edca *e = &req.edca[ac];
31400 ++ struct edca *e = &req.edca[to_aci[ac]];
31401 +
31402 +- e->set = WMM_PARAM_SET;
31403 +- e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
31404 + e->aifs = q->aifs;
31405 + e->txop = cpu_to_le16(q->txop);
31406 +
31407 + if (q->cw_min)
31408 +- e->cw_min = fls(q->cw_min);
31409 ++ e->cw_min = cpu_to_le16(q->cw_min);
31410 + else
31411 + e->cw_min = 5;
31412 +
31413 + if (q->cw_max)
31414 +- e->cw_max = cpu_to_le16(fls(q->cw_max));
31415 ++ e->cw_max = cpu_to_le16(q->cw_max);
31416 + else
31417 + e->cw_max = cpu_to_le16(10);
31418 + }
31419 +
31420 +- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
31421 +- &req, sizeof(req), true);
31422 ++ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
31423 ++ sizeof(req), false);
31424 + if (ret)
31425 + return ret;
31426 +
31427 +@@ -1003,7 +993,6 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
31428 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
31429 + struct ieee80211_he_mu_edca_param_ac_rec *q;
31430 + struct mu_edca *e;
31431 +- int to_aci[] = {1, 0, 2, 3};
31432 +
31433 + if (!mvif->queue_params[ac].mu_edca)
31434 + break;
31435 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
31436 +index 96647801850a5..33f8e5b541b35 100644
31437 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
31438 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
31439 +@@ -452,6 +452,7 @@ int mt7921e_mcu_init(struct mt7921_dev *dev);
31440 + int mt7921s_wfsys_reset(struct mt7921_dev *dev);
31441 + int mt7921s_mac_reset(struct mt7921_dev *dev);
31442 + int mt7921s_init_reset(struct mt7921_dev *dev);
31443 ++int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
31444 + int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
31445 + int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
31446 +
31447 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
31448 +index 9dae2f5972bf9..9a71a5d864819 100644
31449 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
31450 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
31451 +@@ -121,6 +121,110 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
31452 + mt76_free_device(&dev->mt76);
31453 + }
31454 +
31455 ++static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
31456 ++{
31457 ++ static const struct {
31458 ++ u32 phys;
31459 ++ u32 mapped;
31460 ++ u32 size;
31461 ++ } fixed_map[] = {
31462 ++ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
31463 ++ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
31464 ++ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
31465 ++ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
31466 ++ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
31467 ++ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
31468 ++ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
31469 ++ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
31470 ++ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
31471 ++ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
31472 ++ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
31473 ++ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
31474 ++ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
31475 ++ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
31476 ++ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
31477 ++ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
31478 ++ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
31479 ++ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
31480 ++ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
31481 ++ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
31482 ++ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
31483 ++ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
31484 ++ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
31485 ++ { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
31486 ++ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
31487 ++ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
31488 ++ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
31489 ++ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
31490 ++ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
31491 ++ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
31492 ++ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
31493 ++ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
31494 ++ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
31495 ++ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
31496 ++ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
31497 ++ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
31498 ++ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
31499 ++ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
31500 ++ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
31501 ++ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
31502 ++ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
31503 ++ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
31504 ++ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
31505 ++ };
31506 ++ int i;
31507 ++
31508 ++ if (addr < 0x100000)
31509 ++ return addr;
31510 ++
31511 ++ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
31512 ++ u32 ofs;
31513 ++
31514 ++ if (addr < fixed_map[i].phys)
31515 ++ continue;
31516 ++
31517 ++ ofs = addr - fixed_map[i].phys;
31518 ++ if (ofs > fixed_map[i].size)
31519 ++ continue;
31520 ++
31521 ++ return fixed_map[i].mapped + ofs;
31522 ++ }
31523 ++
31524 ++ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
31525 ++ (addr >= 0x70000000 && addr < 0x78000000) ||
31526 ++ (addr >= 0x7c000000 && addr < 0x7c400000))
31527 ++ return mt7921_reg_map_l1(dev, addr);
31528 ++
31529 ++ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
31530 ++ addr);
31531 ++
31532 ++ return 0;
31533 ++}
31534 ++
31535 ++static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
31536 ++{
31537 ++ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31538 ++ u32 addr = __mt7921_reg_addr(dev, offset);
31539 ++
31540 ++ return dev->bus_ops->rr(mdev, addr);
31541 ++}
31542 ++
31543 ++static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
31544 ++{
31545 ++ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31546 ++ u32 addr = __mt7921_reg_addr(dev, offset);
31547 ++
31548 ++ dev->bus_ops->wr(mdev, addr, val);
31549 ++}
31550 ++
31551 ++static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
31552 ++{
31553 ++ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
31554 ++ u32 addr = __mt7921_reg_addr(dev, offset);
31555 ++
31556 ++ return dev->bus_ops->rmw(mdev, addr, mask, val);
31557 ++}
31558 ++
31559 + static int mt7921_pci_probe(struct pci_dev *pdev,
31560 + const struct pci_device_id *id)
31561 + {
31562 +@@ -151,6 +255,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
31563 + .fw_own = mt7921e_mcu_fw_pmctrl,
31564 + };
31565 +
31566 ++ struct mt76_bus_ops *bus_ops;
31567 + struct mt7921_dev *dev;
31568 + struct mt76_dev *mdev;
31569 + int ret;
31570 +@@ -188,6 +293,25 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
31571 +
31572 + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
31573 + tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
31574 ++
31575 ++ dev->phy.dev = dev;
31576 ++ dev->phy.mt76 = &dev->mt76.phy;
31577 ++ dev->mt76.phy.priv = &dev->phy;
31578 ++ dev->bus_ops = dev->mt76.bus;
31579 ++ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
31580 ++ GFP_KERNEL);
31581 ++ if (!bus_ops)
31582 ++ return -ENOMEM;
31583 ++
31584 ++ bus_ops->rr = mt7921_rr;
31585 ++ bus_ops->wr = mt7921_wr;
31586 ++ bus_ops->rmw = mt7921_rmw;
31587 ++ dev->mt76.bus = bus_ops;
31588 ++
31589 ++ ret = __mt7921e_mcu_drv_pmctrl(dev);
31590 ++ if (ret)
31591 ++ return ret;
31592 ++
31593 + mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
31594 + (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
31595 + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
31596 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
31597 +index a020352122a12..daa73c92426ca 100644
31598 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
31599 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
31600 +@@ -59,10 +59,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
31601 + return err;
31602 + }
31603 +
31604 +-int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
31605 ++int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
31606 + {
31607 +- struct mt76_phy *mphy = &dev->mt76.phy;
31608 +- struct mt76_connac_pm *pm = &dev->pm;
31609 + int i, err = 0;
31610 +
31611 + for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
31612 +@@ -75,9 +73,21 @@ int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
31613 + if (i == MT7921_DRV_OWN_RETRY_COUNT) {
31614 + dev_err(dev->mt76.dev, "driver own failed\n");
31615 + err = -EIO;
31616 +- goto out;
31617 + }
31618 +
31619 ++ return err;
31620 ++}
31621 ++
31622 ++int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
31623 ++{
31624 ++ struct mt76_phy *mphy = &dev->mt76.phy;
31625 ++ struct mt76_connac_pm *pm = &dev->pm;
31626 ++ int err;
31627 ++
31628 ++ err = __mt7921e_mcu_drv_pmctrl(dev);
31629 ++ if (err < 0)
31630 ++ goto out;
31631 ++
31632 + mt7921_wpdma_reinit_cond(dev);
31633 + clear_bit(MT76_STATE_PM, &mphy->state);
31634 +
31635 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
31636 +index cbd38122c510f..c8c92faa4624f 100644
31637 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
31638 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
31639 +@@ -17,13 +17,12 @@
31640 + #define MT_PLE_BASE 0x820c0000
31641 + #define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
31642 +
31643 +-#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
31644 +-#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
31645 +-#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
31646 +-#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
31647 ++#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0)
31648 ++#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4)
31649 ++#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8)
31650 ++#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec)
31651 +
31652 +-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
31653 +- ((n) << 2))
31654 ++#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n))
31655 + #define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
31656 +
31657 + #define MT_MDP_BASE 0x820cd000
31658 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
31659 +index d20f2ff01be17..5d8af18c70267 100644
31660 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
31661 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
31662 +@@ -49,6 +49,26 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
31663 + return ret;
31664 + }
31665 +
31666 ++static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
31667 ++{
31668 ++ struct mt76_sdio *sdio = &dev->mt76.sdio;
31669 ++
31670 ++ return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
31671 ++}
31672 ++
31673 ++static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
31674 ++{
31675 ++ struct mt76_sdio *sdio = &dev->mt76.sdio;
31676 ++ u32 val;
31677 ++
31678 ++ val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
31679 ++ if (val)
31680 ++ sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
31681 ++ MCR_WSICR, NULL);
31682 ++
31683 ++ return val;
31684 ++}
31685 ++
31686 + int mt7921s_mcu_init(struct mt7921_dev *dev)
31687 + {
31688 + static const struct mt76_mcu_ops mt7921s_mcu_ops = {
31689 +@@ -88,6 +108,12 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
31690 +
31691 + err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
31692 + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
31693 ++
31694 ++ if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
31695 ++ err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
31696 ++ status & D2HRM3R_IS_DRIVER_OWN,
31697 ++ 2000, 1000000);
31698 ++
31699 + sdio_release_host(func);
31700 +
31701 + if (err < 0) {
31702 +@@ -115,12 +141,24 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
31703 +
31704 + sdio_claim_host(func);
31705 +
31706 ++ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
31707 ++ err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
31708 ++ dev, status,
31709 ++ !(status & D2HRM3R_IS_DRIVER_OWN),
31710 ++ 2000, 1000000);
31711 ++ if (err < 0) {
31712 ++ dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
31713 ++ goto err;
31714 ++ }
31715 ++ }
31716 ++
31717 + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
31718 +
31719 + err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
31720 + !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
31721 + sdio_release_host(func);
31722 +
31723 ++err:
31724 + if (err < 0) {
31725 + dev_err(dev->mt76.dev, "firmware own failed\n");
31726 + clear_bit(MT76_STATE_PM, &mphy->state);
31727 +diff --git a/drivers/net/wireless/mediatek/mt76/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h
31728 +index 99db4ad93b7c7..27d5d2077ebae 100644
31729 +--- a/drivers/net/wireless/mediatek/mt76/sdio.h
31730 ++++ b/drivers/net/wireless/mediatek/mt76/sdio.h
31731 +@@ -65,6 +65,7 @@
31732 + #define MCR_H2DSM0R 0x0070
31733 + #define H2D_SW_INT_READ BIT(16)
31734 + #define H2D_SW_INT_WRITE BIT(17)
31735 ++#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
31736 +
31737 + #define MCR_H2DSM1R 0x0074
31738 + #define MCR_D2HRM0R 0x0078
31739 +@@ -109,6 +110,7 @@
31740 + #define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
31741 + #define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
31742 + #define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
31743 ++#define D2HRM3R_IS_DRIVER_OWN BIT(0)
31744 + #define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
31745 + #define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
31746 + #define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
31747 +diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
31748 +index 2987ad9271f64..87e98ab068ed7 100644
31749 +--- a/drivers/net/wireless/ray_cs.c
31750 ++++ b/drivers/net/wireless/ray_cs.c
31751 +@@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link)
31752 + goto failed;
31753 + local->sram = ioremap(link->resource[2]->start,
31754 + resource_size(link->resource[2]));
31755 ++ if (!local->sram)
31756 ++ goto failed;
31757 +
31758 + /*** Set up 16k window for shared memory (receive buffer) ***************/
31759 + link->resource[3]->flags |=
31760 +@@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link)
31761 + goto failed;
31762 + local->rmem = ioremap(link->resource[3]->start,
31763 + resource_size(link->resource[3]));
31764 ++ if (!local->rmem)
31765 ++ goto failed;
31766 +
31767 + /*** Set up window for attribute memory ***********************************/
31768 + link->resource[4]->flags |=
31769 +@@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link)
31770 + goto failed;
31771 + local->amem = ioremap(link->resource[4]->start,
31772 + resource_size(link->resource[4]));
31773 ++ if (!local->amem)
31774 ++ goto failed;
31775 +
31776 + dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
31777 + dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
31778 +diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
31779 +index 2f7c036f90221..4c8e5ea5d069c 100644
31780 +--- a/drivers/net/wireless/realtek/rtw88/fw.c
31781 ++++ b/drivers/net/wireless/realtek/rtw88/fw.c
31782 +@@ -1784,9 +1784,9 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
31783 + rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
31784 + }
31785 +
31786 +-static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
31787 +- struct sk_buff_head *list,
31788 +- struct rtw_vif *rtwvif)
31789 ++static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
31790 ++ struct sk_buff_head *list, u8 *bands,
31791 ++ struct rtw_vif *rtwvif)
31792 + {
31793 + struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
31794 + struct rtw_chip_info *chip = rtwdev->chip;
31795 +@@ -1797,19 +1797,24 @@ static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
31796 + if (!(BIT(idx) & chip->band))
31797 + continue;
31798 + new = skb_copy(skb, GFP_KERNEL);
31799 ++ if (!new)
31800 ++ return -ENOMEM;
31801 + skb_put_data(new, ies->ies[idx], ies->len[idx]);
31802 + skb_put_data(new, ies->common_ies, ies->common_ie_len);
31803 + skb_queue_tail(list, new);
31804 ++ (*bands)++;
31805 + }
31806 ++
31807 ++ return 0;
31808 + }
31809 +
31810 +-static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
31811 ++static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
31812 + struct sk_buff_head *probe_req_list)
31813 + {
31814 + struct rtw_chip_info *chip = rtwdev->chip;
31815 + struct sk_buff *skb, *tmp;
31816 + u8 page_offset = 1, *buf, page_size = chip->page_size;
31817 +- u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
31818 ++ u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
31819 + u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
31820 + u16 buf_offset = page_size * page_offset;
31821 + u8 tx_desc_sz = chip->tx_pkt_desc_sz;
31822 +@@ -1848,6 +1853,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
31823 + rtwdev->scan_info.probe_pg_size = page_offset;
31824 + out:
31825 + kfree(buf);
31826 ++ skb_queue_walk_safe(probe_req_list, skb, tmp)
31827 ++ kfree_skb(skb);
31828 +
31829 + return ret;
31830 + }
31831 +@@ -1857,8 +1864,9 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
31832 + {
31833 + struct cfg80211_scan_request *req = rtwvif->scan_req;
31834 + struct sk_buff_head list;
31835 +- struct sk_buff *skb;
31836 +- u8 num = req->n_ssids, i;
31837 ++ struct sk_buff *skb, *tmp;
31838 ++ u8 num = req->n_ssids, i, bands = 0;
31839 ++ int ret;
31840 +
31841 + skb_queue_head_init(&list);
31842 + for (i = 0; i < num; i++) {
31843 +@@ -1866,11 +1874,25 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
31844 + req->ssids[i].ssid,
31845 + req->ssids[i].ssid_len,
31846 + req->ie_len);
31847 +- rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
31848 ++ if (!skb) {
31849 ++ ret = -ENOMEM;
31850 ++ goto out;
31851 ++ }
31852 ++ ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
31853 ++ rtwvif);
31854 ++ if (ret)
31855 ++ goto out;
31856 ++
31857 + kfree_skb(skb);
31858 + }
31859 +
31860 +- return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
31861 ++ return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
31862 ++
31863 ++out:
31864 ++ skb_queue_walk_safe(&list, skb, tmp)
31865 ++ kfree_skb(skb);
31866 ++
31867 ++ return ret;
31868 + }
31869 +
31870 + static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
31871 +@@ -2022,7 +2044,7 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
31872 + rtwdev->hal.rcr |= BIT_CBSSID_BCN;
31873 + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
31874 +
31875 +- rtw_core_scan_complete(rtwdev, vif);
31876 ++ rtw_core_scan_complete(rtwdev, vif, true);
31877 +
31878 + ieee80211_wake_queues(rtwdev->hw);
31879 + ieee80211_scan_completed(rtwdev->hw, &info);
31880 +diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
31881 +index ae7d97de5fdf4..647d2662955ba 100644
31882 +--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
31883 ++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
31884 +@@ -72,6 +72,9 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
31885 + struct rtw_dev *rtwdev = hw->priv;
31886 + int ret = 0;
31887 +
31888 ++ /* let previous ips work finish to ensure we don't leave ips twice */
31889 ++ cancel_work_sync(&rtwdev->ips_work);
31890 ++
31891 + mutex_lock(&rtwdev->mutex);
31892 +
31893 + rtw_leave_lps_deep(rtwdev);
31894 +@@ -614,7 +617,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
31895 + struct rtw_dev *rtwdev = hw->priv;
31896 +
31897 + mutex_lock(&rtwdev->mutex);
31898 +- rtw_core_scan_complete(rtwdev, vif);
31899 ++ rtw_core_scan_complete(rtwdev, vif, false);
31900 + mutex_unlock(&rtwdev->mutex);
31901 + }
31902 +
31903 +diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
31904 +index 38252113c4a87..39c223a2e3e2d 100644
31905 +--- a/drivers/net/wireless/realtek/rtw88/main.c
31906 ++++ b/drivers/net/wireless/realtek/rtw88/main.c
31907 +@@ -272,6 +272,15 @@ static void rtw_c2h_work(struct work_struct *work)
31908 + }
31909 + }
31910 +
31911 ++static void rtw_ips_work(struct work_struct *work)
31912 ++{
31913 ++ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
31914 ++
31915 ++ mutex_lock(&rtwdev->mutex);
31916 ++ rtw_enter_ips(rtwdev);
31917 ++ mutex_unlock(&rtwdev->mutex);
31918 ++}
31919 ++
31920 + static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
31921 + {
31922 + unsigned long mac_id;
31923 +@@ -1339,7 +1348,8 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
31924 + set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
31925 + }
31926 +
31927 +-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
31928 ++void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
31929 ++ bool hw_scan)
31930 + {
31931 + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
31932 + u32 config = 0;
31933 +@@ -1354,6 +1364,9 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
31934 + rtw_vif_port_config(rtwdev, rtwvif, config);
31935 +
31936 + rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
31937 ++
31938 ++ if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan)
31939 ++ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
31940 + }
31941 +
31942 + int rtw_core_start(struct rtw_dev *rtwdev)
31943 +@@ -1919,6 +1932,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
31944 + INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
31945 + INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
31946 + INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
31947 ++ INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
31948 + INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
31949 + INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
31950 + skb_queue_head_init(&rtwdev->c2h_queue);
31951 +diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
31952 +index dc1cd9bd4b8a3..36e1e408933db 100644
31953 +--- a/drivers/net/wireless/realtek/rtw88/main.h
31954 ++++ b/drivers/net/wireless/realtek/rtw88/main.h
31955 +@@ -1960,6 +1960,7 @@ struct rtw_dev {
31956 + /* c2h cmd queue & handler work */
31957 + struct sk_buff_head c2h_queue;
31958 + struct work_struct c2h_work;
31959 ++ struct work_struct ips_work;
31960 + struct work_struct fw_recovery_work;
31961 +
31962 + /* used to protect txqs list */
31963 +@@ -2101,7 +2102,8 @@ void rtw_tx_report_purge_timer(struct timer_list *t);
31964 + void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
31965 + void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
31966 + const u8 *mac_addr, bool hw_scan);
31967 +-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
31968 ++void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
31969 ++ bool hw_scan);
31970 + int rtw_core_start(struct rtw_dev *rtwdev);
31971 + void rtw_core_stop(struct rtw_dev *rtwdev);
31972 + int rtw_chip_info_setup(struct rtw_dev *rtwdev);
31973 +diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
31974 +index 5dfa2eba6014c..17d46f4d29139 100644
31975 +--- a/drivers/net/wwan/qcom_bam_dmux.c
31976 ++++ b/drivers/net/wwan/qcom_bam_dmux.c
31977 +@@ -755,7 +755,7 @@ static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
31978 + return 0;
31979 +
31980 + dmux->tx = dma_request_chan(dev, "tx");
31981 +- if (IS_ERR(dmux->rx)) {
31982 ++ if (IS_ERR(dmux->tx)) {
31983 + dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
31984 + dmux->tx = NULL;
31985 + bam_dmux_runtime_suspend(dev);
31986 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
31987 +index 9ccf3d6087993..70ad891a76bae 100644
31988 +--- a/drivers/nvdimm/region_devs.c
31989 ++++ b/drivers/nvdimm/region_devs.c
31990 +@@ -1025,6 +1025,9 @@ static unsigned long default_align(struct nd_region *nd_region)
31991 + }
31992 + }
31993 +
31994 ++ if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
31995 ++ align = PAGE_SIZE;
31996 ++
31997 + mappings = max_t(u16, 1, nd_region->ndr_mappings);
31998 + div_u64_rem(align, mappings, &remainder);
31999 + if (remainder)
32000 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
32001 +index fd4720d37cc0b..6215d50ed3e7d 100644
32002 +--- a/drivers/nvme/host/core.c
32003 ++++ b/drivers/nvme/host/core.c
32004 +@@ -1683,13 +1683,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
32005 + blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
32006 + }
32007 +
32008 +-static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
32009 +-{
32010 +- return !uuid_is_null(&ids->uuid) ||
32011 +- memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
32012 +- memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
32013 +-}
32014 +-
32015 + static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
32016 + {
32017 + return uuid_equal(&a->uuid, &b->uuid) &&
32018 +@@ -1864,9 +1857,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
32019 + nvme_config_discard(disk, ns);
32020 + blk_queue_max_write_zeroes_sectors(disk->queue,
32021 + ns->ctrl->max_zeroes_sectors);
32022 +-
32023 +- set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
32024 +- test_bit(NVME_NS_FORCE_RO, &ns->flags));
32025 + }
32026 +
32027 + static inline bool nvme_first_scan(struct gendisk *disk)
32028 +@@ -1925,6 +1915,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
32029 + goto out_unfreeze;
32030 + }
32031 +
32032 ++ set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
32033 ++ test_bit(NVME_NS_FORCE_RO, &ns->flags));
32034 + set_bit(NVME_NS_READY, &ns->flags);
32035 + blk_mq_unfreeze_queue(ns->disk->queue);
32036 +
32037 +@@ -1937,6 +1929,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
32038 + if (nvme_ns_head_multipath(ns->head)) {
32039 + blk_mq_freeze_queue(ns->head->disk->queue);
32040 + nvme_update_disk_info(ns->head->disk, ns, id);
32041 ++ set_disk_ro(ns->head->disk,
32042 ++ (id->nsattr & NVME_NS_ATTR_RO) ||
32043 ++ test_bit(NVME_NS_FORCE_RO, &ns->flags));
32044 + nvme_mpath_revalidate_paths(ns);
32045 + blk_stack_limits(&ns->head->disk->queue->limits,
32046 + &ns->queue->limits, 0);
32047 +@@ -3581,15 +3576,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
32048 + NULL,
32049 + };
32050 +
32051 +-static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
32052 ++static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
32053 + unsigned nsid)
32054 + {
32055 + struct nvme_ns_head *h;
32056 +
32057 +- lockdep_assert_held(&subsys->lock);
32058 ++ lockdep_assert_held(&ctrl->subsys->lock);
32059 +
32060 +- list_for_each_entry(h, &subsys->nsheads, entry) {
32061 +- if (h->ns_id != nsid)
32062 ++ list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
32063 ++ /*
32064 ++ * Private namespaces can share NSIDs under some conditions.
32065 ++ * In that case we can't use the same ns_head for namespaces
32066 ++ * with the same NSID.
32067 ++ */
32068 ++ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
32069 + continue;
32070 + if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
32071 + return h;
32072 +@@ -3598,16 +3598,24 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
32073 + return NULL;
32074 + }
32075 +
32076 +-static int __nvme_check_ids(struct nvme_subsystem *subsys,
32077 +- struct nvme_ns_head *new)
32078 ++static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
32079 ++ struct nvme_ns_ids *ids)
32080 + {
32081 ++ bool has_uuid = !uuid_is_null(&ids->uuid);
32082 ++ bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
32083 ++ bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
32084 + struct nvme_ns_head *h;
32085 +
32086 + lockdep_assert_held(&subsys->lock);
32087 +
32088 + list_for_each_entry(h, &subsys->nsheads, entry) {
32089 +- if (nvme_ns_ids_valid(&new->ids) &&
32090 +- nvme_ns_ids_equal(&new->ids, &h->ids))
32091 ++ if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
32092 ++ return -EINVAL;
32093 ++ if (has_nguid &&
32094 ++ memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
32095 ++ return -EINVAL;
32096 ++ if (has_eui64 &&
32097 ++ memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
32098 + return -EINVAL;
32099 + }
32100 +
32101 +@@ -3706,7 +3714,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
32102 + head->ids = *ids;
32103 + kref_init(&head->ref);
32104 +
32105 +- ret = __nvme_check_ids(ctrl->subsys, head);
32106 ++ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
32107 + if (ret) {
32108 + dev_err(ctrl->device,
32109 + "duplicate IDs for nsid %d\n", nsid);
32110 +@@ -3749,7 +3757,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
32111 + int ret = 0;
32112 +
32113 + mutex_lock(&ctrl->subsys->lock);
32114 +- head = nvme_find_ns_head(ctrl->subsys, nsid);
32115 ++ head = nvme_find_ns_head(ctrl, nsid);
32116 + if (!head) {
32117 + head = nvme_alloc_ns_head(ctrl, nsid, ids);
32118 + if (IS_ERR(head)) {
32119 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
32120 +index ff775235534cf..a703f1f5fb64c 100644
32121 +--- a/drivers/nvme/host/multipath.c
32122 ++++ b/drivers/nvme/host/multipath.c
32123 +@@ -504,10 +504,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
32124 +
32125 + /*
32126 + * Add a multipath node if the subsystems supports multiple controllers.
32127 +- * We also do this for private namespaces as the namespace sharing data could
32128 +- * change after a rescan.
32129 ++ * We also do this for private namespaces as the namespace sharing flag
32130 ++ * could change after a rescan.
32131 + */
32132 +- if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
32133 ++ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
32134 ++ !nvme_is_unique_nsid(ctrl, head) || !multipath)
32135 + return 0;
32136 +
32137 + head->disk = blk_alloc_disk(ctrl->numa_node);
32138 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
32139 +index a162f6c6da6e1..730cc80d84ff7 100644
32140 +--- a/drivers/nvme/host/nvme.h
32141 ++++ b/drivers/nvme/host/nvme.h
32142 +@@ -716,6 +716,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
32143 + return queue_live;
32144 + return __nvme_check_ready(ctrl, rq, queue_live);
32145 + }
32146 ++
32147 ++/*
32148 ++ * NSID shall be unique for all shared namespaces, or if at least one of the
32149 ++ * following conditions is met:
32150 ++ * 1. Namespace Management is supported by the controller
32151 ++ * 2. ANA is supported by the controller
32152 ++ * 3. NVM Set are supported by the controller
32153 ++ *
32154 ++ * In other case, private namespace are not required to report a unique NSID.
32155 ++ */
32156 ++static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
32157 ++ struct nvme_ns_head *head)
32158 ++{
32159 ++ return head->shared ||
32160 ++ (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
32161 ++ (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
32162 ++ (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
32163 ++}
32164 ++
32165 + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
32166 + void *buf, unsigned bufflen);
32167 + int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
32168 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
32169 +index 65e00c64a588b..d66e2de044e0a 100644
32170 +--- a/drivers/nvme/host/tcp.c
32171 ++++ b/drivers/nvme/host/tcp.c
32172 +@@ -30,6 +30,44 @@ static int so_priority;
32173 + module_param(so_priority, int, 0644);
32174 + MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
32175 +
32176 ++#ifdef CONFIG_DEBUG_LOCK_ALLOC
32177 ++/* lockdep can detect a circular dependency of the form
32178 ++ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
32179 ++ * because dependencies are tracked for both nvme-tcp and user contexts. Using
32180 ++ * a separate class prevents lockdep from conflating nvme-tcp socket use with
32181 ++ * user-space socket API use.
32182 ++ */
32183 ++static struct lock_class_key nvme_tcp_sk_key[2];
32184 ++static struct lock_class_key nvme_tcp_slock_key[2];
32185 ++
32186 ++static void nvme_tcp_reclassify_socket(struct socket *sock)
32187 ++{
32188 ++ struct sock *sk = sock->sk;
32189 ++
32190 ++ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
32191 ++ return;
32192 ++
32193 ++ switch (sk->sk_family) {
32194 ++ case AF_INET:
32195 ++ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
32196 ++ &nvme_tcp_slock_key[0],
32197 ++ "sk_lock-AF_INET-NVME",
32198 ++ &nvme_tcp_sk_key[0]);
32199 ++ break;
32200 ++ case AF_INET6:
32201 ++ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
32202 ++ &nvme_tcp_slock_key[1],
32203 ++ "sk_lock-AF_INET6-NVME",
32204 ++ &nvme_tcp_sk_key[1]);
32205 ++ break;
32206 ++ default:
32207 ++ WARN_ON_ONCE(1);
32208 ++ }
32209 ++}
32210 ++#else
32211 ++static void nvme_tcp_reclassify_socket(struct socket *sock) { }
32212 ++#endif
32213 ++
32214 + enum nvme_tcp_send_state {
32215 + NVME_TCP_SEND_CMD_PDU = 0,
32216 + NVME_TCP_SEND_H2C_PDU,
32217 +@@ -1469,6 +1507,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
32218 + goto err_destroy_mutex;
32219 + }
32220 +
32221 ++ nvme_tcp_reclassify_socket(queue->sock);
32222 ++
32223 + /* Single syn retry */
32224 + tcp_sock_set_syncnt(queue->sock->sk, 1);
32225 +
32226 +diff --git a/drivers/pci/access.c b/drivers/pci/access.c
32227 +index 0d9f6b21babb1..708c7529647fd 100644
32228 +--- a/drivers/pci/access.c
32229 ++++ b/drivers/pci/access.c
32230 +@@ -159,9 +159,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
32231 + * write happen to have any RW1C (write-one-to-clear) bits set, we
32232 + * just inadvertently cleared something we shouldn't have.
32233 + */
32234 +- dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
32235 +- size, pci_domain_nr(bus), bus->number,
32236 +- PCI_SLOT(devfn), PCI_FUNC(devfn), where);
32237 ++ if (!bus->unsafe_warn) {
32238 ++ dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
32239 ++ size, pci_domain_nr(bus), bus->number,
32240 ++ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
32241 ++ bus->unsafe_warn = 1;
32242 ++ }
32243 +
32244 + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
32245 + tmp = readl(addr) & mask;
32246 +diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
32247 +index 6974bd5aa1165..343fe1429e3c2 100644
32248 +--- a/drivers/pci/controller/dwc/pci-imx6.c
32249 ++++ b/drivers/pci/controller/dwc/pci-imx6.c
32250 +@@ -453,10 +453,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
32251 + case IMX7D:
32252 + break;
32253 + case IMX8MM:
32254 +- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
32255 +- if (ret)
32256 +- dev_err(dev, "unable to enable pcie_aux clock\n");
32257 +- break;
32258 + case IMX8MQ:
32259 + ret = clk_prepare_enable(imx6_pcie->pcie_aux);
32260 + if (ret) {
32261 +@@ -809,9 +805,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
32262 + /* Start LTSSM. */
32263 + imx6_pcie_ltssm_enable(dev);
32264 +
32265 +- ret = dw_pcie_wait_for_link(pci);
32266 +- if (ret)
32267 +- goto err_reset_phy;
32268 ++ dw_pcie_wait_for_link(pci);
32269 +
32270 + if (pci->link_gen == 2) {
32271 + /* Allow Gen2 mode after the link is up. */
32272 +@@ -847,11 +841,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
32273 + }
32274 +
32275 + /* Make sure link training is finished as well! */
32276 +- ret = dw_pcie_wait_for_link(pci);
32277 +- if (ret) {
32278 +- dev_err(dev, "Failed to bring link up!\n");
32279 +- goto err_reset_phy;
32280 +- }
32281 ++ dw_pcie_wait_for_link(pci);
32282 + } else {
32283 + dev_info(dev, "Link: Gen2 disabled\n");
32284 + }
32285 +@@ -983,6 +973,7 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
32286 + case IMX8MM:
32287 + if (phy_power_off(imx6_pcie->phy))
32288 + dev_err(dev, "unable to power off PHY\n");
32289 ++ phy_exit(imx6_pcie->phy);
32290 + break;
32291 + default:
32292 + break;
32293 +diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
32294 +index 00cde9a248b5a..78d002be4f821 100644
32295 +--- a/drivers/pci/controller/dwc/pcie-fu740.c
32296 ++++ b/drivers/pci/controller/dwc/pcie-fu740.c
32297 +@@ -181,10 +181,59 @@ static int fu740_pcie_start_link(struct dw_pcie *pci)
32298 + {
32299 + struct device *dev = pci->dev;
32300 + struct fu740_pcie *afp = dev_get_drvdata(dev);
32301 ++ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
32302 ++ int ret;
32303 ++ u32 orig, tmp;
32304 ++
32305 ++ /*
32306 ++ * Force 2.5GT/s when starting the link, due to some devices not
32307 ++ * probing at higher speeds. This happens with the PCIe switch
32308 ++ * on the Unmatched board when U-Boot has not initialised the PCIe.
32309 ++ * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
32310 ++ * by the soft reset done by this driver.
32311 ++ */
32312 ++ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
32313 ++ dw_pcie_dbi_ro_wr_en(pci);
32314 ++
32315 ++ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
32316 ++ orig = tmp & PCI_EXP_LNKCAP_SLS;
32317 ++ tmp &= ~PCI_EXP_LNKCAP_SLS;
32318 ++ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
32319 ++ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
32320 +
32321 + /* Enable LTSSM */
32322 + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
32323 +- return 0;
32324 ++
32325 ++ ret = dw_pcie_wait_for_link(pci);
32326 ++ if (ret) {
32327 ++ dev_err(dev, "error: link did not start\n");
32328 ++ goto err;
32329 ++ }
32330 ++
32331 ++ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
32332 ++ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
32333 ++ dev_dbg(dev, "changing speed back to original\n");
32334 ++
32335 ++ tmp &= ~PCI_EXP_LNKCAP_SLS;
32336 ++ tmp |= orig;
32337 ++ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
32338 ++
32339 ++ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
32340 ++ tmp |= PORT_LOGIC_SPEED_CHANGE;
32341 ++ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
32342 ++
32343 ++ ret = dw_pcie_wait_for_link(pci);
32344 ++ if (ret) {
32345 ++ dev_err(dev, "error: link did not start at new speed\n");
32346 ++ goto err;
32347 ++ }
32348 ++ }
32349 ++
32350 ++ ret = 0;
32351 ++err:
32352 ++ WARN_ON(ret); /* we assume that errors will be very rare */
32353 ++ dw_pcie_dbi_ro_wr_dis(pci);
32354 ++ return ret;
32355 + }
32356 +
32357 + static int fu740_pcie_host_init(struct pcie_port *pp)
32358 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
32359 +index 4f5b44827d213..82e2c618d532d 100644
32360 +--- a/drivers/pci/controller/pci-aardvark.c
32361 ++++ b/drivers/pci/controller/pci-aardvark.c
32362 +@@ -846,7 +846,9 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
32363 + case PCI_EXP_RTSTA: {
32364 + u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
32365 + u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
32366 +- *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
32367 ++ *value = msglog >> 16;
32368 ++ if (isr0 & PCIE_MSG_PM_PME_MASK)
32369 ++ *value |= PCI_EXP_RTSTA_PME;
32370 + return PCI_BRIDGE_EMUL_HANDLED;
32371 + }
32372 +
32373 +@@ -1388,7 +1390,6 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
32374 + static void advk_pcie_handle_msi(struct advk_pcie *pcie)
32375 + {
32376 + u32 msi_val, msi_mask, msi_status, msi_idx;
32377 +- u16 msi_data;
32378 +
32379 + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
32380 + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
32381 +@@ -1398,13 +1399,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
32382 + if (!(BIT(msi_idx) & msi_status))
32383 + continue;
32384 +
32385 +- /*
32386 +- * msi_idx contains bits [4:0] of the msi_data and msi_data
32387 +- * contains 16bit MSI interrupt number
32388 +- */
32389 + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
32390 +- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
32391 +- generic_handle_irq(msi_data);
32392 ++ if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
32393 ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
32394 + }
32395 +
32396 + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
32397 +diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
32398 +index 0d5acbfc7143f..7c763d820c52c 100644
32399 +--- a/drivers/pci/controller/pci-xgene.c
32400 ++++ b/drivers/pci/controller/pci-xgene.c
32401 +@@ -465,7 +465,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
32402 + return 1;
32403 + }
32404 +
32405 +- if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) {
32406 ++ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
32407 + *ib_reg_mask |= (1 << 0);
32408 + return 0;
32409 + }
32410 +@@ -479,28 +479,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
32411 + }
32412 +
32413 + static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
32414 +- struct resource_entry *entry,
32415 +- u8 *ib_reg_mask)
32416 ++ struct of_pci_range *range, u8 *ib_reg_mask)
32417 + {
32418 + void __iomem *cfg_base = port->cfg_base;
32419 + struct device *dev = port->dev;
32420 + void __iomem *bar_addr;
32421 + u32 pim_reg;
32422 +- u64 cpu_addr = entry->res->start;
32423 +- u64 pci_addr = cpu_addr - entry->offset;
32424 +- u64 size = resource_size(entry->res);
32425 ++ u64 cpu_addr = range->cpu_addr;
32426 ++ u64 pci_addr = range->pci_addr;
32427 ++ u64 size = range->size;
32428 + u64 mask = ~(size - 1) | EN_REG;
32429 + u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
32430 + u32 bar_low;
32431 + int region;
32432 +
32433 +- region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
32434 ++ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
32435 + if (region < 0) {
32436 + dev_warn(dev, "invalid pcie dma-range config\n");
32437 + return;
32438 + }
32439 +
32440 +- if (entry->res->flags & IORESOURCE_PREFETCH)
32441 ++ if (range->flags & IORESOURCE_PREFETCH)
32442 + flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
32443 +
32444 + bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
32445 +@@ -531,13 +530,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
32446 +
32447 + static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port)
32448 + {
32449 +- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
32450 +- struct resource_entry *entry;
32451 ++ struct device_node *np = port->node;
32452 ++ struct of_pci_range range;
32453 ++ struct of_pci_range_parser parser;
32454 ++ struct device *dev = port->dev;
32455 + u8 ib_reg_mask = 0;
32456 +
32457 +- resource_list_for_each_entry(entry, &bridge->dma_ranges)
32458 +- xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
32459 ++ if (of_pci_dma_range_parser_init(&parser, np)) {
32460 ++ dev_err(dev, "missing dma-ranges property\n");
32461 ++ return -EINVAL;
32462 ++ }
32463 ++
32464 ++ /* Get the dma-ranges from DT */
32465 ++ for_each_of_pci_range(&parser, &range) {
32466 ++ u64 end = range.cpu_addr + range.size - 1;
32467 +
32468 ++ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
32469 ++ range.flags, range.cpu_addr, end, range.pci_addr);
32470 ++ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
32471 ++ }
32472 + return 0;
32473 + }
32474 +
32475 +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
32476 +index 1c1ebf3dad43c..85dce560831a8 100644
32477 +--- a/drivers/pci/hotplug/pciehp_hpc.c
32478 ++++ b/drivers/pci/hotplug/pciehp_hpc.c
32479 +@@ -98,6 +98,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
32480 + if (slot_status & PCI_EXP_SLTSTA_CC) {
32481 + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
32482 + PCI_EXP_SLTSTA_CC);
32483 ++ ctrl->cmd_busy = 0;
32484 ++ smp_mb();
32485 + return 1;
32486 + }
32487 + msleep(10);
32488 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
32489 +index 65f7f6b0576c6..da829274fc66d 100644
32490 +--- a/drivers/pci/quirks.c
32491 ++++ b/drivers/pci/quirks.c
32492 +@@ -1811,6 +1811,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
32493 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
32494 + #endif
32495 +
32496 ++static void quirk_no_msi(struct pci_dev *dev)
32497 ++{
32498 ++ pci_info(dev, "avoiding MSI to work around a hardware defect\n");
32499 ++ dev->no_msi = 1;
32500 ++}
32501 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi);
32502 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi);
32503 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi);
32504 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi);
32505 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi);
32506 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi);
32507 ++
32508 + static void quirk_pcie_mch(struct pci_dev *pdev)
32509 + {
32510 + pdev->no_msi = 1;
32511 +diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
32512 +index e1a0c44bc6864..7d6ffdf44a415 100644
32513 +--- a/drivers/perf/Kconfig
32514 ++++ b/drivers/perf/Kconfig
32515 +@@ -141,7 +141,7 @@ config ARM_DMC620_PMU
32516 +
32517 + config MARVELL_CN10K_TAD_PMU
32518 + tristate "Marvell CN10K LLC-TAD PMU"
32519 +- depends on ARM64 || (COMPILE_TEST && 64BIT)
32520 ++ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
32521 + help
32522 + Provides support for Last-Level cache Tag-and-data Units (LLC-TAD)
32523 + performance monitors on CN10K family silicons.
32524 +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
32525 +index 0e48adce57ef3..71448229bc5e9 100644
32526 +--- a/drivers/perf/arm-cmn.c
32527 ++++ b/drivers/perf/arm-cmn.c
32528 +@@ -71,9 +71,11 @@
32529 + #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
32530 + #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
32531 + #define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
32532 +-#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
32533 +-#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
32534 +-#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
32535 ++#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
32536 ++#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
32537 ++#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
32538 ++#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
32539 ++#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4)
32540 + #define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
32541 + #define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
32542 + #define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
32543 +@@ -155,6 +157,7 @@
32544 + #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
32545 + #define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
32546 + #define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
32547 ++/* Note that we don't yet support the tertiary match group on newer IPs */
32548 + #define CMN_CONFIG_WP_GRP BIT_ULL(56)
32549 + #define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
32550 + #define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
32551 +@@ -595,6 +598,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
32552 + if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
32553 + return 0;
32554 +
32555 ++ if (chan == 4 && cmn->model == CMN600)
32556 ++ return 0;
32557 ++
32558 + if ((chan == 5 && cmn->rsp_vc_num < 2) ||
32559 + (chan == 6 && cmn->dat_vc_num < 2))
32560 + return 0;
32561 +@@ -905,15 +911,18 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
32562 + u32 grp = CMN_EVENT_WP_GRP(event);
32563 + u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
32564 + u32 combine = CMN_EVENT_WP_COMBINE(event);
32565 ++ bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
32566 +
32567 + config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
32568 + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
32569 + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
32570 +- FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) |
32571 + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
32572 ++ if (exc)
32573 ++ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
32574 ++ CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
32575 + if (combine && !grp)
32576 +- config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
32577 +-
32578 ++ config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
32579 ++ CMN_DTM_WPn_CONFIG_WP_COMBINE;
32580 + return config;
32581 + }
32582 +
32583 +diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
32584 +index 9391ab42a12b3..dd0f66288fbdd 100644
32585 +--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
32586 ++++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
32587 +@@ -79,6 +79,7 @@
32588 +
32589 + enum brcm_family_type {
32590 + BRCM_FAMILY_3390A0,
32591 ++ BRCM_FAMILY_4908,
32592 + BRCM_FAMILY_7250B0,
32593 + BRCM_FAMILY_7271A0,
32594 + BRCM_FAMILY_7364A0,
32595 +@@ -96,6 +97,7 @@ enum brcm_family_type {
32596 +
32597 + static const char *family_names[BRCM_FAMILY_COUNT] = {
32598 + USB_BRCM_FAMILY(3390A0),
32599 ++ USB_BRCM_FAMILY(4908),
32600 + USB_BRCM_FAMILY(7250B0),
32601 + USB_BRCM_FAMILY(7271A0),
32602 + USB_BRCM_FAMILY(7364A0),
32603 +@@ -203,6 +205,27 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
32604 + USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
32605 + ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
32606 + },
32607 ++ /* 4908 */
32608 ++ [BRCM_FAMILY_4908] = {
32609 ++ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
32610 ++ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
32611 ++ 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */
32612 ++ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
32613 ++ 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
32614 ++ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
32615 ++ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
32616 ++ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
32617 ++ USB_CTRL_USB_PM_USB_PWRDN_MASK,
32618 ++ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
32619 ++ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
32620 ++ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
32621 ++ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
32622 ++ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
32623 ++ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
32624 ++ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
32625 ++ 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK */
32626 ++ 0, /* USB_CTRL_SETUP ENDIAN bits */
32627 ++ },
32628 + /* 7250b0 */
32629 + [BRCM_FAMILY_7250B0] = {
32630 + USB_CTRL_SETUP_SCB1_EN_MASK,
32631 +@@ -559,6 +582,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
32632 + */
32633 + switch (params->selected_family) {
32634 + case BRCM_FAMILY_3390A0:
32635 ++ case BRCM_FAMILY_4908:
32636 + case BRCM_FAMILY_7250B0:
32637 + case BRCM_FAMILY_7366C0:
32638 + case BRCM_FAMILY_74371A0:
32639 +@@ -1004,6 +1028,18 @@ static const struct brcm_usb_init_ops bcm7445_ops = {
32640 + .set_dual_select = usb_set_dual_select,
32641 + };
32642 +
32643 ++void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params)
32644 ++{
32645 ++ int fam;
32646 ++
32647 ++ fam = BRCM_FAMILY_4908;
32648 ++ params->selected_family = fam;
32649 ++ params->usb_reg_bits_map =
32650 ++ &usb_reg_bits_map_table[fam][0];
32651 ++ params->family_name = family_names[fam];
32652 ++ params->ops = &bcm7445_ops;
32653 ++}
32654 ++
32655 + void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params)
32656 + {
32657 + int fam;
32658 +diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
32659 +index a39f30fa2e991..1ccb5ddab865c 100644
32660 +--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
32661 ++++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
32662 +@@ -64,6 +64,7 @@ struct brcm_usb_init_params {
32663 + bool suspend_with_clocks;
32664 + };
32665 +
32666 ++void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params);
32667 + void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
32668 + void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
32669 + void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params);
32670 +diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
32671 +index 0f1deb6e0eabf..2cb3779fcdf82 100644
32672 +--- a/drivers/phy/broadcom/phy-brcm-usb.c
32673 ++++ b/drivers/phy/broadcom/phy-brcm-usb.c
32674 +@@ -283,6 +283,15 @@ static const struct attribute_group brcm_usb_phy_group = {
32675 + .attrs = brcm_usb_phy_attrs,
32676 + };
32677 +
32678 ++static const struct match_chip_info chip_info_4908 = {
32679 ++ .init_func = &brcm_usb_dvr_init_4908,
32680 ++ .required_regs = {
32681 ++ BRCM_REGS_CTRL,
32682 ++ BRCM_REGS_XHCI_EC,
32683 ++ -1,
32684 ++ },
32685 ++};
32686 ++
32687 + static const struct match_chip_info chip_info_7216 = {
32688 + .init_func = &brcm_usb_dvr_init_7216,
32689 + .required_regs = {
32690 +@@ -318,7 +327,7 @@ static const struct match_chip_info chip_info_7445 = {
32691 + static const struct of_device_id brcm_usb_dt_ids[] = {
32692 + {
32693 + .compatible = "brcm,bcm4908-usb-phy",
32694 +- .data = &chip_info_7445,
32695 ++ .data = &chip_info_4908,
32696 + },
32697 + {
32698 + .compatible = "brcm,bcm7216-usb-phy",
32699 +diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
32700 +index ccb4045685cdd..929e86d6558e0 100644
32701 +--- a/drivers/phy/phy-core-mipi-dphy.c
32702 ++++ b/drivers/phy/phy-core-mipi-dphy.c
32703 +@@ -64,10 +64,10 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
32704 + cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui);
32705 +
32706 + cfg->init = 100;
32707 +- cfg->lpx = 60000;
32708 ++ cfg->lpx = 50000;
32709 + cfg->ta_get = 5 * cfg->lpx;
32710 + cfg->ta_go = 4 * cfg->lpx;
32711 +- cfg->ta_sure = 2 * cfg->lpx;
32712 ++ cfg->ta_sure = cfg->lpx;
32713 + cfg->wakeup = 1000;
32714 +
32715 + cfg->hs_clk_rate = hs_clk_rate;
32716 +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
32717 +index 5f7c421ab6e76..334cb85855a93 100644
32718 +--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
32719 ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
32720 +@@ -1038,6 +1038,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
32721 + node = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
32722 + if (node) {
32723 + pctl->regmap1 = syscon_node_to_regmap(node);
32724 ++ of_node_put(node);
32725 + if (IS_ERR(pctl->regmap1))
32726 + return PTR_ERR(pctl->regmap1);
32727 + } else if (regmap) {
32728 +@@ -1051,6 +1052,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
32729 + node = of_parse_phandle(np, "mediatek,pctl-regmap", 1);
32730 + if (node) {
32731 + pctl->regmap2 = syscon_node_to_regmap(node);
32732 ++ of_node_put(node);
32733 + if (IS_ERR(pctl->regmap2))
32734 + return PTR_ERR(pctl->regmap2);
32735 + }
32736 +diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
32737 +index f9f9110f2107d..fe6cf068c4f41 100644
32738 +--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
32739 ++++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
32740 +@@ -96,20 +96,16 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
32741 + err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret);
32742 + if (err)
32743 + goto out;
32744 ++ if (ret == MTK_PUPD_SET_R1R0_00)
32745 ++ ret = MTK_DISABLE;
32746 + if (param == PIN_CONFIG_BIAS_DISABLE) {
32747 +- if (ret == MTK_PUPD_SET_R1R0_00)
32748 +- ret = MTK_DISABLE;
32749 ++ if (ret != MTK_DISABLE)
32750 ++ err = -EINVAL;
32751 + } else if (param == PIN_CONFIG_BIAS_PULL_UP) {
32752 +- /* When desire to get pull-up value, return
32753 +- * error if current setting is pull-down
32754 +- */
32755 +- if (!pullup)
32756 ++ if (!pullup || ret == MTK_DISABLE)
32757 + err = -EINVAL;
32758 + } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) {
32759 +- /* When desire to get pull-down value, return
32760 +- * error if current setting is pull-up
32761 +- */
32762 +- if (pullup)
32763 ++ if (pullup || ret == MTK_DISABLE)
32764 + err = -EINVAL;
32765 + }
32766 + } else {
32767 +@@ -188,8 +184,7 @@ out:
32768 + }
32769 +
32770 + static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
32771 +- enum pin_config_param param,
32772 +- enum pin_config_param arg)
32773 ++ enum pin_config_param param, u32 arg)
32774 + {
32775 + struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
32776 + const struct mtk_pin_desc *desc;
32777 +@@ -586,6 +581,9 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
32778 + if (gpio >= hw->soc->npins)
32779 + return -EINVAL;
32780 +
32781 ++ if (mtk_is_virt_gpio(hw, gpio))
32782 ++ return -EINVAL;
32783 ++
32784 + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
32785 + pinmux = mtk_pctrl_get_pinmux(hw, gpio);
32786 + if (pinmux >= hw->soc->nfuncs)
32787 +@@ -737,10 +735,10 @@ static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group,
32788 + unsigned long *config)
32789 + {
32790 + struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
32791 ++ struct mtk_pinctrl_group *grp = &hw->groups[group];
32792 +
32793 +- *config = hw->groups[group].config;
32794 +-
32795 +- return 0;
32796 ++ /* One pin per group only */
32797 ++ return mtk_pinconf_get(pctldev, grp->pin, config);
32798 + }
32799 +
32800 + static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
32801 +@@ -756,8 +754,6 @@ static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
32802 + pinconf_to_config_argument(configs[i]));
32803 + if (ret < 0)
32804 + return ret;
32805 +-
32806 +- grp->config = configs[i];
32807 + }
32808 +
32809 + return 0;
32810 +@@ -988,7 +984,7 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
32811 + hw->nbase = hw->soc->nbase_names;
32812 +
32813 + if (of_find_property(hw->dev->of_node,
32814 +- "mediatek,rsel_resistance_in_si_unit", NULL))
32815 ++ "mediatek,rsel-resistance-in-si-unit", NULL))
32816 + hw->rsel_si_unit = true;
32817 + else
32818 + hw->rsel_si_unit = false;
32819 +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
32820 +index 39828e9c3120a..4757bf964d3cd 100644
32821 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
32822 ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
32823 +@@ -1883,8 +1883,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
32824 + }
32825 +
32826 + prcm_np = of_parse_phandle(np, "prcm", 0);
32827 +- if (prcm_np)
32828 ++ if (prcm_np) {
32829 + npct->prcm_base = of_iomap(prcm_np, 0);
32830 ++ of_node_put(prcm_np);
32831 ++ }
32832 + if (!npct->prcm_base) {
32833 + if (version == PINCTRL_NMK_STN8815) {
32834 + dev_info(&pdev->dev,
32835 +diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
32836 +index 4d81908d6725d..41136f63014a4 100644
32837 +--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
32838 ++++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
32839 +@@ -78,7 +78,6 @@ struct npcm7xx_gpio {
32840 + struct gpio_chip gc;
32841 + int irqbase;
32842 + int irq;
32843 +- void *priv;
32844 + struct irq_chip irq_chip;
32845 + u32 pinctrl_id;
32846 + int (*direction_input)(struct gpio_chip *chip, unsigned offset);
32847 +@@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
32848 + chained_irq_enter(chip, desc);
32849 + sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
32850 + en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
32851 +- dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
32852 ++ dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts,
32853 + en);
32854 +
32855 + sts &= en;
32856 +@@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
32857 + gpiochip_get_data(irq_data_get_irq_chip_data(d));
32858 + unsigned int gpio = BIT(d->hwirq);
32859 +
32860 +- dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
32861 ++ dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
32862 + d->irq, type);
32863 + switch (type) {
32864 + case IRQ_TYPE_EDGE_RISING:
32865 +- dev_dbg(d->chip->parent_device, "edge.rising\n");
32866 ++ dev_dbg(bank->gc.parent, "edge.rising\n");
32867 + npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
32868 + npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
32869 + break;
32870 + case IRQ_TYPE_EDGE_FALLING:
32871 +- dev_dbg(d->chip->parent_device, "edge.falling\n");
32872 ++ dev_dbg(bank->gc.parent, "edge.falling\n");
32873 + npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
32874 + npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
32875 + break;
32876 + case IRQ_TYPE_EDGE_BOTH:
32877 +- dev_dbg(d->chip->parent_device, "edge.both\n");
32878 ++ dev_dbg(bank->gc.parent, "edge.both\n");
32879 + npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
32880 + break;
32881 + case IRQ_TYPE_LEVEL_LOW:
32882 +- dev_dbg(d->chip->parent_device, "level.low\n");
32883 ++ dev_dbg(bank->gc.parent, "level.low\n");
32884 + npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
32885 + break;
32886 + case IRQ_TYPE_LEVEL_HIGH:
32887 +- dev_dbg(d->chip->parent_device, "level.high\n");
32888 ++ dev_dbg(bank->gc.parent, "level.high\n");
32889 + npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
32890 + break;
32891 + default:
32892 +- dev_dbg(d->chip->parent_device, "invalid irq type\n");
32893 ++ dev_dbg(bank->gc.parent, "invalid irq type\n");
32894 + return -EINVAL;
32895 + }
32896 +
32897 +@@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d)
32898 + gpiochip_get_data(irq_data_get_irq_chip_data(d));
32899 + unsigned int gpio = d->hwirq;
32900 +
32901 +- dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
32902 ++ dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
32903 + iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
32904 + }
32905 +
32906 +@@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d)
32907 + unsigned int gpio = d->hwirq;
32908 +
32909 + /* Clear events */
32910 +- dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
32911 ++ dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
32912 + iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
32913 + }
32914 +
32915 +@@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d)
32916 + unsigned int gpio = d->hwirq;
32917 +
32918 + /* Enable events */
32919 +- dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
32920 ++ dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
32921 + iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
32922 + }
32923 +
32924 +@@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
32925 + unsigned int gpio = d->hwirq;
32926 +
32927 + /* active-high, input, clear interrupt, enable interrupt */
32928 +- dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
32929 ++ dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq);
32930 + npcmgpio_direction_input(gc, gpio);
32931 + npcmgpio_irq_ack(d);
32932 + npcmgpio_irq_unmask(d);
32933 +@@ -905,7 +904,7 @@ static struct npcm7xx_func npcm7xx_funcs[] = {
32934 + #define DRIVE_STRENGTH_HI_SHIFT 12
32935 + #define DRIVE_STRENGTH_MASK 0x0000FF00
32936 +
32937 +-#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
32938 ++#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
32939 + ((hi) << DRIVE_STRENGTH_HI_SHIFT))
32940 + #define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF)
32941 + #define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF)
32942 +@@ -925,31 +924,31 @@ struct npcm7xx_pincfg {
32943 + static const struct npcm7xx_pincfg pincfg[] = {
32944 + /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */
32945 + NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
32946 +- NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32947 +- NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32948 ++ NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32949 ++ NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32950 + NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
32951 + NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
32952 + NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
32953 + NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
32954 + NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
32955 +- NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32956 +- NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32957 +- NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32958 +- NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32959 ++ NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32960 ++ NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32961 ++ NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32962 ++ NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32963 + NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
32964 + NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
32965 + NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
32966 + NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
32967 +- NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32968 +- NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)),
32969 +- NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
32970 +- NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
32971 ++ NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32972 ++ NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DSTR(8, 12)),
32973 ++ NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)),
32974 ++ NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)),
32975 + NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
32976 + NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
32977 + NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
32978 + NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
32979 +- NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32980 +- NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
32981 ++ NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32982 ++ NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
32983 + NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
32984 + NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
32985 + NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0),
32986 +@@ -965,12 +964,12 @@ static const struct npcm7xx_pincfg pincfg[] = {
32987 + NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
32988 + NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
32989 + NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0),
32990 +- NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO),
32991 ++ NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO),
32992 + NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
32993 + NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
32994 + NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0),
32995 +- NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
32996 +- NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
32997 ++ NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)),
32998 ++ NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)),
32999 + NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO),
33000 + NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0),
33001 + NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
33002 +@@ -980,8 +979,8 @@ static const struct npcm7xx_pincfg pincfg[] = {
33003 + NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
33004 + NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
33005 + NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0),
33006 +- NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
33007 +- NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
33008 ++ NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
33009 ++ NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
33010 + NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
33011 + NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
33012 + NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO),
33013 +@@ -1004,19 +1003,19 @@ static const struct npcm7xx_pincfg pincfg[] = {
33014 + NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0),
33015 + NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0),
33016 + NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0),
33017 +- NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33018 +- NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33019 +- NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33020 +- NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33021 +- NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33022 +- NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33023 +- NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33024 ++ NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33025 ++ NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33026 ++ NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33027 ++ NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33028 ++ NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33029 ++ NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33030 ++ NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33031 + NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
33032 + NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
33033 + NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
33034 + NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0),
33035 +- NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
33036 +- NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
33037 ++ NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
33038 ++ NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
33039 + NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
33040 + NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
33041 + NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
33042 +@@ -1062,34 +1061,34 @@ static const struct npcm7xx_pincfg pincfg[] = {
33043 + NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0),
33044 + NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
33045 + NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
33046 +- NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33047 +- NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33048 +- NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33049 +- NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33050 +- NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33051 ++ NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33052 ++ NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33053 ++ NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33054 ++ NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33055 ++ NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33056 + NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0),
33057 +- NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33058 ++ NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33059 + NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0),
33060 +- NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33061 +- NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33062 +- NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33063 +- NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33064 +- NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33065 +- NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33066 +- NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33067 +- NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33068 +- NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33069 ++ NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33070 ++ NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33071 ++ NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33072 ++ NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33073 ++ NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33074 ++ NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33075 ++ NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33076 ++ NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33077 ++ NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33078 + NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */
33079 +- NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33080 ++ NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33081 + NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */
33082 +- NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33083 +- NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33084 +- NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33085 +- NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33086 +-
33087 +- NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33088 +- NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)),
33089 +- NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)),
33090 ++ NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33091 ++ NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33092 ++ NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33093 ++ NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33094 ++
33095 ++ NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33096 ++ NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DSTR(8, 12)),
33097 ++ NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DSTR(8, 12)),
33098 + NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
33099 + NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
33100 + NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
33101 +@@ -1102,25 +1101,25 @@ static const struct npcm7xx_pincfg pincfg[] = {
33102 + NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0),
33103 + NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
33104 + NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
33105 +- NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
33106 +- NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
33107 +- NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
33108 +- NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33109 +- NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33110 +- NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33111 ++ NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
33112 ++ NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
33113 ++ NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
33114 ++ NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33115 ++ NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33116 ++ NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33117 + NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
33118 + NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
33119 +- NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33120 +- NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
33121 +- NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
33122 +- NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)),
33123 +- NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)),
33124 +- NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW),
33125 +- NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW),
33126 +- NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)),
33127 +- NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
33128 +-
33129 +- NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
33130 ++ NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33131 ++ NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
33132 ++ NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
33133 ++ NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
33134 ++ NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
33135 ++ NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DSTR(8, 12) | SLEW),
33136 ++ NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DSTR(8, 12) | SLEW),
33137 ++ NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DSTR(2, 4)),
33138 ++ NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */
33139 ++
33140 ++ NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */
33141 + NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
33142 + NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
33143 + NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
33144 +@@ -1131,11 +1130,11 @@ static const struct npcm7xx_pincfg pincfg[] = {
33145 + NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
33146 + NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
33147 + NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0),
33148 +- NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)),
33149 ++ NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
33150 + NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
33151 + NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
33152 +- NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
33153 +- NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
33154 ++ NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)),
33155 ++ NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)),
33156 + NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
33157 + NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
33158 + NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
33159 +@@ -1147,20 +1146,20 @@ static const struct npcm7xx_pincfg pincfg[] = {
33160 + NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
33161 + NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
33162 + NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0),
33163 +- NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
33164 ++ NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
33165 + NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
33166 + NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
33167 + NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
33168 + NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
33169 +
33170 + NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW),
33171 +- NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
33172 +- NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
33173 +- NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33174 +- NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33175 +- NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33176 +- NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
33177 +- NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)),
33178 ++ NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
33179 ++ NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
33180 ++ NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33181 ++ NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33182 ++ NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33183 ++ NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
33184 ++ NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
33185 + NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
33186 + NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
33187 + NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
33188 +@@ -1561,7 +1560,7 @@ static int npcm7xx_get_groups_count(struct pinctrl_dev *pctldev)
33189 + {
33190 + struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
33191 +
33192 +- dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups));
33193 ++ dev_dbg(npcm->dev, "group size: %zu\n", ARRAY_SIZE(npcm7xx_groups));
33194 + return ARRAY_SIZE(npcm7xx_groups);
33195 + }
33196 +
33197 +diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
33198 +index f8edcc88ac013..415d1df8f46a5 100644
33199 +--- a/drivers/pinctrl/pinconf-generic.c
33200 ++++ b/drivers/pinctrl/pinconf-generic.c
33201 +@@ -30,10 +30,10 @@ static const struct pin_config_item conf_items[] = {
33202 + PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
33203 + PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
33204 + PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
33205 +- PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
33206 ++ PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", "ohms", true),
33207 + PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
33208 +- "input bias pull to pin specific state", NULL, false),
33209 +- PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
33210 ++ "input bias pull to pin specific state", "ohms", true),
33211 ++ PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", "ohms", true),
33212 + PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
33213 + PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
33214 + PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
33215 +diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
33216 +index 2712f51eb2381..fa6becca17889 100644
33217 +--- a/drivers/pinctrl/pinctrl-ingenic.c
33218 ++++ b/drivers/pinctrl/pinctrl-ingenic.c
33219 +@@ -119,6 +119,8 @@ struct ingenic_chip_info {
33220 + unsigned int num_functions;
33221 +
33222 + const u32 *pull_ups, *pull_downs;
33223 ++
33224 ++ const struct regmap_access_table *access_table;
33225 + };
33226 +
33227 + struct ingenic_pinctrl {
33228 +@@ -2179,6 +2181,17 @@ static const struct function_desc x1000_functions[] = {
33229 + { "mac", x1000_mac_groups, ARRAY_SIZE(x1000_mac_groups), },
33230 + };
33231 +
33232 ++static const struct regmap_range x1000_access_ranges[] = {
33233 ++ regmap_reg_range(0x000, 0x400 - 4),
33234 ++ regmap_reg_range(0x700, 0x800 - 4),
33235 ++};
33236 ++
33237 ++/* shared with X1500 */
33238 ++static const struct regmap_access_table x1000_access_table = {
33239 ++ .yes_ranges = x1000_access_ranges,
33240 ++ .n_yes_ranges = ARRAY_SIZE(x1000_access_ranges),
33241 ++};
33242 ++
33243 + static const struct ingenic_chip_info x1000_chip_info = {
33244 + .num_chips = 4,
33245 + .reg_offset = 0x100,
33246 +@@ -2189,6 +2202,7 @@ static const struct ingenic_chip_info x1000_chip_info = {
33247 + .num_functions = ARRAY_SIZE(x1000_functions),
33248 + .pull_ups = x1000_pull_ups,
33249 + .pull_downs = x1000_pull_downs,
33250 ++ .access_table = &x1000_access_table,
33251 + };
33252 +
33253 + static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, };
33254 +@@ -2300,6 +2314,7 @@ static const struct ingenic_chip_info x1500_chip_info = {
33255 + .num_functions = ARRAY_SIZE(x1500_functions),
33256 + .pull_ups = x1000_pull_ups,
33257 + .pull_downs = x1000_pull_downs,
33258 ++ .access_table = &x1000_access_table,
33259 + };
33260 +
33261 + static const u32 x1830_pull_ups[4] = {
33262 +@@ -2506,6 +2521,16 @@ static const struct function_desc x1830_functions[] = {
33263 + { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
33264 + };
33265 +
33266 ++static const struct regmap_range x1830_access_ranges[] = {
33267 ++ regmap_reg_range(0x0000, 0x4000 - 4),
33268 ++ regmap_reg_range(0x7000, 0x8000 - 4),
33269 ++};
33270 ++
33271 ++static const struct regmap_access_table x1830_access_table = {
33272 ++ .yes_ranges = x1830_access_ranges,
33273 ++ .n_yes_ranges = ARRAY_SIZE(x1830_access_ranges),
33274 ++};
33275 ++
33276 + static const struct ingenic_chip_info x1830_chip_info = {
33277 + .num_chips = 4,
33278 + .reg_offset = 0x1000,
33279 +@@ -2516,6 +2541,7 @@ static const struct ingenic_chip_info x1830_chip_info = {
33280 + .num_functions = ARRAY_SIZE(x1830_functions),
33281 + .pull_ups = x1830_pull_ups,
33282 + .pull_downs = x1830_pull_downs,
33283 ++ .access_table = &x1830_access_table,
33284 + };
33285 +
33286 + static const u32 x2000_pull_ups[5] = {
33287 +@@ -2969,6 +2995,17 @@ static const struct function_desc x2000_functions[] = {
33288 + { "otg", x2000_otg_groups, ARRAY_SIZE(x2000_otg_groups), },
33289 + };
33290 +
33291 ++static const struct regmap_range x2000_access_ranges[] = {
33292 ++ regmap_reg_range(0x000, 0x500 - 4),
33293 ++ regmap_reg_range(0x700, 0x800 - 4),
33294 ++};
33295 ++
33296 ++/* shared with X2100 */
33297 ++static const struct regmap_access_table x2000_access_table = {
33298 ++ .yes_ranges = x2000_access_ranges,
33299 ++ .n_yes_ranges = ARRAY_SIZE(x2000_access_ranges),
33300 ++};
33301 ++
33302 + static const struct ingenic_chip_info x2000_chip_info = {
33303 + .num_chips = 5,
33304 + .reg_offset = 0x100,
33305 +@@ -2979,6 +3016,7 @@ static const struct ingenic_chip_info x2000_chip_info = {
33306 + .num_functions = ARRAY_SIZE(x2000_functions),
33307 + .pull_ups = x2000_pull_ups,
33308 + .pull_downs = x2000_pull_downs,
33309 ++ .access_table = &x2000_access_table,
33310 + };
33311 +
33312 + static const u32 x2100_pull_ups[5] = {
33313 +@@ -3189,6 +3227,7 @@ static const struct ingenic_chip_info x2100_chip_info = {
33314 + .num_functions = ARRAY_SIZE(x2100_functions),
33315 + .pull_ups = x2100_pull_ups,
33316 + .pull_downs = x2100_pull_downs,
33317 ++ .access_table = &x2000_access_table,
33318 + };
33319 +
33320 + static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
33321 +@@ -4168,7 +4207,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
33322 + return PTR_ERR(base);
33323 +
33324 + regmap_config = ingenic_pinctrl_regmap_config;
33325 +- regmap_config.max_register = chip_info->num_chips * chip_info->reg_offset;
33326 ++ if (chip_info->access_table) {
33327 ++ regmap_config.rd_table = chip_info->access_table;
33328 ++ regmap_config.wr_table = chip_info->access_table;
33329 ++ } else {
33330 ++ regmap_config.max_register = chip_info->num_chips * chip_info->reg_offset - 4;
33331 ++ }
33332 +
33333 + jzpc->map = devm_regmap_init_mmio(dev, base, &regmap_config);
33334 + if (IS_ERR(jzpc->map)) {
33335 +diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
33336 +index 639f1130e9892..666f1e3889e00 100644
33337 +--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
33338 ++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
33339 +@@ -19,6 +19,7 @@
33340 + #include <linux/property.h>
33341 + #include <linux/regmap.h>
33342 + #include <linux/reset.h>
33343 ++#include <linux/spinlock.h>
33344 +
33345 + #include "core.h"
33346 + #include "pinconf.h"
33347 +@@ -116,6 +117,7 @@ struct sgpio_priv {
33348 + u32 clock;
33349 + struct regmap *regs;
33350 + const struct sgpio_properties *properties;
33351 ++ spinlock_t lock;
33352 + };
33353 +
33354 + struct sgpio_port_addr {
33355 +@@ -229,6 +231,7 @@ static void sgpio_output_set(struct sgpio_priv *priv,
33356 + int value)
33357 + {
33358 + unsigned int bit = SGPIO_SRC_BITS * addr->bit;
33359 ++ unsigned long flags;
33360 + u32 clr, set;
33361 +
33362 + switch (priv->properties->arch) {
33363 +@@ -247,7 +250,10 @@ static void sgpio_output_set(struct sgpio_priv *priv,
33364 + default:
33365 + return;
33366 + }
33367 ++
33368 ++ spin_lock_irqsave(&priv->lock, flags);
33369 + sgpio_clrsetbits(priv, REG_PORT_CONFIG, addr->port, clr, set);
33370 ++ spin_unlock_irqrestore(&priv->lock, flags);
33371 + }
33372 +
33373 + static int sgpio_output_get(struct sgpio_priv *priv,
33374 +@@ -575,10 +581,13 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
33375 + struct sgpio_bank *bank = gpiochip_get_data(chip);
33376 + unsigned int gpio = irqd_to_hwirq(data);
33377 + struct sgpio_port_addr addr;
33378 ++ unsigned long flags;
33379 + u32 ena;
33380 +
33381 + sgpio_pin_to_addr(bank->priv, gpio, &addr);
33382 +
33383 ++ spin_lock_irqsave(&bank->priv->lock, flags);
33384 ++
33385 + /* Disable interrupt while changing type */
33386 + ena = sgpio_readl(bank->priv, REG_INT_ENABLE, addr.bit);
33387 + sgpio_writel(bank->priv, ena & ~BIT(addr.port), REG_INT_ENABLE, addr.bit);
33388 +@@ -595,6 +604,8 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
33389 +
33390 + /* Possibly re-enable interrupts */
33391 + sgpio_writel(bank->priv, ena, REG_INT_ENABLE, addr.bit);
33392 ++
33393 ++ spin_unlock_irqrestore(&bank->priv->lock, flags);
33394 + }
33395 +
33396 + static void microchip_sgpio_irq_setreg(struct irq_data *data,
33397 +@@ -605,13 +616,16 @@ static void microchip_sgpio_irq_setreg(struct irq_data *data,
33398 + struct sgpio_bank *bank = gpiochip_get_data(chip);
33399 + unsigned int gpio = irqd_to_hwirq(data);
33400 + struct sgpio_port_addr addr;
33401 ++ unsigned long flags;
33402 +
33403 + sgpio_pin_to_addr(bank->priv, gpio, &addr);
33404 +
33405 ++ spin_lock_irqsave(&bank->priv->lock, flags);
33406 + if (clear)
33407 + sgpio_clrsetbits(bank->priv, reg, addr.bit, BIT(addr.port), 0);
33408 + else
33409 + sgpio_clrsetbits(bank->priv, reg, addr.bit, 0, BIT(addr.port));
33410 ++ spin_unlock_irqrestore(&bank->priv->lock, flags);
33411 + }
33412 +
33413 + static void microchip_sgpio_irq_mask(struct irq_data *data)
33414 +@@ -833,6 +847,7 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
33415 + return -ENOMEM;
33416 +
33417 + priv->dev = dev;
33418 ++ spin_lock_init(&priv->lock);
33419 +
33420 + reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
33421 + if (IS_ERR(reset))
33422 +diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
33423 +index fc969208d904c..370459243007b 100644
33424 +--- a/drivers/pinctrl/pinctrl-ocelot.c
33425 ++++ b/drivers/pinctrl/pinctrl-ocelot.c
33426 +@@ -1750,8 +1750,8 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
33427 + gc->base = -1;
33428 + gc->label = "ocelot-gpio";
33429 +
33430 +- irq = irq_of_parse_and_map(gc->of_node, 0);
33431 +- if (irq) {
33432 ++ irq = platform_get_irq_optional(pdev, 0);
33433 ++ if (irq > 0) {
33434 + girq = &gc->irq;
33435 + girq->chip = &ocelot_irqchip;
33436 + girq->parent_handler = ocelot_irq_handler;
33437 +@@ -1788,9 +1788,10 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
33438 + .val_bits = 32,
33439 + .reg_stride = 4,
33440 + .max_register = 32,
33441 ++ .name = "pincfg",
33442 + };
33443 +
33444 +- base = devm_platform_ioremap_resource(pdev, 0);
33445 ++ base = devm_platform_ioremap_resource(pdev, 1);
33446 + if (IS_ERR(base)) {
33447 + dev_dbg(&pdev->dev, "Failed to ioremap config registers (no extended pinconf)\n");
33448 + return NULL;
33449 +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
33450 +index d8dd8415fa81b..a1b598b86aa9f 100644
33451 +--- a/drivers/pinctrl/pinctrl-rockchip.c
33452 ++++ b/drivers/pinctrl/pinctrl-rockchip.c
33453 +@@ -2693,6 +2693,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
33454 + node = of_parse_phandle(np, "rockchip,grf", 0);
33455 + if (node) {
33456 + info->regmap_base = syscon_node_to_regmap(node);
33457 ++ of_node_put(node);
33458 + if (IS_ERR(info->regmap_base))
33459 + return PTR_ERR(info->regmap_base);
33460 + } else {
33461 +@@ -2725,6 +2726,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
33462 + node = of_parse_phandle(np, "rockchip,pmu", 0);
33463 + if (node) {
33464 + info->regmap_pmu = syscon_node_to_regmap(node);
33465 ++ of_node_put(node);
33466 + if (IS_ERR(info->regmap_pmu))
33467 + return PTR_ERR(info->regmap_pmu);
33468 + }
33469 +diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
33470 +index 0d4ea2e22a535..12d41ac017b53 100644
33471 +--- a/drivers/pinctrl/renesas/core.c
33472 ++++ b/drivers/pinctrl/renesas/core.c
33473 +@@ -741,7 +741,7 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
33474 +
33475 + #ifdef DEBUG
33476 + #define SH_PFC_MAX_REGS 300
33477 +-#define SH_PFC_MAX_ENUMS 3000
33478 ++#define SH_PFC_MAX_ENUMS 5000
33479 +
33480 + static unsigned int sh_pfc_errors __initdata;
33481 + static unsigned int sh_pfc_warnings __initdata;
33482 +@@ -865,7 +865,8 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
33483 + GENMASK(cfg_reg->reg_width - 1, 0));
33484 +
33485 + if (cfg_reg->field_width) {
33486 +- n = cfg_reg->reg_width / cfg_reg->field_width;
33487 ++ fw = cfg_reg->field_width;
33488 ++ n = (cfg_reg->reg_width / fw) << fw;
33489 + /* Skip field checks (done at build time) */
33490 + goto check_enum_ids;
33491 + }
33492 +diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c
33493 +index e6e5487691c16..cf7153d06a953 100644
33494 +--- a/drivers/pinctrl/renesas/pfc-r8a77470.c
33495 ++++ b/drivers/pinctrl/renesas/pfc-r8a77470.c
33496 +@@ -2140,7 +2140,7 @@ static const unsigned int vin0_clk_mux[] = {
33497 + VI0_CLK_MARK,
33498 + };
33499 + /* - VIN1 ------------------------------------------------------------------- */
33500 +-static const union vin_data vin1_data_pins = {
33501 ++static const union vin_data12 vin1_data_pins = {
33502 + .data12 = {
33503 + RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2),
33504 + RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
33505 +@@ -2150,7 +2150,7 @@ static const union vin_data vin1_data_pins = {
33506 + RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
33507 + },
33508 + };
33509 +-static const union vin_data vin1_data_mux = {
33510 ++static const union vin_data12 vin1_data_mux = {
33511 + .data12 = {
33512 + VI1_DATA0_MARK, VI1_DATA1_MARK,
33513 + VI1_DATA2_MARK, VI1_DATA3_MARK,
33514 +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
33515 +index 2e490e7696f47..4102ce955bd7f 100644
33516 +--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
33517 ++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
33518 +@@ -585,13 +585,11 @@ static const struct samsung_pin_ctrl exynos850_pin_ctrl[] __initconst = {
33519 + /* pin-controller instance 0 ALIVE data */
33520 + .pin_banks = exynos850_pin_banks0,
33521 + .nr_banks = ARRAY_SIZE(exynos850_pin_banks0),
33522 +- .eint_gpio_init = exynos_eint_gpio_init,
33523 + .eint_wkup_init = exynos_eint_wkup_init,
33524 + }, {
33525 + /* pin-controller instance 1 CMGP data */
33526 + .pin_banks = exynos850_pin_banks1,
33527 + .nr_banks = ARRAY_SIZE(exynos850_pin_banks1),
33528 +- .eint_gpio_init = exynos_eint_gpio_init,
33529 + .eint_wkup_init = exynos_eint_wkup_init,
33530 + }, {
33531 + /* pin-controller instance 2 AUD data */
33532 +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
33533 +index 0f6e9305fec58..c4175fea7d741 100644
33534 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
33535 ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
33536 +@@ -1002,6 +1002,16 @@ samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev)
33537 + return &(of_data->ctrl[id]);
33538 + }
33539 +
33540 ++static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d)
33541 ++{
33542 ++ struct samsung_pin_bank *bank;
33543 ++ unsigned int i;
33544 ++
33545 ++ bank = d->pin_banks;
33546 ++ for (i = 0; i < d->nr_banks; ++i, ++bank)
33547 ++ of_node_put(bank->of_node);
33548 ++}
33549 ++
33550 + /* retrieve the soc specific data */
33551 + static const struct samsung_pin_ctrl *
33552 + samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
33553 +@@ -1117,19 +1127,19 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
33554 + if (ctrl->retention_data) {
33555 + drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
33556 + ctrl->retention_data);
33557 +- if (IS_ERR(drvdata->retention_ctrl))
33558 +- return PTR_ERR(drvdata->retention_ctrl);
33559 ++ if (IS_ERR(drvdata->retention_ctrl)) {
33560 ++ ret = PTR_ERR(drvdata->retention_ctrl);
33561 ++ goto err_put_banks;
33562 ++ }
33563 + }
33564 +
33565 + ret = samsung_pinctrl_register(pdev, drvdata);
33566 + if (ret)
33567 +- return ret;
33568 ++ goto err_put_banks;
33569 +
33570 + ret = samsung_gpiolib_register(pdev, drvdata);
33571 +- if (ret) {
33572 +- samsung_pinctrl_unregister(pdev, drvdata);
33573 +- return ret;
33574 +- }
33575 ++ if (ret)
33576 ++ goto err_unregister;
33577 +
33578 + if (ctrl->eint_gpio_init)
33579 + ctrl->eint_gpio_init(drvdata);
33580 +@@ -1139,6 +1149,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
33581 + platform_set_drvdata(pdev, drvdata);
33582 +
33583 + return 0;
33584 ++
33585 ++err_unregister:
33586 ++ samsung_pinctrl_unregister(pdev, drvdata);
33587 ++err_put_banks:
33588 ++ samsung_banks_of_node_put(drvdata);
33589 ++ return ret;
33590 + }
33591 +
33592 + /*
33593 +diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
33594 +index f901d2e43166c..88cbc434c06b2 100644
33595 +--- a/drivers/platform/chrome/Makefile
33596 ++++ b/drivers/platform/chrome/Makefile
33597 +@@ -2,6 +2,7 @@
33598 +
33599 + # tell define_trace.h where to find the cros ec trace header
33600 + CFLAGS_cros_ec_trace.o:= -I$(src)
33601 ++CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src)
33602 +
33603 + obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
33604 + obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
33605 +@@ -20,7 +21,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
33606 + obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
33607 + obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
33608 + obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
33609 +-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
33610 ++cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
33611 + obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
33612 + obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
33613 + obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
33614 +diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
33615 +index 98e37080f7609..71948dade0e2a 100644
33616 +--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
33617 ++++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
33618 +@@ -17,7 +17,8 @@
33619 + #include <linux/sort.h>
33620 + #include <linux/slab.h>
33621 +
33622 +-#include "cros_ec_trace.h"
33623 ++#define CREATE_TRACE_POINTS
33624 ++#include "cros_ec_sensorhub_trace.h"
33625 +
33626 + /* Precision of fixed point for the m values from the filter */
33627 + #define M_PRECISION BIT(23)
33628 +diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
33629 +new file mode 100644
33630 +index 0000000000000..57d9b47859692
33631 +--- /dev/null
33632 ++++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
33633 +@@ -0,0 +1,123 @@
33634 ++/* SPDX-License-Identifier: GPL-2.0 */
33635 ++/*
33636 ++ * Trace events for the ChromeOS Sensorhub kernel module
33637 ++ *
33638 ++ * Copyright 2021 Google LLC.
33639 ++ */
33640 ++
33641 ++#undef TRACE_SYSTEM
33642 ++#define TRACE_SYSTEM cros_ec
33643 ++
33644 ++#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
33645 ++#define _CROS_EC_SENSORHUB_TRACE_H_
33646 ++
33647 ++#include <linux/types.h>
33648 ++#include <linux/platform_data/cros_ec_sensorhub.h>
33649 ++
33650 ++#include <linux/tracepoint.h>
33651 ++
33652 ++TRACE_EVENT(cros_ec_sensorhub_timestamp,
33653 ++ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
33654 ++ s64 current_timestamp, s64 current_time),
33655 ++ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
33656 ++ current_time),
33657 ++ TP_STRUCT__entry(
33658 ++ __field(u32, ec_sample_timestamp)
33659 ++ __field(u32, ec_fifo_timestamp)
33660 ++ __field(s64, fifo_timestamp)
33661 ++ __field(s64, current_timestamp)
33662 ++ __field(s64, current_time)
33663 ++ __field(s64, delta)
33664 ++ ),
33665 ++ TP_fast_assign(
33666 ++ __entry->ec_sample_timestamp = ec_sample_timestamp;
33667 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
33668 ++ __entry->fifo_timestamp = fifo_timestamp;
33669 ++ __entry->current_timestamp = current_timestamp;
33670 ++ __entry->current_time = current_time;
33671 ++ __entry->delta = current_timestamp - current_time;
33672 ++ ),
33673 ++ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
33674 ++ __entry->ec_sample_timestamp,
33675 ++ __entry->ec_fifo_timestamp,
33676 ++ __entry->fifo_timestamp,
33677 ++ __entry->current_timestamp,
33678 ++ __entry->current_time,
33679 ++ __entry->delta
33680 ++ )
33681 ++);
33682 ++
33683 ++TRACE_EVENT(cros_ec_sensorhub_data,
33684 ++ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
33685 ++ s64 current_timestamp, s64 current_time),
33686 ++ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
33687 ++ TP_STRUCT__entry(
33688 ++ __field(u32, ec_sensor_num)
33689 ++ __field(u32, ec_fifo_timestamp)
33690 ++ __field(s64, fifo_timestamp)
33691 ++ __field(s64, current_timestamp)
33692 ++ __field(s64, current_time)
33693 ++ __field(s64, delta)
33694 ++ ),
33695 ++ TP_fast_assign(
33696 ++ __entry->ec_sensor_num = ec_sensor_num;
33697 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
33698 ++ __entry->fifo_timestamp = fifo_timestamp;
33699 ++ __entry->current_timestamp = current_timestamp;
33700 ++ __entry->current_time = current_time;
33701 ++ __entry->delta = current_timestamp - current_time;
33702 ++ ),
33703 ++ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
33704 ++ __entry->ec_sensor_num,
33705 ++ __entry->ec_fifo_timestamp,
33706 ++ __entry->fifo_timestamp,
33707 ++ __entry->current_timestamp,
33708 ++ __entry->current_time,
33709 ++ __entry->delta
33710 ++ )
33711 ++);
33712 ++
33713 ++TRACE_EVENT(cros_ec_sensorhub_filter,
33714 ++ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
33715 ++ TP_ARGS(state, dx, dy),
33716 ++ TP_STRUCT__entry(
33717 ++ __field(s64, dx)
33718 ++ __field(s64, dy)
33719 ++ __field(s64, median_m)
33720 ++ __field(s64, median_error)
33721 ++ __field(s64, history_len)
33722 ++ __field(s64, x)
33723 ++ __field(s64, y)
33724 ++ ),
33725 ++ TP_fast_assign(
33726 ++ __entry->dx = dx;
33727 ++ __entry->dy = dy;
33728 ++ __entry->median_m = state->median_m;
33729 ++ __entry->median_error = state->median_error;
33730 ++ __entry->history_len = state->history_len;
33731 ++ __entry->x = state->x_offset;
33732 ++ __entry->y = state->y_offset;
33733 ++ ),
33734 ++ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
33735 ++ __entry->dx,
33736 ++ __entry->dy,
33737 ++ __entry->median_m,
33738 ++ __entry->median_error,
33739 ++ __entry->history_len,
33740 ++ __entry->x,
33741 ++ __entry->y
33742 ++ )
33743 ++);
33744 ++
33745 ++
33746 ++#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
33747 ++
33748 ++/* this part must be outside header guard */
33749 ++
33750 ++#undef TRACE_INCLUDE_PATH
33751 ++#define TRACE_INCLUDE_PATH .
33752 ++
33753 ++#undef TRACE_INCLUDE_FILE
33754 ++#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
33755 ++
33756 ++#include <trace/define_trace.h>
33757 +diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
33758 +index 7e7cfc98657a4..9bb5cd2c98b8b 100644
33759 +--- a/drivers/platform/chrome/cros_ec_trace.h
33760 ++++ b/drivers/platform/chrome/cros_ec_trace.h
33761 +@@ -15,7 +15,6 @@
33762 + #include <linux/types.h>
33763 + #include <linux/platform_data/cros_ec_commands.h>
33764 + #include <linux/platform_data/cros_ec_proto.h>
33765 +-#include <linux/platform_data/cros_ec_sensorhub.h>
33766 +
33767 + #include <linux/tracepoint.h>
33768 +
33769 +@@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done,
33770 + __entry->retval)
33771 + );
33772 +
33773 +-TRACE_EVENT(cros_ec_sensorhub_timestamp,
33774 +- TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
33775 +- s64 current_timestamp, s64 current_time),
33776 +- TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
33777 +- current_time),
33778 +- TP_STRUCT__entry(
33779 +- __field(u32, ec_sample_timestamp)
33780 +- __field(u32, ec_fifo_timestamp)
33781 +- __field(s64, fifo_timestamp)
33782 +- __field(s64, current_timestamp)
33783 +- __field(s64, current_time)
33784 +- __field(s64, delta)
33785 +- ),
33786 +- TP_fast_assign(
33787 +- __entry->ec_sample_timestamp = ec_sample_timestamp;
33788 +- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
33789 +- __entry->fifo_timestamp = fifo_timestamp;
33790 +- __entry->current_timestamp = current_timestamp;
33791 +- __entry->current_time = current_time;
33792 +- __entry->delta = current_timestamp - current_time;
33793 +- ),
33794 +- TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
33795 +- __entry->ec_sample_timestamp,
33796 +- __entry->ec_fifo_timestamp,
33797 +- __entry->fifo_timestamp,
33798 +- __entry->current_timestamp,
33799 +- __entry->current_time,
33800 +- __entry->delta
33801 +- )
33802 +-);
33803 +-
33804 +-TRACE_EVENT(cros_ec_sensorhub_data,
33805 +- TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
33806 +- s64 current_timestamp, s64 current_time),
33807 +- TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
33808 +- TP_STRUCT__entry(
33809 +- __field(u32, ec_sensor_num)
33810 +- __field(u32, ec_fifo_timestamp)
33811 +- __field(s64, fifo_timestamp)
33812 +- __field(s64, current_timestamp)
33813 +- __field(s64, current_time)
33814 +- __field(s64, delta)
33815 +- ),
33816 +- TP_fast_assign(
33817 +- __entry->ec_sensor_num = ec_sensor_num;
33818 +- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
33819 +- __entry->fifo_timestamp = fifo_timestamp;
33820 +- __entry->current_timestamp = current_timestamp;
33821 +- __entry->current_time = current_time;
33822 +- __entry->delta = current_timestamp - current_time;
33823 +- ),
33824 +- TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
33825 +- __entry->ec_sensor_num,
33826 +- __entry->ec_fifo_timestamp,
33827 +- __entry->fifo_timestamp,
33828 +- __entry->current_timestamp,
33829 +- __entry->current_time,
33830 +- __entry->delta
33831 +- )
33832 +-);
33833 +-
33834 +-TRACE_EVENT(cros_ec_sensorhub_filter,
33835 +- TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
33836 +- TP_ARGS(state, dx, dy),
33837 +- TP_STRUCT__entry(
33838 +- __field(s64, dx)
33839 +- __field(s64, dy)
33840 +- __field(s64, median_m)
33841 +- __field(s64, median_error)
33842 +- __field(s64, history_len)
33843 +- __field(s64, x)
33844 +- __field(s64, y)
33845 +- ),
33846 +- TP_fast_assign(
33847 +- __entry->dx = dx;
33848 +- __entry->dy = dy;
33849 +- __entry->median_m = state->median_m;
33850 +- __entry->median_error = state->median_error;
33851 +- __entry->history_len = state->history_len;
33852 +- __entry->x = state->x_offset;
33853 +- __entry->y = state->y_offset;
33854 +- ),
33855 +- TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
33856 +- __entry->dx,
33857 +- __entry->dy,
33858 +- __entry->median_m,
33859 +- __entry->median_error,
33860 +- __entry->history_len,
33861 +- __entry->x,
33862 +- __entry->y
33863 +- )
33864 +-);
33865 +-
33866 +-
33867 + #endif /* _CROS_EC_TRACE_H_ */
33868 +
33869 + /* this part must be outside header guard */
33870 +diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
33871 +index 5de0bfb0bc4d9..952c1756f59ee 100644
33872 +--- a/drivers/platform/chrome/cros_ec_typec.c
33873 ++++ b/drivers/platform/chrome/cros_ec_typec.c
33874 +@@ -1075,7 +1075,13 @@ static int cros_typec_probe(struct platform_device *pdev)
33875 + return -ENOMEM;
33876 +
33877 + typec->dev = dev;
33878 ++
33879 + typec->ec = dev_get_drvdata(pdev->dev.parent);
33880 ++ if (!typec->ec) {
33881 ++ dev_err(dev, "couldn't find parent EC device\n");
33882 ++ return -ENODEV;
33883 ++ }
33884 ++
33885 + platform_set_drvdata(pdev, typec);
33886 +
33887 + ret = cros_typec_get_cmd_version(typec);
33888 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
33889 +index 2104a2621e507..adab31b52f2af 100644
33890 +--- a/drivers/platform/x86/asus-wmi.c
33891 ++++ b/drivers/platform/x86/asus-wmi.c
33892 +@@ -2059,7 +2059,7 @@ static int fan_boost_mode_check_present(struct asus_wmi *asus)
33893 + err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_BOOST_MODE,
33894 + &result);
33895 + if (err) {
33896 +- if (err == -ENODEV)
33897 ++ if (err == -ENODEV || err == -ENODATA)
33898 + return 0;
33899 + else
33900 + return err;
33901 +diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
33902 +index a2d846c4a7eef..eac3e6b4ea113 100644
33903 +--- a/drivers/platform/x86/huawei-wmi.c
33904 ++++ b/drivers/platform/x86/huawei-wmi.c
33905 +@@ -470,10 +470,17 @@ static DEVICE_ATTR_RW(charge_control_thresholds);
33906 +
33907 + static int huawei_wmi_battery_add(struct power_supply *battery)
33908 + {
33909 +- device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
33910 +- device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
33911 ++ int err = 0;
33912 +
33913 +- return 0;
33914 ++ err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
33915 ++ if (err)
33916 ++ return err;
33917 ++
33918 ++ err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
33919 ++ if (err)
33920 ++ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
33921 ++
33922 ++ return err;
33923 + }
33924 +
33925 + static int huawei_wmi_battery_remove(struct power_supply *battery)
33926 +diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
33927 +index 90e35c07240ae..b7f7a8225f22e 100644
33928 +--- a/drivers/power/reset/gemini-poweroff.c
33929 ++++ b/drivers/power/reset/gemini-poweroff.c
33930 +@@ -107,8 +107,8 @@ static int gemini_poweroff_probe(struct platform_device *pdev)
33931 + return PTR_ERR(gpw->base);
33932 +
33933 + irq = platform_get_irq(pdev, 0);
33934 +- if (!irq)
33935 +- return -EINVAL;
33936 ++ if (irq < 0)
33937 ++ return irq;
33938 +
33939 + gpw->dev = dev;
33940 +
33941 +diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
33942 +index 7ae95f5375801..9a8334a65de1b 100644
33943 +--- a/drivers/power/supply/ab8500_bmdata.c
33944 ++++ b/drivers/power/supply/ab8500_bmdata.c
33945 +@@ -188,13 +188,11 @@ int ab8500_bm_of_probe(struct power_supply *psy,
33946 + * fall back to safe defaults.
33947 + */
33948 + if ((bi->voltage_min_design_uv < 0) ||
33949 +- (bi->voltage_max_design_uv < 0) ||
33950 +- (bi->overvoltage_limit_uv < 0)) {
33951 ++ (bi->voltage_max_design_uv < 0)) {
33952 + /* Nominal voltage is 3.7V for unknown batteries */
33953 + bi->voltage_min_design_uv = 3700000;
33954 +- bi->voltage_max_design_uv = 3700000;
33955 +- /* Termination voltage (overcharge limit) 4.05V */
33956 +- bi->overvoltage_limit_uv = 4050000;
33957 ++ /* Termination voltage 4.05V */
33958 ++ bi->voltage_max_design_uv = 4050000;
33959 + }
33960 +
33961 + if (bi->constant_charge_current_max_ua < 0)
33962 +diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
33963 +index c4a2fe07126c3..da490e090ce48 100644
33964 +--- a/drivers/power/supply/ab8500_chargalg.c
33965 ++++ b/drivers/power/supply/ab8500_chargalg.c
33966 +@@ -802,7 +802,7 @@ static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
33967 + if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
33968 + di->charge_state == STATE_NORMAL &&
33969 + !di->maintenance_chg && (di->batt_data.volt_uv >=
33970 +- di->bm->bi->overvoltage_limit_uv ||
33971 ++ di->bm->bi->voltage_max_design_uv ||
33972 + di->events.usb_cv_active || di->events.ac_cv_active) &&
33973 + di->batt_data.avg_curr_ua <
33974 + di->bm->bi->charge_term_current_ua &&
33975 +@@ -2020,11 +2020,11 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
33976 + psy_cfg.drv_data = di;
33977 +
33978 + /* Initilialize safety timer */
33979 +- hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
33980 ++ hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
33981 + di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
33982 +
33983 + /* Initilialize maintenance timer */
33984 +- hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
33985 ++ hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
33986 + di->maintenance_timer.function =
33987 + ab8500_chargalg_maintenance_timer_expired;
33988 +
33989 +diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
33990 +index b0919a6a65878..09a4cbd69676a 100644
33991 +--- a/drivers/power/supply/ab8500_fg.c
33992 ++++ b/drivers/power/supply/ab8500_fg.c
33993 +@@ -2263,7 +2263,13 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
33994 + {
33995 + int ret;
33996 +
33997 +- /* Set VBAT OVV threshold */
33998 ++ /*
33999 ++ * Set VBAT OVV (overvoltage) threshold to 4.75V (typ) this is what
34000 ++ * the hardware supports, nothing else can be configured in hardware.
34001 ++ * See this as an "outer limit" where the charger will certainly
34002 ++ * shut down. Other (lower) overvoltage levels need to be implemented
34003 ++ * in software.
34004 ++ */
34005 + ret = abx500_mask_and_set_register_interruptible(di->dev,
34006 + AB8500_CHARGER,
34007 + AB8500_BATT_OVV,
34008 +@@ -2521,8 +2527,10 @@ static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
34009 + ret = kobject_init_and_add(&di->fg_kobject,
34010 + &ab8500_fg_ktype,
34011 + NULL, "battery");
34012 +- if (ret < 0)
34013 ++ if (ret < 0) {
34014 ++ kobject_put(&di->fg_kobject);
34015 + dev_err(di->dev, "failed to create sysfs entry\n");
34016 ++ }
34017 +
34018 + return ret;
34019 + }
34020 +diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
34021 +index 06c34b09349ca..8ad1b3b02490c 100644
34022 +--- a/drivers/power/supply/bq24190_charger.c
34023 ++++ b/drivers/power/supply/bq24190_charger.c
34024 +@@ -39,6 +39,7 @@
34025 + #define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
34026 + #define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
34027 + #define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
34028 ++#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
34029 + #define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
34030 + #define BQ24190_REG_POC_SYS_MIN_SHIFT 1
34031 + #define BQ24190_REG_POC_SYS_MIN_MIN 3000
34032 +@@ -550,7 +551,11 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
34033 + pm_runtime_mark_last_busy(bdi->dev);
34034 + pm_runtime_put_autosuspend(bdi->dev);
34035 +
34036 +- return ret ? ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG;
34037 ++ if (ret)
34038 ++ return ret;
34039 ++
34040 ++ return (val == BQ24190_REG_POC_CHG_CONFIG_OTG ||
34041 ++ val == BQ24190_REG_POC_CHG_CONFIG_OTG_ALT);
34042 + }
34043 +
34044 + static const struct regulator_ops bq24190_vbus_ops = {
34045 +diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
34046 +index 6fa65d118ec12..b08f7d0c41815 100644
34047 +--- a/drivers/power/supply/sbs-charger.c
34048 ++++ b/drivers/power/supply/sbs-charger.c
34049 +@@ -18,6 +18,7 @@
34050 + #include <linux/interrupt.h>
34051 + #include <linux/regmap.h>
34052 + #include <linux/bitops.h>
34053 ++#include <linux/devm-helpers.h>
34054 +
34055 + #define SBS_CHARGER_REG_SPEC_INFO 0x11
34056 + #define SBS_CHARGER_REG_STATUS 0x13
34057 +@@ -209,7 +210,12 @@ static int sbs_probe(struct i2c_client *client,
34058 + if (ret)
34059 + return dev_err_probe(&client->dev, ret, "Failed to request irq\n");
34060 + } else {
34061 +- INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
34062 ++ ret = devm_delayed_work_autocancel(&client->dev, &chip->work,
34063 ++ sbs_delayed_work);
34064 ++ if (ret)
34065 ++ return dev_err_probe(&client->dev, ret,
34066 ++ "Failed to init work for polling\n");
34067 ++
34068 + schedule_delayed_work(&chip->work,
34069 + msecs_to_jiffies(SBS_CHARGER_POLL_TIME));
34070 + }
34071 +@@ -220,15 +226,6 @@ static int sbs_probe(struct i2c_client *client,
34072 + return 0;
34073 + }
34074 +
34075 +-static int sbs_remove(struct i2c_client *client)
34076 +-{
34077 +- struct sbs_info *chip = i2c_get_clientdata(client);
34078 +-
34079 +- cancel_delayed_work_sync(&chip->work);
34080 +-
34081 +- return 0;
34082 +-}
34083 +-
34084 + #ifdef CONFIG_OF
34085 + static const struct of_device_id sbs_dt_ids[] = {
34086 + { .compatible = "sbs,sbs-charger" },
34087 +@@ -245,7 +242,6 @@ MODULE_DEVICE_TABLE(i2c, sbs_id);
34088 +
34089 + static struct i2c_driver sbs_driver = {
34090 + .probe = sbs_probe,
34091 +- .remove = sbs_remove,
34092 + .id_table = sbs_id,
34093 + .driver = {
34094 + .name = "sbs-charger",
34095 +diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
34096 +index e05cee457471b..908cfd45d2624 100644
34097 +--- a/drivers/power/supply/wm8350_power.c
34098 ++++ b/drivers/power/supply/wm8350_power.c
34099 +@@ -408,44 +408,112 @@ static const struct power_supply_desc wm8350_usb_desc = {
34100 + * Initialisation
34101 + *********************************************************************/
34102 +
34103 +-static void wm8350_init_charger(struct wm8350 *wm8350)
34104 ++static int wm8350_init_charger(struct wm8350 *wm8350)
34105 + {
34106 ++ int ret;
34107 ++
34108 + /* register our interest in charger events */
34109 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
34110 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
34111 + wm8350_charger_handler, 0, "Battery hot", wm8350);
34112 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
34113 ++ if (ret)
34114 ++ goto err;
34115 ++
34116 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
34117 + wm8350_charger_handler, 0, "Battery cold", wm8350);
34118 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
34119 ++ if (ret)
34120 ++ goto free_chg_bat_hot;
34121 ++
34122 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
34123 + wm8350_charger_handler, 0, "Battery fail", wm8350);
34124 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
34125 ++ if (ret)
34126 ++ goto free_chg_bat_cold;
34127 ++
34128 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
34129 + wm8350_charger_handler, 0,
34130 + "Charger timeout", wm8350);
34131 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
34132 ++ if (ret)
34133 ++ goto free_chg_bat_fail;
34134 ++
34135 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
34136 + wm8350_charger_handler, 0,
34137 + "Charge end", wm8350);
34138 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
34139 ++ if (ret)
34140 ++ goto free_chg_to;
34141 ++
34142 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
34143 + wm8350_charger_handler, 0,
34144 + "Charge start", wm8350);
34145 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
34146 ++ if (ret)
34147 ++ goto free_chg_end;
34148 ++
34149 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
34150 + wm8350_charger_handler, 0,
34151 + "Fast charge ready", wm8350);
34152 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
34153 ++ if (ret)
34154 ++ goto free_chg_start;
34155 ++
34156 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
34157 + wm8350_charger_handler, 0,
34158 + "Battery <3.9V", wm8350);
34159 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
34160 ++ if (ret)
34161 ++ goto free_chg_fast_rdy;
34162 ++
34163 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
34164 + wm8350_charger_handler, 0,
34165 + "Battery <3.1V", wm8350);
34166 +- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
34167 ++ if (ret)
34168 ++ goto free_chg_vbatt_lt_3p9;
34169 ++
34170 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
34171 + wm8350_charger_handler, 0,
34172 + "Battery <2.85V", wm8350);
34173 ++ if (ret)
34174 ++ goto free_chg_vbatt_lt_3p1;
34175 +
34176 + /* and supply change events */
34177 +- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
34178 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
34179 + wm8350_charger_handler, 0, "USB", wm8350);
34180 +- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
34181 ++ if (ret)
34182 ++ goto free_chg_vbatt_lt_2p85;
34183 ++
34184 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
34185 + wm8350_charger_handler, 0, "Wall", wm8350);
34186 +- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
34187 ++ if (ret)
34188 ++ goto free_ext_usb_fb;
34189 ++
34190 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
34191 + wm8350_charger_handler, 0, "Battery", wm8350);
34192 ++ if (ret)
34193 ++ goto free_ext_wall_fb;
34194 ++
34195 ++ return 0;
34196 ++
34197 ++free_ext_wall_fb:
34198 ++ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350);
34199 ++free_ext_usb_fb:
34200 ++ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350);
34201 ++free_chg_vbatt_lt_2p85:
34202 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
34203 ++free_chg_vbatt_lt_3p1:
34204 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
34205 ++free_chg_vbatt_lt_3p9:
34206 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
34207 ++free_chg_fast_rdy:
34208 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350);
34209 ++free_chg_start:
34210 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
34211 ++free_chg_end:
34212 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
34213 ++free_chg_to:
34214 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
34215 ++free_chg_bat_fail:
34216 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350);
34217 ++free_chg_bat_cold:
34218 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350);
34219 ++free_chg_bat_hot:
34220 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350);
34221 ++err:
34222 ++ return ret;
34223 + }
34224 +
34225 + static void free_charger_irq(struct wm8350 *wm8350)
34226 +@@ -456,6 +524,7 @@ static void free_charger_irq(struct wm8350 *wm8350)
34227 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
34228 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
34229 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
34230 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350);
34231 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
34232 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
34233 + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
34234 +diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
34235 +index b740866b228d9..1e8cac699646c 100644
34236 +--- a/drivers/powercap/dtpm_cpu.c
34237 ++++ b/drivers/powercap/dtpm_cpu.c
34238 +@@ -150,10 +150,17 @@ static int update_pd_power_uw(struct dtpm *dtpm)
34239 + static void pd_release(struct dtpm *dtpm)
34240 + {
34241 + struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
34242 ++ struct cpufreq_policy *policy;
34243 +
34244 + if (freq_qos_request_active(&dtpm_cpu->qos_req))
34245 + freq_qos_remove_request(&dtpm_cpu->qos_req);
34246 +
34247 ++ policy = cpufreq_cpu_get(dtpm_cpu->cpu);
34248 ++ if (policy) {
34249 ++ for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
34250 ++ per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
34251 ++ }
34252 ++
34253 + kfree(dtpm_cpu);
34254 + }
34255 +
34256 +diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
34257 +index 35799e6401c99..2f4b11b4dfcd9 100644
34258 +--- a/drivers/pps/clients/pps-gpio.c
34259 ++++ b/drivers/pps/clients/pps-gpio.c
34260 +@@ -169,7 +169,7 @@ static int pps_gpio_probe(struct platform_device *pdev)
34261 + /* GPIO setup */
34262 + ret = pps_gpio_setup(dev);
34263 + if (ret)
34264 +- return -EINVAL;
34265 ++ return ret;
34266 +
34267 + /* IRQ setup */
34268 + ret = gpiod_to_irq(data->gpio_pin);
34269 +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
34270 +index 0e4bc8b9329dd..b6f2cfd15dd2d 100644
34271 +--- a/drivers/ptp/ptp_clock.c
34272 ++++ b/drivers/ptp/ptp_clock.c
34273 +@@ -317,11 +317,18 @@ no_memory:
34274 + }
34275 + EXPORT_SYMBOL(ptp_clock_register);
34276 +
34277 ++static int unregister_vclock(struct device *dev, void *data)
34278 ++{
34279 ++ struct ptp_clock *ptp = dev_get_drvdata(dev);
34280 ++
34281 ++ ptp_vclock_unregister(info_to_vclock(ptp->info));
34282 ++ return 0;
34283 ++}
34284 ++
34285 + int ptp_clock_unregister(struct ptp_clock *ptp)
34286 + {
34287 + if (ptp_vclock_in_use(ptp)) {
34288 +- pr_err("ptp: virtual clock in use\n");
34289 +- return -EBUSY;
34290 ++ device_for_each_child(&ptp->dev, NULL, unregister_vclock);
34291 + }
34292 +
34293 + ptp->defunct = 1;
34294 +diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
34295 +index 8e461f3baa05a..8cc8ae16553cf 100644
34296 +--- a/drivers/pwm/pwm-lpc18xx-sct.c
34297 ++++ b/drivers/pwm/pwm-lpc18xx-sct.c
34298 +@@ -395,12 +395,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
34299 + lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
34300 + BIT(lpc18xx_pwm->period_event));
34301 +
34302 +- ret = pwmchip_add(&lpc18xx_pwm->chip);
34303 +- if (ret < 0) {
34304 +- dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
34305 +- goto disable_pwmclk;
34306 +- }
34307 +-
34308 + for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
34309 + struct lpc18xx_pwm_data *data;
34310 +
34311 +@@ -410,14 +404,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
34312 + GFP_KERNEL);
34313 + if (!data) {
34314 + ret = -ENOMEM;
34315 +- goto remove_pwmchip;
34316 ++ goto disable_pwmclk;
34317 + }
34318 +
34319 + pwm_set_chip_data(pwm, data);
34320 + }
34321 +
34322 +- platform_set_drvdata(pdev, lpc18xx_pwm);
34323 +-
34324 + val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
34325 + val &= ~LPC18XX_PWM_BIDIR;
34326 + val &= ~LPC18XX_PWM_CTRL_HALT;
34327 +@@ -425,10 +417,16 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
34328 + val |= LPC18XX_PWM_PRE(0);
34329 + lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
34330 +
34331 ++ ret = pwmchip_add(&lpc18xx_pwm->chip);
34332 ++ if (ret < 0) {
34333 ++ dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
34334 ++ goto disable_pwmclk;
34335 ++ }
34336 ++
34337 ++ platform_set_drvdata(pdev, lpc18xx_pwm);
34338 ++
34339 + return 0;
34340 +
34341 +-remove_pwmchip:
34342 +- pwmchip_remove(&lpc18xx_pwm->chip);
34343 + disable_pwmclk:
34344 + clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
34345 + return ret;
34346 +diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
34347 +index 9fc666107a06c..8490aa8eecb1a 100644
34348 +--- a/drivers/regulator/qcom_smd-regulator.c
34349 ++++ b/drivers/regulator/qcom_smd-regulator.c
34350 +@@ -1317,8 +1317,10 @@ static int rpm_reg_probe(struct platform_device *pdev)
34351 +
34352 + for_each_available_child_of_node(dev->of_node, node) {
34353 + vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
34354 +- if (!vreg)
34355 ++ if (!vreg) {
34356 ++ of_node_put(node);
34357 + return -ENOMEM;
34358 ++ }
34359 +
34360 + ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
34361 +
34362 +diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
34363 +index ee46bfbf5eee7..991b4730d7687 100644
34364 +--- a/drivers/regulator/rpi-panel-attiny-regulator.c
34365 ++++ b/drivers/regulator/rpi-panel-attiny-regulator.c
34366 +@@ -37,11 +37,24 @@ static const struct regmap_config attiny_regmap_config = {
34367 + static int attiny_lcd_power_enable(struct regulator_dev *rdev)
34368 + {
34369 + unsigned int data;
34370 ++ int ret, i;
34371 +
34372 + regmap_write(rdev->regmap, REG_POWERON, 1);
34373 ++ msleep(80);
34374 ++
34375 + /* Wait for nPWRDWN to go low to indicate poweron is done. */
34376 +- regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data,
34377 +- data & BIT(0), 10, 1000000);
34378 ++ for (i = 0; i < 20; i++) {
34379 ++ ret = regmap_read(rdev->regmap, REG_PORTB, &data);
34380 ++ if (!ret) {
34381 ++ if (data & BIT(0))
34382 ++ break;
34383 ++ }
34384 ++ usleep_range(10000, 12000);
34385 ++ }
34386 ++ usleep_range(10000, 12000);
34387 ++
34388 ++ if (ret)
34389 ++ pr_err("%s: regmap_read_poll_timeout failed %d\n", __func__, ret);
34390 +
34391 + /* Default to the same orientation as the closed source
34392 + * firmware used for the panel. Runtime rotation
34393 +@@ -57,23 +70,34 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev)
34394 + {
34395 + regmap_write(rdev->regmap, REG_PWM, 0);
34396 + regmap_write(rdev->regmap, REG_POWERON, 0);
34397 +- udelay(1);
34398 ++ msleep(30);
34399 + return 0;
34400 + }
34401 +
34402 + static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
34403 + {
34404 + unsigned int data;
34405 +- int ret;
34406 ++ int ret, i;
34407 +
34408 +- ret = regmap_read(rdev->regmap, REG_POWERON, &data);
34409 ++ for (i = 0; i < 10; i++) {
34410 ++ ret = regmap_read(rdev->regmap, REG_POWERON, &data);
34411 ++ if (!ret)
34412 ++ break;
34413 ++ usleep_range(10000, 12000);
34414 ++ }
34415 + if (ret < 0)
34416 + return ret;
34417 +
34418 + if (!(data & BIT(0)))
34419 + return 0;
34420 +
34421 +- ret = regmap_read(rdev->regmap, REG_PORTB, &data);
34422 ++ for (i = 0; i < 10; i++) {
34423 ++ ret = regmap_read(rdev->regmap, REG_PORTB, &data);
34424 ++ if (!ret)
34425 ++ break;
34426 ++ usleep_range(10000, 12000);
34427 ++ }
34428 ++
34429 + if (ret < 0)
34430 + return ret;
34431 +
34432 +@@ -103,20 +127,32 @@ static int attiny_update_status(struct backlight_device *bl)
34433 + {
34434 + struct regmap *regmap = bl_get_data(bl);
34435 + int brightness = bl->props.brightness;
34436 ++ int ret, i;
34437 +
34438 + if (bl->props.power != FB_BLANK_UNBLANK ||
34439 + bl->props.fb_blank != FB_BLANK_UNBLANK)
34440 + brightness = 0;
34441 +
34442 +- return regmap_write(regmap, REG_PWM, brightness);
34443 ++ for (i = 0; i < 10; i++) {
34444 ++ ret = regmap_write(regmap, REG_PWM, brightness);
34445 ++ if (!ret)
34446 ++ break;
34447 ++ }
34448 ++
34449 ++ return ret;
34450 + }
34451 +
34452 + static int attiny_get_brightness(struct backlight_device *bl)
34453 + {
34454 + struct regmap *regmap = bl_get_data(bl);
34455 +- int ret, brightness;
34456 ++ int ret, brightness, i;
34457 ++
34458 ++ for (i = 0; i < 10; i++) {
34459 ++ ret = regmap_read(regmap, REG_PWM, &brightness);
34460 ++ if (!ret)
34461 ++ break;
34462 ++ }
34463 +
34464 +- ret = regmap_read(regmap, REG_PWM, &brightness);
34465 + if (ret)
34466 + return ret;
34467 +
34468 +@@ -166,7 +202,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c,
34469 + }
34470 +
34471 + regmap_write(regmap, REG_POWERON, 0);
34472 +- mdelay(1);
34473 ++ msleep(30);
34474 +
34475 + config.dev = &i2c->dev;
34476 + config.regmap = regmap;
34477 +diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
34478 +index 098362e6e233b..7c02bc1322479 100644
34479 +--- a/drivers/remoteproc/qcom_q6v5_adsp.c
34480 ++++ b/drivers/remoteproc/qcom_q6v5_adsp.c
34481 +@@ -408,6 +408,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
34482 + }
34483 +
34484 + ret = of_address_to_resource(node, 0, &r);
34485 ++ of_node_put(node);
34486 + if (ret)
34487 + return ret;
34488 +
34489 +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
34490 +index 43ea8455546ca..b9ab91540b00d 100644
34491 +--- a/drivers/remoteproc/qcom_q6v5_mss.c
34492 ++++ b/drivers/remoteproc/qcom_q6v5_mss.c
34493 +@@ -1806,18 +1806,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
34494 + * reserved memory regions from device's memory-region property.
34495 + */
34496 + child = of_get_child_by_name(qproc->dev->of_node, "mba");
34497 +- if (!child)
34498 ++ if (!child) {
34499 + node = of_parse_phandle(qproc->dev->of_node,
34500 + "memory-region", 0);
34501 +- else
34502 ++ } else {
34503 + node = of_parse_phandle(child, "memory-region", 0);
34504 ++ of_node_put(child);
34505 ++ }
34506 +
34507 + ret = of_address_to_resource(node, 0, &r);
34508 ++ of_node_put(node);
34509 + if (ret) {
34510 + dev_err(qproc->dev, "unable to resolve mba region\n");
34511 + return ret;
34512 + }
34513 +- of_node_put(node);
34514 +
34515 + qproc->mba_phys = r.start;
34516 + qproc->mba_size = resource_size(&r);
34517 +@@ -1828,14 +1830,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
34518 + } else {
34519 + child = of_get_child_by_name(qproc->dev->of_node, "mpss");
34520 + node = of_parse_phandle(child, "memory-region", 0);
34521 ++ of_node_put(child);
34522 + }
34523 +
34524 + ret = of_address_to_resource(node, 0, &r);
34525 ++ of_node_put(node);
34526 + if (ret) {
34527 + dev_err(qproc->dev, "unable to resolve mpss region\n");
34528 + return ret;
34529 + }
34530 +- of_node_put(node);
34531 +
34532 + qproc->mpss_phys = qproc->mpss_reloc = r.start;
34533 + qproc->mpss_size = resource_size(&r);
34534 +diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
34535 +index 80bbafee98463..9a223d394087f 100644
34536 +--- a/drivers/remoteproc/qcom_wcnss.c
34537 ++++ b/drivers/remoteproc/qcom_wcnss.c
34538 +@@ -500,6 +500,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
34539 + }
34540 +
34541 + ret = of_address_to_resource(node, 0, &r);
34542 ++ of_node_put(node);
34543 + if (ret)
34544 + return ret;
34545 +
34546 +diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
34547 +index b5a1e3b697d9f..581930483ef84 100644
34548 +--- a/drivers/remoteproc/remoteproc_debugfs.c
34549 ++++ b/drivers/remoteproc/remoteproc_debugfs.c
34550 +@@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp,
34551 + int ret, err = 0;
34552 + char buf[20];
34553 +
34554 +- if (count > sizeof(buf))
34555 ++ if (count < 1 || count > sizeof(buf))
34556 + return -EINVAL;
34557 +
34558 + ret = copy_from_user(buf, user_buf, count);
34559 +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
34560 +index d8e8357981537..9edd662c69ace 100644
34561 +--- a/drivers/rtc/interface.c
34562 ++++ b/drivers/rtc/interface.c
34563 +@@ -804,9 +804,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
34564 + struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
34565 + struct rtc_time tm;
34566 + ktime_t now;
34567 ++ int err;
34568 ++
34569 ++ err = __rtc_read_time(rtc, &tm);
34570 ++ if (err)
34571 ++ return err;
34572 +
34573 + timer->enabled = 1;
34574 +- __rtc_read_time(rtc, &tm);
34575 + now = rtc_tm_to_ktime(tm);
34576 +
34577 + /* Skip over expired timers */
34578 +@@ -820,7 +824,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
34579 + trace_rtc_timer_enqueue(timer);
34580 + if (!next || ktime_before(timer->node.expires, next->expires)) {
34581 + struct rtc_wkalrm alarm;
34582 +- int err;
34583 +
34584 + alarm.time = rtc_ktime_to_tm(timer->node.expires);
34585 + alarm.enabled = 1;
34586 +diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
34587 +index f717b36f4738c..18ca3b38b2d04 100644
34588 +--- a/drivers/rtc/rtc-gamecube.c
34589 ++++ b/drivers/rtc/rtc-gamecube.c
34590 +@@ -235,6 +235,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
34591 + }
34592 +
34593 + ret = of_address_to_resource(np, 0, &res);
34594 ++ of_node_put(np);
34595 + if (ret) {
34596 + pr_err("no io memory range found\n");
34597 + return -1;
34598 +diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
34599 +index ae9f131b43c0c..562f99b664a24 100644
34600 +--- a/drivers/rtc/rtc-mc146818-lib.c
34601 ++++ b/drivers/rtc/rtc-mc146818-lib.c
34602 +@@ -232,8 +232,10 @@ int mc146818_set_time(struct rtc_time *time)
34603 + if (yrs >= 100)
34604 + yrs -= 100;
34605 +
34606 +- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
34607 +- || RTC_ALWAYS_BCD) {
34608 ++ spin_lock_irqsave(&rtc_lock, flags);
34609 ++ save_control = CMOS_READ(RTC_CONTROL);
34610 ++ spin_unlock_irqrestore(&rtc_lock, flags);
34611 ++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
34612 + sec = bin2bcd(sec);
34613 + min = bin2bcd(min);
34614 + hrs = bin2bcd(hrs);
34615 +diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
34616 +index e38ee88483855..bad6a5d9c6839 100644
34617 +--- a/drivers/rtc/rtc-pl031.c
34618 ++++ b/drivers/rtc/rtc-pl031.c
34619 +@@ -350,9 +350,6 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
34620 + }
34621 + }
34622 +
34623 +- if (!adev->irq[0])
34624 +- clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
34625 +-
34626 + device_init_wakeup(&adev->dev, true);
34627 + ldata->rtc = devm_rtc_allocate_device(&adev->dev);
34628 + if (IS_ERR(ldata->rtc)) {
34629 +@@ -360,6 +357,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
34630 + goto out;
34631 + }
34632 +
34633 ++ if (!adev->irq[0])
34634 ++ clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
34635 ++
34636 + ldata->rtc->ops = ops;
34637 + ldata->rtc->range_min = vendor->range_min;
34638 + ldata->rtc->range_max = vendor->range_max;
34639 +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
34640 +index 40a52feb315da..65047806a5410 100644
34641 +--- a/drivers/scsi/fnic/fnic_scsi.c
34642 ++++ b/drivers/scsi/fnic/fnic_scsi.c
34643 +@@ -604,7 +604,7 @@ out:
34644 +
34645 + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
34646 + tag, sc, io_req, sg_count, cmd_trace,
34647 +- (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
34648 ++ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
34649 +
34650 + /* if only we issued IO, will we have the io lock */
34651 + if (io_lock_acquired)
34652 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
34653 +index a01a3a7b706b5..70173389f6ebd 100644
34654 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
34655 ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
34656 +@@ -530,7 +530,7 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
34657 +
34658 + /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
34659 + static int prot_mask;
34660 +-module_param(prot_mask, int, 0);
34661 ++module_param(prot_mask, int, 0444);
34662 + MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
34663 +
34664 + static void debugfs_work_handler_v3_hw(struct work_struct *work);
34665 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34666 +index a315715b36227..7e0cde710fc3c 100644
34667 +--- a/drivers/scsi/libsas/sas_ata.c
34668 ++++ b/drivers/scsi/libsas/sas_ata.c
34669 +@@ -197,7 +197,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
34670 + task->total_xfer_len = qc->nbytes;
34671 + task->num_scatter = qc->n_elem;
34672 + task->data_dir = qc->dma_dir;
34673 +- } else if (qc->tf.protocol == ATA_PROT_NODATA) {
34674 ++ } else if (!ata_is_data(qc->tf.protocol)) {
34675 + task->data_dir = DMA_NONE;
34676 + } else {
34677 + for_each_sg(qc->sg, sg, qc->n_elem, si)
34678 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
34679 +index 76229b839560a..fb5a3a348dbec 100644
34680 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
34681 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
34682 +@@ -5736,14 +5736,13 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
34683 + */
34684 +
34685 + static int
34686 +-mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
34687 ++mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
34688 + {
34689 +- long reply_pool_end_address;
34690 ++ dma_addr_t end_address;
34691 +
34692 +- reply_pool_end_address = reply_pool_start_address + pool_sz;
34693 ++ end_address = start_address + pool_sz - 1;
34694 +
34695 +- if (upper_32_bits(reply_pool_start_address) ==
34696 +- upper_32_bits(reply_pool_end_address))
34697 ++ if (upper_32_bits(start_address) == upper_32_bits(end_address))
34698 + return 1;
34699 + else
34700 + return 0;
34701 +@@ -5804,7 +5803,7 @@ _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
34702 + }
34703 +
34704 + if (!mpt3sas_check_same_4gb_region(
34705 +- (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
34706 ++ ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
34707 + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
34708 + ioc->pcie_sg_lookup[i].pcie_sgl,
34709 + (unsigned long long)
34710 +@@ -5859,8 +5858,8 @@ _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
34711 + GFP_KERNEL, &ctr->chain_buffer_dma);
34712 + if (!ctr->chain_buffer)
34713 + return -EAGAIN;
34714 +- if (!mpt3sas_check_same_4gb_region((long)
34715 +- ctr->chain_buffer, ioc->chain_segment_sz)) {
34716 ++ if (!mpt3sas_check_same_4gb_region(
34717 ++ ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
34718 + ioc_err(ioc,
34719 + "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
34720 + ctr->chain_buffer,
34721 +@@ -5896,7 +5895,7 @@ _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
34722 + GFP_KERNEL, &ioc->sense_dma);
34723 + if (!ioc->sense)
34724 + return -EAGAIN;
34725 +- if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
34726 ++ if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
34727 + dinitprintk(ioc, pr_err(
34728 + "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
34729 + ioc->sense, (unsigned long long) ioc->sense_dma));
34730 +@@ -5929,7 +5928,7 @@ _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
34731 + &ioc->reply_dma);
34732 + if (!ioc->reply)
34733 + return -EAGAIN;
34734 +- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
34735 ++ if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
34736 + dinitprintk(ioc, pr_err(
34737 + "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
34738 + ioc->reply, (unsigned long long) ioc->reply_dma));
34739 +@@ -5964,7 +5963,7 @@ _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
34740 + GFP_KERNEL, &ioc->reply_free_dma);
34741 + if (!ioc->reply_free)
34742 + return -EAGAIN;
34743 +- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
34744 ++ if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
34745 + dinitprintk(ioc,
34746 + pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
34747 + ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
34748 +@@ -6003,7 +6002,7 @@ _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
34749 + GFP_KERNEL, &ioc->reply_post_free_array_dma);
34750 + if (!ioc->reply_post_free_array)
34751 + return -EAGAIN;
34752 +- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
34753 ++ if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
34754 + reply_post_free_array_sz)) {
34755 + dinitprintk(ioc, pr_err(
34756 + "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
34757 +@@ -6068,7 +6067,7 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
34758 + * resources and set DMA mask to 32 and allocate.
34759 + */
34760 + if (!mpt3sas_check_same_4gb_region(
34761 +- (long)ioc->reply_post[i].reply_post_free, sz)) {
34762 ++ ioc->reply_post[i].reply_post_free_dma, sz)) {
34763 + dinitprintk(ioc,
34764 + ioc_err(ioc, "bad Replypost free pool(0x%p)"
34765 + "reply_post_free_dma = (0x%llx)\n",
34766 +diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
34767 +index 9ec310b795c33..d853e8d0195a6 100644
34768 +--- a/drivers/scsi/pm8001/pm8001_hwi.c
34769 ++++ b/drivers/scsi/pm8001/pm8001_hwi.c
34770 +@@ -1788,6 +1788,7 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
34771 + ccb->device = pm8001_ha_dev;
34772 + ccb->ccb_tag = ccb_tag;
34773 + ccb->task = task;
34774 ++ ccb->n_elem = 0;
34775 +
34776 + circularQ = &pm8001_ha->inbnd_q_tbl[0];
34777 +
34778 +@@ -1849,6 +1850,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
34779 + ccb->device = pm8001_ha_dev;
34780 + ccb->ccb_tag = ccb_tag;
34781 + ccb->task = task;
34782 ++ ccb->n_elem = 0;
34783 + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
34784 + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
34785 +
34786 +@@ -1865,7 +1867,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
34787 +
34788 + sata_cmd.tag = cpu_to_le32(ccb_tag);
34789 + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
34790 +- sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
34791 ++ sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9));
34792 + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
34793 +
34794 + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
34795 +@@ -2418,7 +2420,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
34796 + len = sizeof(struct pio_setup_fis);
34797 + pm8001_dbg(pm8001_ha, IO,
34798 + "PIO read len = %d\n", len);
34799 +- } else if (t->ata_task.use_ncq) {
34800 ++ } else if (t->ata_task.use_ncq &&
34801 ++ t->data_dir != DMA_NONE) {
34802 + len = sizeof(struct set_dev_bits_fis);
34803 + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
34804 + len);
34805 +@@ -4271,22 +4274,22 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
34806 + u32 opc = OPC_INB_SATA_HOST_OPSTART;
34807 + memset(&sata_cmd, 0, sizeof(sata_cmd));
34808 + circularQ = &pm8001_ha->inbnd_q_tbl[0];
34809 +- if (task->data_dir == DMA_NONE) {
34810 ++
34811 ++ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
34812 + ATAP = 0x04; /* no data*/
34813 + pm8001_dbg(pm8001_ha, IO, "no data\n");
34814 + } else if (likely(!task->ata_task.device_control_reg_update)) {
34815 +- if (task->ata_task.dma_xfer) {
34816 ++ if (task->ata_task.use_ncq &&
34817 ++ dev->sata_dev.class != ATA_DEV_ATAPI) {
34818 ++ ATAP = 0x07; /* FPDMA */
34819 ++ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
34820 ++ } else if (task->ata_task.dma_xfer) {
34821 + ATAP = 0x06; /* DMA */
34822 + pm8001_dbg(pm8001_ha, IO, "DMA\n");
34823 + } else {
34824 + ATAP = 0x05; /* PIO*/
34825 + pm8001_dbg(pm8001_ha, IO, "PIO\n");
34826 + }
34827 +- if (task->ata_task.use_ncq &&
34828 +- dev->sata_dev.class != ATA_DEV_ATAPI) {
34829 +- ATAP = 0x07; /* FPDMA */
34830 +- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
34831 +- }
34832 + }
34833 + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
34834 + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
34835 +@@ -4626,7 +4629,7 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
34836 + memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
34837 + sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
34838 + if (pm8001_ha->chip_id != chip_8001)
34839 +- sspTMCmd.ds_ads_m = 0x08;
34840 ++ sspTMCmd.ds_ads_m = cpu_to_le32(0x08);
34841 + circularQ = &pm8001_ha->inbnd_q_tbl[0];
34842 + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
34843 + sizeof(sspTMCmd), 0);
34844 +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
34845 +index 9d20f8009b89f..908dbac20b483 100644
34846 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c
34847 ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
34848 +@@ -1203,9 +1203,11 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
34849 + else
34850 + page_code = THERMAL_PAGE_CODE_8H;
34851 +
34852 +- payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
34853 +- (THERMAL_ENABLE << 8) | page_code;
34854 +- payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
34855 ++ payload.cfg_pg[0] =
34856 ++ cpu_to_le32((THERMAL_LOG_ENABLE << 9) |
34857 ++ (THERMAL_ENABLE << 8) | page_code);
34858 ++ payload.cfg_pg[1] =
34859 ++ cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8));
34860 +
34861 + pm8001_dbg(pm8001_ha, DEV,
34862 + "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
34863 +@@ -1245,43 +1247,41 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
34864 + circularQ = &pm8001_ha->inbnd_q_tbl[0];
34865 + payload.tag = cpu_to_le32(tag);
34866 +
34867 +- SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
34868 +- SASConfigPage.MST_MSI = 3 << 15;
34869 +- SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
34870 +- SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
34871 +- (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
34872 +- SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
34873 +-
34874 +- if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
34875 +- SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
34876 +-
34877 +-
34878 +- SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
34879 +- SAS_OPNRJT_RTRY_INTVL;
34880 +- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
34881 +- | SAS_COPNRJT_RTRY_TMO;
34882 +- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
34883 +- | SAS_COPNRJT_RTRY_THR;
34884 +- SASConfigPage.MAX_AIP = SAS_MAX_AIP;
34885 ++ SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
34886 ++ SASConfigPage.MST_MSI = cpu_to_le32(3 << 15);
34887 ++ SASConfigPage.STP_SSP_MCT_TMO =
34888 ++ cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO);
34889 ++ SASConfigPage.STP_FRM_TMO =
34890 ++ cpu_to_le32((SAS_MAX_OPEN_TIME << 24) |
34891 ++ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER);
34892 ++ SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME);
34893 ++
34894 ++ SASConfigPage.OPNRJT_RTRY_INTVL =
34895 ++ cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL);
34896 ++ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =
34897 ++ cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO);
34898 ++ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =
34899 ++ cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR);
34900 ++ SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
34901 +
34902 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
34903 +- SASConfigPage.pageCode);
34904 ++ le32_to_cpu(SASConfigPage.pageCode));
34905 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
34906 +- SASConfigPage.MST_MSI);
34907 ++ le32_to_cpu(SASConfigPage.MST_MSI));
34908 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
34909 +- SASConfigPage.STP_SSP_MCT_TMO);
34910 ++ le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO));
34911 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
34912 +- SASConfigPage.STP_FRM_TMO);
34913 ++ le32_to_cpu(SASConfigPage.STP_FRM_TMO));
34914 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
34915 +- SASConfigPage.STP_IDLE_TMO);
34916 ++ le32_to_cpu(SASConfigPage.STP_IDLE_TMO));
34917 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
34918 +- SASConfigPage.OPNRJT_RTRY_INTVL);
34919 ++ le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL));
34920 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
34921 +- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
34922 ++ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
34923 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
34924 +- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
34925 ++ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
34926 + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
34927 +- SASConfigPage.MAX_AIP);
34928 ++ le32_to_cpu(SASConfigPage.MAX_AIP));
34929 +
34930 + memcpy(&payload.cfg_pg, &SASConfigPage,
34931 + sizeof(SASProtocolTimerConfig_t));
34932 +@@ -1407,12 +1407,13 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
34933 + /* Currently only one key is used. New KEK index is 1.
34934 + * Current KEK index is 1. Store KEK to NVRAM is 1.
34935 + */
34936 +- payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
34937 +- KEK_MGMT_SUBOP_KEYCARDUPDATE);
34938 ++ payload.new_curidx_ksop =
34939 ++ cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) |
34940 ++ KEK_MGMT_SUBOP_KEYCARDUPDATE));
34941 +
34942 + pm8001_dbg(pm8001_ha, DEV,
34943 + "Saving Encryption info to flash. payload 0x%x\n",
34944 +- payload.new_curidx_ksop);
34945 ++ le32_to_cpu(payload.new_curidx_ksop));
34946 +
34947 + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
34948 + sizeof(payload), 0);
34949 +@@ -1801,6 +1802,7 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
34950 + ccb->device = pm8001_ha_dev;
34951 + ccb->ccb_tag = ccb_tag;
34952 + ccb->task = task;
34953 ++ ccb->n_elem = 0;
34954 +
34955 + circularQ = &pm8001_ha->inbnd_q_tbl[0];
34956 +
34957 +@@ -1882,7 +1884,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
34958 +
34959 + sata_cmd.tag = cpu_to_le32(ccb_tag);
34960 + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
34961 +- sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
34962 ++ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9)));
34963 + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
34964 +
34965 + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
34966 +@@ -2510,7 +2512,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
34967 + len = sizeof(struct pio_setup_fis);
34968 + pm8001_dbg(pm8001_ha, IO,
34969 + "PIO read len = %d\n", len);
34970 +- } else if (t->ata_task.use_ncq) {
34971 ++ } else if (t->ata_task.use_ncq &&
34972 ++ t->data_dir != DMA_NONE) {
34973 + len = sizeof(struct set_dev_bits_fis);
34974 + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
34975 + len);
34976 +@@ -4379,13 +4382,15 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
34977 + struct ssp_ini_io_start_req ssp_cmd;
34978 + u32 tag = ccb->ccb_tag;
34979 + int ret;
34980 +- u64 phys_addr, start_addr, end_addr;
34981 ++ u64 phys_addr, end_addr;
34982 + u32 end_addr_high, end_addr_low;
34983 + struct inbound_queue_table *circularQ;
34984 + u32 q_index, cpu_id;
34985 + u32 opc = OPC_INB_SSPINIIOSTART;
34986 ++
34987 + memset(&ssp_cmd, 0, sizeof(ssp_cmd));
34988 + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
34989 ++
34990 + /* data address domain added for spcv; set to 0 by host,
34991 + * used internally by controller
34992 + * 0 for SAS 1.1 and SAS 2.0 compatible TLR
34993 +@@ -4396,7 +4401,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
34994 + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
34995 + ssp_cmd.tag = cpu_to_le32(tag);
34996 + if (task->ssp_task.enable_first_burst)
34997 +- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
34998 ++ ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
34999 + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
35000 + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
35001 + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
35002 +@@ -4428,21 +4433,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
35003 + ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
35004 + } else if (task->num_scatter == 1) {
35005 + u64 dma_addr = sg_dma_address(task->scatter);
35006 ++
35007 + ssp_cmd.enc_addr_low =
35008 + cpu_to_le32(lower_32_bits(dma_addr));
35009 + ssp_cmd.enc_addr_high =
35010 + cpu_to_le32(upper_32_bits(dma_addr));
35011 + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
35012 + ssp_cmd.enc_esgl = 0;
35013 ++
35014 + /* Check 4G Boundary */
35015 +- start_addr = cpu_to_le64(dma_addr);
35016 +- end_addr = (start_addr + ssp_cmd.enc_len) - 1;
35017 +- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
35018 +- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
35019 +- if (end_addr_high != ssp_cmd.enc_addr_high) {
35020 ++ end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1;
35021 ++ end_addr_low = lower_32_bits(end_addr);
35022 ++ end_addr_high = upper_32_bits(end_addr);
35023 ++
35024 ++ if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) {
35025 + pm8001_dbg(pm8001_ha, FAIL,
35026 + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
35027 +- start_addr, ssp_cmd.enc_len,
35028 ++ dma_addr,
35029 ++ le32_to_cpu(ssp_cmd.enc_len),
35030 + end_addr_high, end_addr_low);
35031 + pm8001_chip_make_sg(task->scatter, 1,
35032 + ccb->buf_prd);
35033 +@@ -4451,7 +4459,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
35034 + cpu_to_le32(lower_32_bits(phys_addr));
35035 + ssp_cmd.enc_addr_high =
35036 + cpu_to_le32(upper_32_bits(phys_addr));
35037 +- ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
35038 ++ ssp_cmd.enc_esgl = cpu_to_le32(1U<<31);
35039 + }
35040 + } else if (task->num_scatter == 0) {
35041 + ssp_cmd.enc_addr_low = 0;
35042 +@@ -4459,8 +4467,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
35043 + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
35044 + ssp_cmd.enc_esgl = 0;
35045 + }
35046 ++
35047 + /* XTS mode. All other fields are 0 */
35048 +- ssp_cmd.key_cmode = 0x6 << 4;
35049 ++ ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4);
35050 ++
35051 + /* set tweak values. Should be the start lba */
35052 + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
35053 + (task->ssp_task.cmd->cmnd[3] << 16) |
35054 +@@ -4482,20 +4492,22 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
35055 + ssp_cmd.esgl = cpu_to_le32(1<<31);
35056 + } else if (task->num_scatter == 1) {
35057 + u64 dma_addr = sg_dma_address(task->scatter);
35058 ++
35059 + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
35060 + ssp_cmd.addr_high =
35061 + cpu_to_le32(upper_32_bits(dma_addr));
35062 + ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
35063 + ssp_cmd.esgl = 0;
35064 ++
35065 + /* Check 4G Boundary */
35066 +- start_addr = cpu_to_le64(dma_addr);
35067 +- end_addr = (start_addr + ssp_cmd.len) - 1;
35068 +- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
35069 +- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
35070 +- if (end_addr_high != ssp_cmd.addr_high) {
35071 ++ end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1;
35072 ++ end_addr_low = lower_32_bits(end_addr);
35073 ++ end_addr_high = upper_32_bits(end_addr);
35074 ++ if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) {
35075 + pm8001_dbg(pm8001_ha, FAIL,
35076 + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
35077 +- start_addr, ssp_cmd.len,
35078 ++ dma_addr,
35079 ++ le32_to_cpu(ssp_cmd.len),
35080 + end_addr_high, end_addr_low);
35081 + pm8001_chip_make_sg(task->scatter, 1,
35082 + ccb->buf_prd);
35083 +@@ -4530,7 +4542,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35084 + u32 q_index, cpu_id;
35085 + struct sata_start_req sata_cmd;
35086 + u32 hdr_tag, ncg_tag = 0;
35087 +- u64 phys_addr, start_addr, end_addr;
35088 ++ u64 phys_addr, end_addr;
35089 + u32 end_addr_high, end_addr_low;
35090 + u32 ATAP = 0x0;
35091 + u32 dir;
35092 +@@ -4542,22 +4554,21 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35093 + q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
35094 + circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
35095 +
35096 +- if (task->data_dir == DMA_NONE) {
35097 ++ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
35098 + ATAP = 0x04; /* no data*/
35099 + pm8001_dbg(pm8001_ha, IO, "no data\n");
35100 + } else if (likely(!task->ata_task.device_control_reg_update)) {
35101 +- if (task->ata_task.dma_xfer) {
35102 ++ if (task->ata_task.use_ncq &&
35103 ++ dev->sata_dev.class != ATA_DEV_ATAPI) {
35104 ++ ATAP = 0x07; /* FPDMA */
35105 ++ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
35106 ++ } else if (task->ata_task.dma_xfer) {
35107 + ATAP = 0x06; /* DMA */
35108 + pm8001_dbg(pm8001_ha, IO, "DMA\n");
35109 + } else {
35110 + ATAP = 0x05; /* PIO*/
35111 + pm8001_dbg(pm8001_ha, IO, "PIO\n");
35112 + }
35113 +- if (task->ata_task.use_ncq &&
35114 +- dev->sata_dev.class != ATA_DEV_ATAPI) {
35115 +- ATAP = 0x07; /* FPDMA */
35116 +- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
35117 +- }
35118 + }
35119 + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
35120 + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
35121 +@@ -4591,32 +4602,38 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35122 + pm8001_chip_make_sg(task->scatter,
35123 + ccb->n_elem, ccb->buf_prd);
35124 + phys_addr = ccb->ccb_dma_handle;
35125 +- sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
35126 +- sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
35127 ++ sata_cmd.enc_addr_low =
35128 ++ cpu_to_le32(lower_32_bits(phys_addr));
35129 ++ sata_cmd.enc_addr_high =
35130 ++ cpu_to_le32(upper_32_bits(phys_addr));
35131 + sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
35132 + } else if (task->num_scatter == 1) {
35133 + u64 dma_addr = sg_dma_address(task->scatter);
35134 +- sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
35135 +- sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
35136 ++
35137 ++ sata_cmd.enc_addr_low =
35138 ++ cpu_to_le32(lower_32_bits(dma_addr));
35139 ++ sata_cmd.enc_addr_high =
35140 ++ cpu_to_le32(upper_32_bits(dma_addr));
35141 + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
35142 + sata_cmd.enc_esgl = 0;
35143 ++
35144 + /* Check 4G Boundary */
35145 +- start_addr = cpu_to_le64(dma_addr);
35146 +- end_addr = (start_addr + sata_cmd.enc_len) - 1;
35147 +- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
35148 +- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
35149 +- if (end_addr_high != sata_cmd.enc_addr_high) {
35150 ++ end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1;
35151 ++ end_addr_low = lower_32_bits(end_addr);
35152 ++ end_addr_high = upper_32_bits(end_addr);
35153 ++ if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) {
35154 + pm8001_dbg(pm8001_ha, FAIL,
35155 + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
35156 +- start_addr, sata_cmd.enc_len,
35157 ++ dma_addr,
35158 ++ le32_to_cpu(sata_cmd.enc_len),
35159 + end_addr_high, end_addr_low);
35160 + pm8001_chip_make_sg(task->scatter, 1,
35161 + ccb->buf_prd);
35162 + phys_addr = ccb->ccb_dma_handle;
35163 + sata_cmd.enc_addr_low =
35164 +- lower_32_bits(phys_addr);
35165 ++ cpu_to_le32(lower_32_bits(phys_addr));
35166 + sata_cmd.enc_addr_high =
35167 +- upper_32_bits(phys_addr);
35168 ++ cpu_to_le32(upper_32_bits(phys_addr));
35169 + sata_cmd.enc_esgl =
35170 + cpu_to_le32(1 << 31);
35171 + }
35172 +@@ -4627,7 +4644,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35173 + sata_cmd.enc_esgl = 0;
35174 + }
35175 + /* XTS mode. All other fields are 0 */
35176 +- sata_cmd.key_index_mode = 0x6 << 4;
35177 ++ sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4);
35178 ++
35179 + /* set tweak values. Should be the start lba */
35180 + sata_cmd.twk_val0 =
35181 + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
35182 +@@ -4653,31 +4671,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35183 + phys_addr = ccb->ccb_dma_handle;
35184 + sata_cmd.addr_low = lower_32_bits(phys_addr);
35185 + sata_cmd.addr_high = upper_32_bits(phys_addr);
35186 +- sata_cmd.esgl = cpu_to_le32(1 << 31);
35187 ++ sata_cmd.esgl = cpu_to_le32(1U << 31);
35188 + } else if (task->num_scatter == 1) {
35189 + u64 dma_addr = sg_dma_address(task->scatter);
35190 ++
35191 + sata_cmd.addr_low = lower_32_bits(dma_addr);
35192 + sata_cmd.addr_high = upper_32_bits(dma_addr);
35193 + sata_cmd.len = cpu_to_le32(task->total_xfer_len);
35194 + sata_cmd.esgl = 0;
35195 ++
35196 + /* Check 4G Boundary */
35197 +- start_addr = cpu_to_le64(dma_addr);
35198 +- end_addr = (start_addr + sata_cmd.len) - 1;
35199 +- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
35200 +- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
35201 ++ end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1;
35202 ++ end_addr_low = lower_32_bits(end_addr);
35203 ++ end_addr_high = upper_32_bits(end_addr);
35204 + if (end_addr_high != sata_cmd.addr_high) {
35205 + pm8001_dbg(pm8001_ha, FAIL,
35206 + "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
35207 +- start_addr, sata_cmd.len,
35208 ++ dma_addr,
35209 ++ le32_to_cpu(sata_cmd.len),
35210 + end_addr_high, end_addr_low);
35211 + pm8001_chip_make_sg(task->scatter, 1,
35212 + ccb->buf_prd);
35213 + phys_addr = ccb->ccb_dma_handle;
35214 +- sata_cmd.addr_low =
35215 +- lower_32_bits(phys_addr);
35216 +- sata_cmd.addr_high =
35217 +- upper_32_bits(phys_addr);
35218 +- sata_cmd.esgl = cpu_to_le32(1 << 31);
35219 ++ sata_cmd.addr_low = lower_32_bits(phys_addr);
35220 ++ sata_cmd.addr_high = upper_32_bits(phys_addr);
35221 ++ sata_cmd.esgl = cpu_to_le32(1U << 31);
35222 + }
35223 + } else if (task->num_scatter == 0) {
35224 + sata_cmd.addr_low = 0;
35225 +@@ -4685,27 +4703,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
35226 + sata_cmd.len = cpu_to_le32(task->total_xfer_len);
35227 + sata_cmd.esgl = 0;
35228 + }
35229 ++
35230 + /* scsi cdb */
35231 + sata_cmd.atapi_scsi_cdb[0] =
35232 + cpu_to_le32(((task->ata_task.atapi_packet[0]) |
35233 +- (task->ata_task.atapi_packet[1] << 8) |
35234 +- (task->ata_task.atapi_packet[2] << 16) |
35235 +- (task->ata_task.atapi_packet[3] << 24)));
35236 ++ (task->ata_task.atapi_packet[1] << 8) |
35237 ++ (task->ata_task.atapi_packet[2] << 16) |
35238 ++ (task->ata_task.atapi_packet[3] << 24)));
35239 + sata_cmd.atapi_scsi_cdb[1] =
35240 + cpu_to_le32(((task->ata_task.atapi_packet[4]) |
35241 +- (task->ata_task.atapi_packet[5] << 8) |
35242 +- (task->ata_task.atapi_packet[6] << 16) |
35243 +- (task->ata_task.atapi_packet[7] << 24)));
35244 ++ (task->ata_task.atapi_packet[5] << 8) |
35245 ++ (task->ata_task.atapi_packet[6] << 16) |
35246 ++ (task->ata_task.atapi_packet[7] << 24)));
35247 + sata_cmd.atapi_scsi_cdb[2] =
35248 + cpu_to_le32(((task->ata_task.atapi_packet[8]) |
35249 +- (task->ata_task.atapi_packet[9] << 8) |
35250 +- (task->ata_task.atapi_packet[10] << 16) |
35251 +- (task->ata_task.atapi_packet[11] << 24)));
35252 ++ (task->ata_task.atapi_packet[9] << 8) |
35253 ++ (task->ata_task.atapi_packet[10] << 16) |
35254 ++ (task->ata_task.atapi_packet[11] << 24)));
35255 + sata_cmd.atapi_scsi_cdb[3] =
35256 + cpu_to_le32(((task->ata_task.atapi_packet[12]) |
35257 +- (task->ata_task.atapi_packet[13] << 8) |
35258 +- (task->ata_task.atapi_packet[14] << 16) |
35259 +- (task->ata_task.atapi_packet[15] << 24)));
35260 ++ (task->ata_task.atapi_packet[13] << 8) |
35261 ++ (task->ata_task.atapi_packet[14] << 16) |
35262 ++ (task->ata_task.atapi_packet[15] << 24)));
35263 + }
35264 +
35265 + /* Check for read log for failed drive and return */
35266 +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
35267 +index db55737000ab5..3b3e4234f37a0 100644
35268 +--- a/drivers/scsi/qla2xxx/qla_attr.c
35269 ++++ b/drivers/scsi/qla2xxx/qla_attr.c
35270 +@@ -555,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
35271 + if (!capable(CAP_SYS_ADMIN))
35272 + return -EINVAL;
35273 +
35274 +- if (IS_NOCACHE_VPD_TYPE(ha))
35275 ++ if (!IS_NOCACHE_VPD_TYPE(ha))
35276 + goto skip;
35277 +
35278 + faddr = ha->flt_region_vpd << 2;
35279 +@@ -745,7 +745,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
35280 + ql_log(ql_log_info, vha, 0x706f,
35281 + "Issuing MPI reset.\n");
35282 +
35283 +- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
35284 ++ if (IS_QLA83XX(ha)) {
35285 + uint32_t idc_control;
35286 +
35287 + qla83xx_idc_lock(vha, 0);
35288 +@@ -1056,9 +1056,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
35289 + continue;
35290 + if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
35291 + continue;
35292 +- if (iter->type == 0x27 &&
35293 +- (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
35294 +- continue;
35295 +
35296 + sysfs_remove_bin_file(&host->shost_gendev.kobj,
35297 + iter->attr);
35298 +diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
35299 +index 9da8034ccad40..c2f00f076f799 100644
35300 +--- a/drivers/scsi/qla2xxx/qla_bsg.c
35301 ++++ b/drivers/scsi/qla2xxx/qla_bsg.c
35302 +@@ -29,7 +29,8 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
35303 + "%s: sp hdl %x, result=%x bsg ptr %p\n",
35304 + __func__, sp->handle, res, bsg_job);
35305 +
35306 +- sp->free(sp);
35307 ++ /* ref: INIT */
35308 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35309 +
35310 + bsg_reply->result = res;
35311 + bsg_job_done(bsg_job, bsg_reply->result,
35312 +@@ -3013,7 +3014,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
35313 +
35314 + done:
35315 + spin_unlock_irqrestore(&ha->hardware_lock, flags);
35316 +- sp->free(sp);
35317 ++ /* ref: INIT */
35318 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35319 + return 0;
35320 + }
35321 +
35322 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
35323 +index 9ebf4a234d9a9..aefb29d7c7aee 100644
35324 +--- a/drivers/scsi/qla2xxx/qla_def.h
35325 ++++ b/drivers/scsi/qla2xxx/qla_def.h
35326 +@@ -726,6 +726,11 @@ typedef struct srb {
35327 + * code.
35328 + */
35329 + void (*put_fn)(struct kref *kref);
35330 ++
35331 ++ /*
35332 ++ * Report completion for asynchronous commands.
35333 ++ */
35334 ++ void (*async_done)(struct srb *sp, int res);
35335 + } srb_t;
35336 +
35337 + #define GET_CMD_SP(sp) (sp->u.scmd.cmd)
35338 +@@ -2886,7 +2891,11 @@ struct ct_fdmi2_hba_attributes {
35339 + #define FDMI_PORT_SPEED_8GB 0x10
35340 + #define FDMI_PORT_SPEED_16GB 0x20
35341 + #define FDMI_PORT_SPEED_32GB 0x40
35342 +-#define FDMI_PORT_SPEED_64GB 0x80
35343 ++#define FDMI_PORT_SPEED_20GB 0x80
35344 ++#define FDMI_PORT_SPEED_40GB 0x100
35345 ++#define FDMI_PORT_SPEED_128GB 0x200
35346 ++#define FDMI_PORT_SPEED_64GB 0x400
35347 ++#define FDMI_PORT_SPEED_256GB 0x800
35348 + #define FDMI_PORT_SPEED_UNKNOWN 0x8000
35349 +
35350 + #define FC_CLASS_2 0x04
35351 +@@ -4262,8 +4271,10 @@ struct qla_hw_data {
35352 + #define QLA_ABTS_WAIT_ENABLED(_sp) \
35353 + (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
35354 +
35355 +-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
35356 +-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
35357 ++#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
35358 ++ IS_QLA28XX(ha))
35359 ++#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
35360 ++ IS_QLA28XX(ha))
35361 + #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
35362 + #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
35363 + IS_QLA28XX(ha))
35364 +@@ -4610,6 +4621,7 @@ struct qla_hw_data {
35365 + struct workqueue_struct *wq;
35366 + struct work_struct heartbeat_work;
35367 + struct qlfc_fw fw_buf;
35368 ++ unsigned long last_heartbeat_run_jiffies;
35369 +
35370 + /* FCP_CMND priority support */
35371 + struct qla_fcp_prio_cfg *fcp_prio_cfg;
35372 +@@ -5427,4 +5439,8 @@ struct ql_vnd_tgt_stats_resp {
35373 + #include "qla_gbl.h"
35374 + #include "qla_dbg.h"
35375 + #include "qla_inline.h"
35376 ++
35377 ++#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
35378 ++ _fcport->disc_state == DSC_DELETED)
35379 ++
35380 + #endif
35381 +diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
35382 +index 53d2b85620271..0628633c7c7e9 100644
35383 +--- a/drivers/scsi/qla2xxx/qla_edif.c
35384 ++++ b/drivers/scsi/qla2xxx/qla_edif.c
35385 +@@ -668,6 +668,11 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
35386 + bsg_job->request_payload.sg_cnt, &appplogiok,
35387 + sizeof(struct auth_complete_cmd));
35388 +
35389 ++ /* silent unaligned access warning */
35390 ++ portid.b.domain = appplogiok.u.d_id.b.domain;
35391 ++ portid.b.area = appplogiok.u.d_id.b.area;
35392 ++ portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
35393 ++
35394 + switch (appplogiok.type) {
35395 + case PL_TYPE_WWPN:
35396 + fcport = qla2x00_find_fcport_by_wwpn(vha,
35397 +@@ -678,7 +683,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
35398 + __func__, appplogiok.u.wwpn);
35399 + break;
35400 + case PL_TYPE_DID:
35401 +- fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
35402 ++ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
35403 + if (!fcport)
35404 + ql_dbg(ql_dbg_edif, vha, 0x911d,
35405 + "%s d_id lookup failed: %x\n", __func__,
35406 +@@ -777,6 +782,11 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
35407 + bsg_job->request_payload.sg_cnt, &appplogifail,
35408 + sizeof(struct auth_complete_cmd));
35409 +
35410 ++ /* silent unaligned access warning */
35411 ++ portid.b.domain = appplogifail.u.d_id.b.domain;
35412 ++ portid.b.area = appplogifail.u.d_id.b.area;
35413 ++ portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
35414 ++
35415 + /*
35416 + * TODO: edif: app has failed this plogi. Inform driver to
35417 + * take any action (if any).
35418 +@@ -788,7 +798,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
35419 + SET_DID_STATUS(bsg_reply->result, DID_OK);
35420 + break;
35421 + case PL_TYPE_DID:
35422 +- fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
35423 ++ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
35424 + if (!fcport)
35425 + ql_dbg(ql_dbg_edif, vha, 0x911d,
35426 + "%s d_id lookup failed: %x\n", __func__,
35427 +@@ -1253,6 +1263,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
35428 + int result = 0;
35429 + struct qla_sa_update_frame sa_frame;
35430 + struct srb_iocb *iocb_cmd;
35431 ++ port_id_t portid;
35432 +
35433 + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
35434 + "%s entered, vha: 0x%p\n", __func__, vha);
35435 +@@ -1276,7 +1287,12 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
35436 + goto done;
35437 + }
35438 +
35439 +- fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
35440 ++ /* silent unaligned access warning */
35441 ++ portid.b.domain = sa_frame.port_id.b.domain;
35442 ++ portid.b.area = sa_frame.port_id.b.area;
35443 ++ portid.b.al_pa = sa_frame.port_id.b.al_pa;
35444 ++
35445 ++ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
35446 + if (fcport) {
35447 + found = 1;
35448 + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
35449 +@@ -2146,7 +2162,8 @@ edif_doorbell_show(struct device *dev, struct device_attribute *attr,
35450 +
35451 + static void qla_noop_sp_done(srb_t *sp, int res)
35452 + {
35453 +- sp->free(sp);
35454 ++ /* ref: INIT */
35455 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35456 + }
35457 +
35458 + /*
35459 +diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
35460 +index 8d8503a284790..3f8b8bbabe6de 100644
35461 +--- a/drivers/scsi/qla2xxx/qla_gbl.h
35462 ++++ b/drivers/scsi/qla2xxx/qla_gbl.h
35463 +@@ -316,7 +316,8 @@ extern int qla2x00_start_sp(srb_t *);
35464 + extern int qla24xx_dif_start_scsi(srb_t *);
35465 + extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
35466 + extern int qla2xxx_dif_start_scsi_mq(srb_t *);
35467 +-extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
35468 ++extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
35469 ++ void (*done)(struct srb *, int));
35470 + extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
35471 +
35472 + extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
35473 +@@ -332,6 +333,7 @@ extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
35474 + extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
35475 + extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
35476 + struct qla_work_evt *e);
35477 ++void qla2x00_sp_release(struct kref *kref);
35478 +
35479 + /*
35480 + * Global Function Prototypes in qla_mbx.c source file.
35481 +diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
35482 +index 28b574e20ef32..6b67bd561810d 100644
35483 +--- a/drivers/scsi/qla2xxx/qla_gs.c
35484 ++++ b/drivers/scsi/qla2xxx/qla_gs.c
35485 +@@ -529,7 +529,6 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
35486 + if (!e)
35487 + goto err2;
35488 +
35489 +- del_timer(&sp->u.iocb_cmd.timer);
35490 + e->u.iosb.sp = sp;
35491 + qla2x00_post_work(vha, e);
35492 + return;
35493 +@@ -556,8 +555,8 @@ err2:
35494 + sp->u.iocb_cmd.u.ctarg.rsp = NULL;
35495 + }
35496 +
35497 +- sp->free(sp);
35498 +-
35499 ++ /* ref: INIT */
35500 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35501 + return;
35502 + }
35503 +
35504 +@@ -592,13 +591,15 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
35505 + if (!vha->flags.online)
35506 + goto done;
35507 +
35508 ++ /* ref: INIT */
35509 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35510 + if (!sp)
35511 + goto done;
35512 +
35513 + sp->type = SRB_CT_PTHRU_CMD;
35514 + sp->name = "rft_id";
35515 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35516 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35517 ++ qla2x00_async_sns_sp_done);
35518 +
35519 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
35520 + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
35521 +@@ -638,8 +639,6 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
35522 + sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
35523 + sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
35524 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35525 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35526 +- sp->done = qla2x00_async_sns_sp_done;
35527 +
35528 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35529 + "Async-%s - hdl=%x portid %06x.\n",
35530 +@@ -653,7 +652,8 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
35531 + }
35532 + return rval;
35533 + done_free_sp:
35534 +- sp->free(sp);
35535 ++ /* ref: INIT */
35536 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35537 + done:
35538 + return rval;
35539 + }
35540 +@@ -676,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
35541 + return (QLA_SUCCESS);
35542 + }
35543 +
35544 +- return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
35545 +- FC4_TYPE_FCP_SCSI);
35546 ++ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
35547 + }
35548 +
35549 + static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
35550 +@@ -688,13 +687,15 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
35551 + srb_t *sp;
35552 + struct ct_sns_pkt *ct_sns;
35553 +
35554 ++ /* ref: INIT */
35555 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35556 + if (!sp)
35557 + goto done;
35558 +
35559 + sp->type = SRB_CT_PTHRU_CMD;
35560 + sp->name = "rff_id";
35561 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35562 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35563 ++ qla2x00_async_sns_sp_done);
35564 +
35565 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
35566 + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
35567 +@@ -727,13 +728,11 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
35568 + /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
35569 + ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
35570 + ct_req->req.rff_id.fc4_feature = fc4feature;
35571 +- ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
35572 ++ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
35573 +
35574 + sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
35575 + sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
35576 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35577 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35578 +- sp->done = qla2x00_async_sns_sp_done;
35579 +
35580 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35581 + "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
35582 +@@ -749,7 +748,8 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
35583 + return rval;
35584 +
35585 + done_free_sp:
35586 +- sp->free(sp);
35587 ++ /* ref: INIT */
35588 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35589 + done:
35590 + return rval;
35591 + }
35592 +@@ -779,13 +779,15 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
35593 + srb_t *sp;
35594 + struct ct_sns_pkt *ct_sns;
35595 +
35596 ++ /* ref: INIT */
35597 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35598 + if (!sp)
35599 + goto done;
35600 +
35601 + sp->type = SRB_CT_PTHRU_CMD;
35602 + sp->name = "rnid";
35603 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35604 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35605 ++ qla2x00_async_sns_sp_done);
35606 +
35607 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
35608 + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
35609 +@@ -823,9 +825,6 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
35610 + sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
35611 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35612 +
35613 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35614 +- sp->done = qla2x00_async_sns_sp_done;
35615 +-
35616 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35617 + "Async-%s - hdl=%x portid %06x\n",
35618 + sp->name, sp->handle, d_id->b24);
35619 +@@ -840,7 +839,8 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
35620 + return rval;
35621 +
35622 + done_free_sp:
35623 +- sp->free(sp);
35624 ++ /* ref: INIT */
35625 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35626 + done:
35627 + return rval;
35628 + }
35629 +@@ -886,13 +886,15 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
35630 + srb_t *sp;
35631 + struct ct_sns_pkt *ct_sns;
35632 +
35633 ++ /* ref: INIT */
35634 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35635 + if (!sp)
35636 + goto done;
35637 +
35638 + sp->type = SRB_CT_PTHRU_CMD;
35639 + sp->name = "rsnn_nn";
35640 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35641 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35642 ++ qla2x00_async_sns_sp_done);
35643 +
35644 + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
35645 + sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
35646 +@@ -936,9 +938,6 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
35647 + sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
35648 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35649 +
35650 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35651 +- sp->done = qla2x00_async_sns_sp_done;
35652 +-
35653 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35654 + "Async-%s - hdl=%x.\n",
35655 + sp->name, sp->handle);
35656 +@@ -953,7 +952,8 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
35657 + return rval;
35658 +
35659 + done_free_sp:
35660 +- sp->free(sp);
35661 ++ /* ref: INIT */
35662 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35663 + done:
35664 + return rval;
35665 + }
35666 +@@ -2893,7 +2893,8 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
35667 + qla24xx_handle_gpsc_event(vha, &ea);
35668 +
35669 + done:
35670 +- sp->free(sp);
35671 ++ /* ref: INIT */
35672 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35673 + }
35674 +
35675 + int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
35676 +@@ -2905,6 +2906,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
35677 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
35678 + return rval;
35679 +
35680 ++ /* ref: INIT */
35681 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
35682 + if (!sp)
35683 + goto done;
35684 +@@ -2913,8 +2915,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
35685 + sp->name = "gpsc";
35686 + sp->gen1 = fcport->rscn_gen;
35687 + sp->gen2 = fcport->login_gen;
35688 +-
35689 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35690 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35691 ++ qla24xx_async_gpsc_sp_done);
35692 +
35693 + /* CT_IU preamble */
35694 + ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
35695 +@@ -2932,9 +2934,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
35696 + sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
35697 + sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
35698 +
35699 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35700 +- sp->done = qla24xx_async_gpsc_sp_done;
35701 +-
35702 + ql_dbg(ql_dbg_disc, vha, 0x205e,
35703 + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
35704 + sp->name, fcport->port_name, sp->handle,
35705 +@@ -2947,7 +2946,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
35706 + return rval;
35707 +
35708 + done_free_sp:
35709 +- sp->free(sp);
35710 ++ /* ref: INIT */
35711 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35712 + done:
35713 + return rval;
35714 + }
35715 +@@ -2996,7 +2996,8 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
35716 + break;
35717 + }
35718 +
35719 +- sp->free(sp);
35720 ++ /* ref: INIT */
35721 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35722 + }
35723 +
35724 + void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
35725 +@@ -3135,13 +3136,15 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
35726 + if (res) {
35727 + if (res == QLA_FUNCTION_TIMEOUT) {
35728 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
35729 +- sp->free(sp);
35730 ++ /* ref: INIT */
35731 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35732 + return;
35733 + }
35734 + } else if (sp->gen1) {
35735 + /* There was another RSCN for this Nport ID */
35736 + qla24xx_post_gpnid_work(sp->vha, &ea.id);
35737 +- sp->free(sp);
35738 ++ /* ref: INIT */
35739 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35740 + return;
35741 + }
35742 +
35743 +@@ -3162,7 +3165,8 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
35744 + sp->u.iocb_cmd.u.ctarg.rsp_dma);
35745 + sp->u.iocb_cmd.u.ctarg.rsp = NULL;
35746 +
35747 +- sp->free(sp);
35748 ++ /* ref: INIT */
35749 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35750 + return;
35751 + }
35752 +
35753 +@@ -3182,6 +3186,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
35754 + if (!vha->flags.online)
35755 + goto done;
35756 +
35757 ++ /* ref: INIT */
35758 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35759 + if (!sp)
35760 + goto done;
35761 +@@ -3190,14 +3195,16 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
35762 + sp->name = "gpnid";
35763 + sp->u.iocb_cmd.u.ctarg.id = *id;
35764 + sp->gen1 = 0;
35765 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35766 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35767 ++ qla2x00_async_gpnid_sp_done);
35768 +
35769 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
35770 + list_for_each_entry(tsp, &vha->gpnid_list, elem) {
35771 + if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
35772 + tsp->gen1++;
35773 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
35774 +- sp->free(sp);
35775 ++ /* ref: INIT */
35776 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35777 + goto done;
35778 + }
35779 + }
35780 +@@ -3238,9 +3245,6 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
35781 + sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
35782 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35783 +
35784 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35785 +- sp->done = qla2x00_async_gpnid_sp_done;
35786 +-
35787 + ql_dbg(ql_dbg_disc, vha, 0x2067,
35788 + "Async-%s hdl=%x ID %3phC.\n", sp->name,
35789 + sp->handle, &ct_req->req.port_id.port_id);
35790 +@@ -3270,8 +3274,8 @@ done_free_sp:
35791 + sp->u.iocb_cmd.u.ctarg.rsp_dma);
35792 + sp->u.iocb_cmd.u.ctarg.rsp = NULL;
35793 + }
35794 +-
35795 +- sp->free(sp);
35796 ++ /* ref: INIT */
35797 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35798 + done:
35799 + return rval;
35800 + }
35801 +@@ -3326,7 +3330,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
35802 + ea.rc = res;
35803 +
35804 + qla24xx_handle_gffid_event(vha, &ea);
35805 +- sp->free(sp);
35806 ++ /* ref: INIT */
35807 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35808 + }
35809 +
35810 + /* Get FC4 Feature with Nport ID. */
35811 +@@ -3339,6 +3344,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
35812 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
35813 + return rval;
35814 +
35815 ++ /* ref: INIT */
35816 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
35817 + if (!sp)
35818 + return rval;
35819 +@@ -3348,9 +3354,8 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
35820 + sp->name = "gffid";
35821 + sp->gen1 = fcport->rscn_gen;
35822 + sp->gen2 = fcport->login_gen;
35823 +-
35824 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35825 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35826 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35827 ++ qla24xx_async_gffid_sp_done);
35828 +
35829 + /* CT_IU preamble */
35830 + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
35831 +@@ -3368,8 +3373,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
35832 + sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
35833 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35834 +
35835 +- sp->done = qla24xx_async_gffid_sp_done;
35836 +-
35837 + ql_dbg(ql_dbg_disc, vha, 0x2132,
35838 + "Async-%s hdl=%x %8phC.\n", sp->name,
35839 + sp->handle, fcport->port_name);
35840 +@@ -3380,7 +3383,8 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
35841 +
35842 + return rval;
35843 + done_free_sp:
35844 +- sp->free(sp);
35845 ++ /* ref: INIT */
35846 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35847 + fcport->flags &= ~FCF_ASYNC_SENT;
35848 + return rval;
35849 + }
35850 +@@ -3767,7 +3771,6 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
35851 + "Async done-%s res %x FC4Type %x\n",
35852 + sp->name, res, sp->gen2);
35853 +
35854 +- del_timer(&sp->u.iocb_cmd.timer);
35855 + sp->rc = res;
35856 + if (res) {
35857 + unsigned long flags;
35858 +@@ -3892,9 +3895,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
35859 + sp->name = "gnnft";
35860 + sp->gen1 = vha->hw->base_qpair->chip_reset;
35861 + sp->gen2 = fc4_type;
35862 +-
35863 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35864 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35865 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35866 ++ qla2x00_async_gpnft_gnnft_sp_done);
35867 +
35868 + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
35869 + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
35870 +@@ -3910,8 +3912,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
35871 + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
35872 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35873 +
35874 +- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
35875 +-
35876 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35877 + "Async-%s hdl=%x FC4Type %x.\n", sp->name,
35878 + sp->handle, ct_req->req.gpn_ft.port_type);
35879 +@@ -3938,8 +3938,8 @@ done_free_sp:
35880 + sp->u.iocb_cmd.u.ctarg.rsp_dma);
35881 + sp->u.iocb_cmd.u.ctarg.rsp = NULL;
35882 + }
35883 +-
35884 +- sp->free(sp);
35885 ++ /* ref: INIT */
35886 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35887 +
35888 + spin_lock_irqsave(&vha->work_lock, flags);
35889 + vha->scan.scan_flags &= ~SF_SCANNING;
35890 +@@ -3991,9 +3991,12 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
35891 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
35892 + "%s: Performing FCP Scan\n", __func__);
35893 +
35894 +- if (sp)
35895 +- sp->free(sp); /* should not happen */
35896 ++ if (sp) {
35897 ++ /* ref: INIT */
35898 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35899 ++ }
35900 +
35901 ++ /* ref: INIT */
35902 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
35903 + if (!sp) {
35904 + spin_lock_irqsave(&vha->work_lock, flags);
35905 +@@ -4038,6 +4041,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
35906 + sp->u.iocb_cmd.u.ctarg.req,
35907 + sp->u.iocb_cmd.u.ctarg.req_dma);
35908 + sp->u.iocb_cmd.u.ctarg.req = NULL;
35909 ++ /* ref: INIT */
35910 + qla2x00_rel_sp(sp);
35911 + return rval;
35912 + }
35913 +@@ -4057,9 +4061,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
35914 + sp->name = "gpnft";
35915 + sp->gen1 = vha->hw->base_qpair->chip_reset;
35916 + sp->gen2 = fc4_type;
35917 +-
35918 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35919 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35920 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35921 ++ qla2x00_async_gpnft_gnnft_sp_done);
35922 +
35923 + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
35924 + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
35925 +@@ -4074,8 +4077,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
35926 +
35927 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35928 +
35929 +- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
35930 +-
35931 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35932 + "Async-%s hdl=%x FC4Type %x.\n", sp->name,
35933 + sp->handle, ct_req->req.gpn_ft.port_type);
35934 +@@ -4103,7 +4104,8 @@ done_free_sp:
35935 + sp->u.iocb_cmd.u.ctarg.rsp = NULL;
35936 + }
35937 +
35938 +- sp->free(sp);
35939 ++ /* ref: INIT */
35940 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35941 +
35942 + spin_lock_irqsave(&vha->work_lock, flags);
35943 + vha->scan.scan_flags &= ~SF_SCANNING;
35944 +@@ -4167,7 +4169,8 @@ static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
35945 +
35946 + qla24xx_handle_gnnid_event(vha, &ea);
35947 +
35948 +- sp->free(sp);
35949 ++ /* ref: INIT */
35950 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35951 + }
35952 +
35953 + int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
35954 +@@ -4180,6 +4183,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
35955 + return rval;
35956 +
35957 + qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
35958 ++ /* ref: INIT */
35959 + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
35960 + if (!sp)
35961 + goto done;
35962 +@@ -4189,9 +4193,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
35963 + sp->name = "gnnid";
35964 + sp->gen1 = fcport->rscn_gen;
35965 + sp->gen2 = fcport->login_gen;
35966 +-
35967 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
35968 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
35969 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
35970 ++ qla2x00_async_gnnid_sp_done);
35971 +
35972 + /* CT_IU preamble */
35973 + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
35974 +@@ -4210,8 +4213,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
35975 + sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
35976 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
35977 +
35978 +- sp->done = qla2x00_async_gnnid_sp_done;
35979 +-
35980 + ql_dbg(ql_dbg_disc, vha, 0xffff,
35981 + "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
35982 + sp->name, fcport->port_name,
35983 +@@ -4223,7 +4224,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
35984 + return rval;
35985 +
35986 + done_free_sp:
35987 +- sp->free(sp);
35988 ++ /* ref: INIT */
35989 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
35990 + fcport->flags &= ~FCF_ASYNC_SENT;
35991 + done:
35992 + return rval;
35993 +@@ -4297,7 +4299,8 @@ static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
35994 +
35995 + qla24xx_handle_gfpnid_event(vha, &ea);
35996 +
35997 +- sp->free(sp);
35998 ++ /* ref: INIT */
35999 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36000 + }
36001 +
36002 + int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
36003 +@@ -4309,6 +4312,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
36004 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
36005 + return rval;
36006 +
36007 ++ /* ref: INIT */
36008 + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
36009 + if (!sp)
36010 + goto done;
36011 +@@ -4317,9 +4321,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
36012 + sp->name = "gfpnid";
36013 + sp->gen1 = fcport->rscn_gen;
36014 + sp->gen2 = fcport->login_gen;
36015 +-
36016 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
36017 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36018 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36019 ++ qla2x00_async_gfpnid_sp_done);
36020 +
36021 + /* CT_IU preamble */
36022 + ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
36023 +@@ -4338,8 +4341,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
36024 + sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
36025 + sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
36026 +
36027 +- sp->done = qla2x00_async_gfpnid_sp_done;
36028 +-
36029 + ql_dbg(ql_dbg_disc, vha, 0xffff,
36030 + "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
36031 + sp->name, fcport->port_name,
36032 +@@ -4352,7 +4353,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
36033 + return rval;
36034 +
36035 + done_free_sp:
36036 +- sp->free(sp);
36037 ++ /* ref: INIT */
36038 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36039 + done:
36040 + return rval;
36041 + }
36042 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
36043 +index 1fe4966fc2f68..7f81525c4fb32 100644
36044 +--- a/drivers/scsi/qla2xxx/qla_init.c
36045 ++++ b/drivers/scsi/qla2xxx/qla_init.c
36046 +@@ -51,6 +51,9 @@ qla2x00_sp_timeout(struct timer_list *t)
36047 + WARN_ON(irqs_disabled());
36048 + iocb = &sp->u.iocb_cmd;
36049 + iocb->timeout(sp);
36050 ++
36051 ++ /* ref: TMR */
36052 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36053 + }
36054 +
36055 + void qla2x00_sp_free(srb_t *sp)
36056 +@@ -125,8 +128,13 @@ static void qla24xx_abort_iocb_timeout(void *data)
36057 + }
36058 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
36059 +
36060 +- if (sp->cmd_sp)
36061 ++ if (sp->cmd_sp) {
36062 ++ /*
36063 ++ * This done function should take care of
36064 ++ * original command ref: INIT
36065 ++ */
36066 + sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
36067 ++ }
36068 +
36069 + abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
36070 + sp->done(sp, QLA_OS_TIMER_EXPIRED);
36071 +@@ -140,11 +148,11 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
36072 + if (orig_sp)
36073 + qla_wait_nvme_release_cmd_kref(orig_sp);
36074 +
36075 +- del_timer(&sp->u.iocb_cmd.timer);
36076 + if (sp->flags & SRB_WAKEUP_ON_COMP)
36077 + complete(&abt->u.abt.comp);
36078 + else
36079 +- sp->free(sp);
36080 ++ /* ref: INIT */
36081 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36082 + }
36083 +
36084 + int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
36085 +@@ -154,6 +162,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
36086 + srb_t *sp;
36087 + int rval = QLA_FUNCTION_FAILED;
36088 +
36089 ++ /* ref: INIT for ABTS command */
36090 + sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
36091 + GFP_ATOMIC);
36092 + if (!sp)
36093 +@@ -167,23 +176,22 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
36094 + if (wait)
36095 + sp->flags = SRB_WAKEUP_ON_COMP;
36096 +
36097 +- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
36098 + init_completion(&abt_iocb->u.abt.comp);
36099 + /* FW can send 2 x ABTS's timeout/20s */
36100 +- qla2x00_init_timer(sp, 42);
36101 ++ qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
36102 ++ sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
36103 +
36104 + abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
36105 + abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
36106 +
36107 +- sp->done = qla24xx_abort_sp_done;
36108 +-
36109 + ql_dbg(ql_dbg_async, vha, 0x507c,
36110 + "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
36111 + cmd_sp->type);
36112 +
36113 + rval = qla2x00_start_sp(sp);
36114 + if (rval != QLA_SUCCESS) {
36115 +- sp->free(sp);
36116 ++ /* ref: INIT */
36117 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36118 + return rval;
36119 + }
36120 +
36121 +@@ -191,7 +199,8 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
36122 + wait_for_completion(&abt_iocb->u.abt.comp);
36123 + rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
36124 + QLA_SUCCESS : QLA_ERR_FROM_FW;
36125 +- sp->free(sp);
36126 ++ /* ref: INIT */
36127 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36128 + }
36129 +
36130 + return rval;
36131 +@@ -286,10 +295,13 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res)
36132 + ea.iop[0] = lio->u.logio.iop[0];
36133 + ea.iop[1] = lio->u.logio.iop[1];
36134 + ea.sp = sp;
36135 ++ if (res)
36136 ++ ea.data[0] = MBS_COMMAND_ERROR;
36137 + qla24xx_handle_plogi_done_event(vha, &ea);
36138 + }
36139 +
36140 +- sp->free(sp);
36141 ++ /* ref: INIT */
36142 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36143 + }
36144 +
36145 + int
36146 +@@ -308,6 +320,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
36147 + return rval;
36148 + }
36149 +
36150 ++ /* ref: INIT */
36151 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36152 + if (!sp)
36153 + goto done;
36154 +@@ -320,12 +333,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
36155 + sp->name = "login";
36156 + sp->gen1 = fcport->rscn_gen;
36157 + sp->gen2 = fcport->login_gen;
36158 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36159 ++ qla2x00_async_login_sp_done);
36160 +
36161 + lio = &sp->u.iocb_cmd;
36162 +- lio->timeout = qla2x00_async_iocb_timeout;
36163 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36164 +-
36165 +- sp->done = qla2x00_async_login_sp_done;
36166 + if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
36167 + lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
36168 + } else {
36169 +@@ -358,7 +369,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
36170 + return rval;
36171 +
36172 + done_free_sp:
36173 +- sp->free(sp);
36174 ++ /* ref: INIT */
36175 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36176 + fcport->flags &= ~FCF_ASYNC_SENT;
36177 + done:
36178 + fcport->flags &= ~FCF_ASYNC_ACTIVE;
36179 +@@ -370,29 +382,26 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
36180 + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
36181 + sp->fcport->login_gen++;
36182 + qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
36183 +- sp->free(sp);
36184 ++ /* ref: INIT */
36185 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36186 + }
36187 +
36188 + int
36189 + qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
36190 + {
36191 + srb_t *sp;
36192 +- struct srb_iocb *lio;
36193 + int rval = QLA_FUNCTION_FAILED;
36194 +
36195 + fcport->flags |= FCF_ASYNC_SENT;
36196 ++ /* ref: INIT */
36197 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36198 + if (!sp)
36199 + goto done;
36200 +
36201 + sp->type = SRB_LOGOUT_CMD;
36202 + sp->name = "logout";
36203 +-
36204 +- lio = &sp->u.iocb_cmd;
36205 +- lio->timeout = qla2x00_async_iocb_timeout;
36206 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36207 +-
36208 +- sp->done = qla2x00_async_logout_sp_done;
36209 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36210 ++ qla2x00_async_logout_sp_done),
36211 +
36212 + ql_dbg(ql_dbg_disc, vha, 0x2070,
36213 + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
36214 +@@ -406,7 +415,8 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
36215 + return rval;
36216 +
36217 + done_free_sp:
36218 +- sp->free(sp);
36219 ++ /* ref: INIT */
36220 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36221 + done:
36222 + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
36223 + return rval;
36224 +@@ -432,29 +442,26 @@ static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
36225 + if (!test_bit(UNLOADING, &vha->dpc_flags))
36226 + qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
36227 + lio->u.logio.data);
36228 +- sp->free(sp);
36229 ++ /* ref: INIT */
36230 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36231 + }
36232 +
36233 + int
36234 + qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
36235 + {
36236 + srb_t *sp;
36237 +- struct srb_iocb *lio;
36238 + int rval;
36239 +
36240 + rval = QLA_FUNCTION_FAILED;
36241 ++ /* ref: INIT */
36242 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36243 + if (!sp)
36244 + goto done;
36245 +
36246 + sp->type = SRB_PRLO_CMD;
36247 + sp->name = "prlo";
36248 +-
36249 +- lio = &sp->u.iocb_cmd;
36250 +- lio->timeout = qla2x00_async_iocb_timeout;
36251 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36252 +-
36253 +- sp->done = qla2x00_async_prlo_sp_done;
36254 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36255 ++ qla2x00_async_prlo_sp_done);
36256 +
36257 + ql_dbg(ql_dbg_disc, vha, 0x2070,
36258 + "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
36259 +@@ -468,7 +475,8 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
36260 + return rval;
36261 +
36262 + done_free_sp:
36263 +- sp->free(sp);
36264 ++ /* ref: INIT */
36265 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36266 + done:
36267 + fcport->flags &= ~FCF_ASYNC_ACTIVE;
36268 + return rval;
36269 +@@ -551,10 +559,12 @@ static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
36270 + ea.iop[1] = lio->u.logio.iop[1];
36271 + ea.fcport = sp->fcport;
36272 + ea.sp = sp;
36273 ++ if (res)
36274 ++ ea.data[0] = MBS_COMMAND_ERROR;
36275 +
36276 + qla24xx_handle_adisc_event(vha, &ea);
36277 +-
36278 +- sp->free(sp);
36279 ++ /* ref: INIT */
36280 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36281 + }
36282 +
36283 + int
36284 +@@ -565,26 +575,34 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
36285 + struct srb_iocb *lio;
36286 + int rval = QLA_FUNCTION_FAILED;
36287 +
36288 ++ if (IS_SESSION_DELETED(fcport)) {
36289 ++ ql_log(ql_log_warn, vha, 0xffff,
36290 ++ "%s: %8phC is being delete - not sending command.\n",
36291 ++ __func__, fcport->port_name);
36292 ++ fcport->flags &= ~FCF_ASYNC_ACTIVE;
36293 ++ return rval;
36294 ++ }
36295 ++
36296 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
36297 + return rval;
36298 +
36299 + fcport->flags |= FCF_ASYNC_SENT;
36300 ++ /* ref: INIT */
36301 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36302 + if (!sp)
36303 + goto done;
36304 +
36305 + sp->type = SRB_ADISC_CMD;
36306 + sp->name = "adisc";
36307 +-
36308 +- lio = &sp->u.iocb_cmd;
36309 +- lio->timeout = qla2x00_async_iocb_timeout;
36310 + sp->gen1 = fcport->rscn_gen;
36311 + sp->gen2 = fcport->login_gen;
36312 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36313 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36314 ++ qla2x00_async_adisc_sp_done);
36315 +
36316 +- sp->done = qla2x00_async_adisc_sp_done;
36317 +- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
36318 ++ if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
36319 ++ lio = &sp->u.iocb_cmd;
36320 + lio->u.logio.flags |= SRB_LOGIN_RETRIED;
36321 ++ }
36322 +
36323 + ql_dbg(ql_dbg_disc, vha, 0x206f,
36324 + "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
36325 +@@ -597,7 +615,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
36326 + return rval;
36327 +
36328 + done_free_sp:
36329 +- sp->free(sp);
36330 ++ /* ref: INIT */
36331 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36332 + done:
36333 + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
36334 + qla2x00_post_async_adisc_work(vha, fcport, data);
36335 +@@ -963,6 +982,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
36336 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36337 + }
36338 + break;
36339 ++ case ISP_CFG_NL:
36340 ++ qla24xx_fcport_handle_login(vha, fcport);
36341 ++ break;
36342 + default:
36343 + break;
36344 + }
36345 +@@ -1078,13 +1100,13 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
36346 + }
36347 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
36348 +
36349 +- sp->free(sp);
36350 ++ /* ref: INIT */
36351 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36352 + }
36353 +
36354 + int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
36355 + {
36356 + srb_t *sp;
36357 +- struct srb_iocb *mbx;
36358 + int rval = QLA_FUNCTION_FAILED;
36359 + unsigned long flags;
36360 + u16 *mb;
36361 +@@ -1109,6 +1131,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
36362 + vha->gnl.sent = 1;
36363 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
36364 +
36365 ++ /* ref: INIT */
36366 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36367 + if (!sp)
36368 + goto done;
36369 +@@ -1117,10 +1140,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
36370 + sp->name = "gnlist";
36371 + sp->gen1 = fcport->rscn_gen;
36372 + sp->gen2 = fcport->login_gen;
36373 +-
36374 +- mbx = &sp->u.iocb_cmd;
36375 +- mbx->timeout = qla2x00_async_iocb_timeout;
36376 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
36377 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36378 ++ qla24xx_async_gnl_sp_done);
36379 +
36380 + mb = sp->u.iocb_cmd.u.mbx.out_mb;
36381 + mb[0] = MBC_PORT_NODE_NAME_LIST;
36382 +@@ -1132,8 +1153,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
36383 + mb[8] = vha->gnl.size;
36384 + mb[9] = vha->vp_idx;
36385 +
36386 +- sp->done = qla24xx_async_gnl_sp_done;
36387 +-
36388 + ql_dbg(ql_dbg_disc, vha, 0x20da,
36389 + "Async-%s - OUT WWPN %8phC hndl %x\n",
36390 + sp->name, fcport->port_name, sp->handle);
36391 +@@ -1145,7 +1164,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
36392 + return rval;
36393 +
36394 + done_free_sp:
36395 +- sp->free(sp);
36396 ++ /* ref: INIT */
36397 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36398 + done:
36399 + fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
36400 + return rval;
36401 +@@ -1191,7 +1211,7 @@ done:
36402 + dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
36403 + sp->u.iocb_cmd.u.mbx.in_dma);
36404 +
36405 +- sp->free(sp);
36406 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36407 + }
36408 +
36409 + int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
36410 +@@ -1232,11 +1252,13 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
36411 + ea.sp = sp;
36412 + if (res == QLA_OS_TIMER_EXPIRED)
36413 + ea.data[0] = QLA_OS_TIMER_EXPIRED;
36414 ++ else if (res)
36415 ++ ea.data[0] = MBS_COMMAND_ERROR;
36416 +
36417 + qla24xx_handle_prli_done_event(vha, &ea);
36418 + }
36419 +
36420 +- sp->free(sp);
36421 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36422 + }
36423 +
36424 + int
36425 +@@ -1269,12 +1291,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
36426 +
36427 + sp->type = SRB_PRLI_CMD;
36428 + sp->name = "prli";
36429 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36430 ++ qla2x00_async_prli_sp_done);
36431 +
36432 + lio = &sp->u.iocb_cmd;
36433 +- lio->timeout = qla2x00_async_iocb_timeout;
36434 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36435 +-
36436 +- sp->done = qla2x00_async_prli_sp_done;
36437 + lio->u.logio.flags = 0;
36438 +
36439 + if (NVME_TARGET(vha->hw, fcport))
36440 +@@ -1296,7 +1316,8 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
36441 + return rval;
36442 +
36443 + done_free_sp:
36444 +- sp->free(sp);
36445 ++ /* ref: INIT */
36446 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36447 + fcport->flags &= ~FCF_ASYNC_SENT;
36448 + return rval;
36449 + }
36450 +@@ -1325,14 +1346,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
36451 + struct port_database_24xx *pd;
36452 + struct qla_hw_data *ha = vha->hw;
36453 +
36454 +- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
36455 +- fcport->loop_id == FC_NO_LOOP_ID) {
36456 ++ if (IS_SESSION_DELETED(fcport)) {
36457 + ql_log(ql_log_warn, vha, 0xffff,
36458 +- "%s: %8phC - not sending command.\n",
36459 +- __func__, fcport->port_name);
36460 ++ "%s: %8phC is being delete - not sending command.\n",
36461 ++ __func__, fcport->port_name);
36462 ++ fcport->flags &= ~FCF_ASYNC_ACTIVE;
36463 + return rval;
36464 + }
36465 +
36466 ++ if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
36467 ++ ql_log(ql_log_warn, vha, 0xffff,
36468 ++ "%s: %8phC online %d flags %x - not sending command.\n",
36469 ++ __func__, fcport->port_name, vha->flags.online, fcport->flags);
36470 ++ goto done;
36471 ++ }
36472 ++
36473 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36474 + if (!sp)
36475 + goto done;
36476 +@@ -1344,10 +1372,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
36477 + sp->name = "gpdb";
36478 + sp->gen1 = fcport->rscn_gen;
36479 + sp->gen2 = fcport->login_gen;
36480 +-
36481 +- mbx = &sp->u.iocb_cmd;
36482 +- mbx->timeout = qla2x00_async_iocb_timeout;
36483 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
36484 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
36485 ++ qla24xx_async_gpdb_sp_done);
36486 +
36487 + pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
36488 + if (pd == NULL) {
36489 +@@ -1366,11 +1392,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
36490 + mb[9] = vha->vp_idx;
36491 + mb[10] = opt;
36492 +
36493 +- mbx->u.mbx.in = pd;
36494 ++ mbx = &sp->u.iocb_cmd;
36495 ++ mbx->u.mbx.in = (void *)pd;
36496 + mbx->u.mbx.in_dma = pd_dma;
36497 +
36498 +- sp->done = qla24xx_async_gpdb_sp_done;
36499 +-
36500 + ql_dbg(ql_dbg_disc, vha, 0x20dc,
36501 + "Async-%s %8phC hndl %x opt %x\n",
36502 + sp->name, fcport->port_name, sp->handle, opt);
36503 +@@ -1384,7 +1409,7 @@ done_free_sp:
36504 + if (pd)
36505 + dma_pool_free(ha->s_dma_pool, pd, pd_dma);
36506 +
36507 +- sp->free(sp);
36508 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36509 + fcport->flags &= ~FCF_ASYNC_SENT;
36510 + done:
36511 + fcport->flags &= ~FCF_ASYNC_ACTIVE;
36512 +@@ -1556,6 +1581,11 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
36513 + u8 login = 0;
36514 + int rc;
36515 +
36516 ++ ql_dbg(ql_dbg_disc, vha, 0x307b,
36517 ++ "%s %8phC DS %d LS %d lid %d retries=%d\n",
36518 ++ __func__, fcport->port_name, fcport->disc_state,
36519 ++ fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
36520 ++
36521 + if (qla_tgt_mode_enabled(vha))
36522 + return;
36523 +
36524 +@@ -1614,7 +1644,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
36525 + fcport->login_gen, fcport->loop_id, fcport->scan_state,
36526 + fcport->fc4_type);
36527 +
36528 +- if (fcport->scan_state != QLA_FCPORT_FOUND)
36529 ++ if (fcport->scan_state != QLA_FCPORT_FOUND ||
36530 ++ fcport->disc_state == DSC_DELETE_PEND)
36531 + return 0;
36532 +
36533 + if ((fcport->loop_id != FC_NO_LOOP_ID) &&
36534 +@@ -1635,7 +1666,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
36535 + if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
36536 + return 0;
36537 +
36538 +- if (fcport->flags & FCF_ASYNC_SENT) {
36539 ++ if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
36540 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36541 + return 0;
36542 + }
36543 +@@ -1970,22 +2001,21 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
36544 + srb_t *sp;
36545 + int rval = QLA_FUNCTION_FAILED;
36546 +
36547 ++ /* ref: INIT */
36548 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36549 + if (!sp)
36550 + goto done;
36551 +
36552 +- tm_iocb = &sp->u.iocb_cmd;
36553 + sp->type = SRB_TM_CMD;
36554 + sp->name = "tmf";
36555 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
36556 ++ qla2x00_tmf_sp_done);
36557 ++ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
36558 +
36559 +- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
36560 ++ tm_iocb = &sp->u.iocb_cmd;
36561 + init_completion(&tm_iocb->u.tmf.comp);
36562 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
36563 +-
36564 + tm_iocb->u.tmf.flags = flags;
36565 + tm_iocb->u.tmf.lun = lun;
36566 +- tm_iocb->u.tmf.data = tag;
36567 +- sp->done = qla2x00_tmf_sp_done;
36568 +
36569 + ql_dbg(ql_dbg_taskm, vha, 0x802f,
36570 + "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
36571 +@@ -2015,7 +2045,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
36572 + }
36573 +
36574 + done_free_sp:
36575 +- sp->free(sp);
36576 ++ /* ref: INIT */
36577 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36578 + fcport->flags &= ~FCF_ASYNC_SENT;
36579 + done:
36580 + return rval;
36581 +@@ -2074,13 +2105,6 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
36582 + qla24xx_post_gpdb_work(vha, ea->fcport, 0);
36583 + break;
36584 + default:
36585 +- if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
36586 +- (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
36587 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36588 +- ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
36589 +- break;
36590 +- }
36591 +-
36592 + sp = ea->sp;
36593 + ql_dbg(ql_dbg_disc, vha, 0x2118,
36594 + "%s %d %8phC priority %s, fc4type %x prev try %s\n",
36595 +@@ -2224,12 +2248,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
36596 + ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
36597 + __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
36598 +
36599 +- ea->fcport->flags &= ~FCF_ASYNC_SENT;
36600 +- qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
36601 +- if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
36602 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36603 +- else
36604 +- qla2x00_mark_device_lost(vha, ea->fcport, 1);
36605 ++ qlt_schedule_sess_for_deletion(ea->fcport);
36606 + break;
36607 + case MBS_LOOP_ID_USED:
36608 + /* data[1] = IO PARAM 1 = nport ID */
36609 +@@ -3472,6 +3491,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
36610 + struct rsp_que *rsp = ha->rsp_q_map[0];
36611 + struct qla2xxx_fw_dump *fw_dump;
36612 +
36613 ++ if (ha->fw_dump) {
36614 ++ ql_dbg(ql_dbg_init, vha, 0x00bd,
36615 ++ "Firmware dump already allocated.\n");
36616 ++ return;
36617 ++ }
36618 ++
36619 ++ ha->fw_dumped = 0;
36620 ++ ha->fw_dump_cap_flags = 0;
36621 + dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
36622 + req_q_size = rsp_q_size = 0;
36623 +
36624 +@@ -3482,7 +3509,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
36625 + mem_size = (ha->fw_memory_size - 0x11000 + 1) *
36626 + sizeof(uint16_t);
36627 + } else if (IS_FWI2_CAPABLE(ha)) {
36628 +- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
36629 ++ if (IS_QLA83XX(ha))
36630 + fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
36631 + else if (IS_QLA81XX(ha))
36632 + fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
36633 +@@ -3494,8 +3521,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
36634 + mem_size = (ha->fw_memory_size - 0x100000 + 1) *
36635 + sizeof(uint32_t);
36636 + if (ha->mqenable) {
36637 +- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
36638 +- !IS_QLA28XX(ha))
36639 ++ if (!IS_QLA83XX(ha))
36640 + mq_size = sizeof(struct qla2xxx_mq_chain);
36641 + /*
36642 + * Allocate maximum buffer size for all queues - Q0.
36643 +@@ -4056,8 +4082,7 @@ enable_82xx_npiv:
36644 + ha->fw_major_version, ha->fw_minor_version,
36645 + ha->fw_subminor_version);
36646 +
36647 +- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
36648 +- IS_QLA28XX(ha)) {
36649 ++ if (IS_QLA83XX(ha)) {
36650 + ha->flags.fac_supported = 0;
36651 + rval = QLA_SUCCESS;
36652 + }
36653 +@@ -5602,6 +5627,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
36654 + memcpy(fcport->node_name, new_fcport->node_name,
36655 + WWN_SIZE);
36656 + fcport->scan_state = QLA_FCPORT_FOUND;
36657 ++ if (fcport->login_retry == 0) {
36658 ++ fcport->login_retry = vha->hw->login_retry_count;
36659 ++ ql_dbg(ql_dbg_disc, vha, 0x2135,
36660 ++ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
36661 ++ fcport->port_name, fcport->loop_id,
36662 ++ fcport->login_retry);
36663 ++ }
36664 + found++;
36665 + break;
36666 + }
36667 +@@ -5735,6 +5767,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
36668 + if (atomic_read(&fcport->state) == FCS_ONLINE)
36669 + return;
36670 +
36671 ++ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
36672 ++
36673 + rport_ids.node_name = wwn_to_u64(fcport->node_name);
36674 + rport_ids.port_name = wwn_to_u64(fcport->port_name);
36675 + rport_ids.port_id = fcport->d_id.b.domain << 16 |
36676 +@@ -5835,6 +5869,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
36677 + qla2x00_reg_remote_port(vha, fcport);
36678 + break;
36679 + case MODE_TARGET:
36680 ++ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
36681 + if (!vha->vha_tgt.qla_tgt->tgt_stop &&
36682 + !vha->vha_tgt.qla_tgt->tgt_stopped)
36683 + qlt_fc_port_added(vha, fcport);
36684 +@@ -5852,8 +5887,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
36685 + if (NVME_TARGET(vha->hw, fcport))
36686 + qla_nvme_register_remote(vha, fcport);
36687 +
36688 +- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
36689 +-
36690 + if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
36691 + if (fcport->id_changed) {
36692 + fcport->id_changed = 0;
36693 +@@ -9390,7 +9423,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
36694 + qpair->rsp->req = qpair->req;
36695 + qpair->rsp->qpair = qpair;
36696 + /* init qpair to this cpu. Will adjust at run time. */
36697 +- qla_cpu_update(qpair, smp_processor_id());
36698 ++ qla_cpu_update(qpair, raw_smp_processor_id());
36699 +
36700 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
36701 + if (ha->fw_attributes & BIT_4)
36702 +diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
36703 +index 5f3b7995cc8f3..db17f7f410cdd 100644
36704 +--- a/drivers/scsi/qla2xxx/qla_inline.h
36705 ++++ b/drivers/scsi/qla2xxx/qla_inline.h
36706 +@@ -184,6 +184,8 @@ static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
36707 + sp->vha = vha;
36708 + sp->qpair = qpair;
36709 + sp->cmd_type = TYPE_SRB;
36710 ++ /* ref : INIT - normal flow */
36711 ++ kref_init(&sp->cmd_kref);
36712 + INIT_LIST_HEAD(&sp->elem);
36713 + }
36714 +
36715 +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
36716 +index ed604f2185bf2..e0fe9ddb4bd2c 100644
36717 +--- a/drivers/scsi/qla2xxx/qla_iocb.c
36718 ++++ b/drivers/scsi/qla2xxx/qla_iocb.c
36719 +@@ -2560,11 +2560,38 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
36720 + }
36721 + }
36722 +
36723 +-void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
36724 ++static void
36725 ++qla2x00_async_done(struct srb *sp, int res)
36726 ++{
36727 ++ if (del_timer(&sp->u.iocb_cmd.timer)) {
36728 ++ /*
36729 ++ * Successfully cancelled the timeout handler
36730 ++ * ref: TMR
36731 ++ */
36732 ++ if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
36733 ++ return;
36734 ++ }
36735 ++ sp->async_done(sp, res);
36736 ++}
36737 ++
36738 ++void
36739 ++qla2x00_sp_release(struct kref *kref)
36740 ++{
36741 ++ struct srb *sp = container_of(kref, struct srb, cmd_kref);
36742 ++
36743 ++ sp->free(sp);
36744 ++}
36745 ++
36746 ++void
36747 ++qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
36748 ++ void (*done)(struct srb *sp, int res))
36749 + {
36750 + timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
36751 +- sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
36752 ++ sp->done = qla2x00_async_done;
36753 ++ sp->async_done = done;
36754 + sp->free = qla2x00_sp_free;
36755 ++ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
36756 ++ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
36757 + if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
36758 + init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
36759 + sp->start_timer = 1;
36760 +@@ -2651,7 +2678,9 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
36761 + return -ENOMEM;
36762 + }
36763 +
36764 +- /* Alloc SRB structure */
36765 ++ /* Alloc SRB structure
36766 ++ * ref: INIT
36767 ++ */
36768 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36769 + if (!sp) {
36770 + kfree(fcport);
36771 +@@ -2672,18 +2701,19 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
36772 + sp->type = SRB_ELS_DCMD;
36773 + sp->name = "ELS_DCMD";
36774 + sp->fcport = fcport;
36775 +- elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
36776 +- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
36777 +- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
36778 +- sp->done = qla2x00_els_dcmd_sp_done;
36779 ++ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
36780 ++ qla2x00_els_dcmd_sp_done);
36781 + sp->free = qla2x00_els_dcmd_sp_free;
36782 ++ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
36783 ++ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
36784 +
36785 + elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
36786 + DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
36787 + GFP_KERNEL);
36788 +
36789 + if (!elsio->u.els_logo.els_logo_pyld) {
36790 +- sp->free(sp);
36791 ++ /* ref: INIT */
36792 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36793 + return QLA_FUNCTION_FAILED;
36794 + }
36795 +
36796 +@@ -2706,7 +2736,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
36797 +
36798 + rval = qla2x00_start_sp(sp);
36799 + if (rval != QLA_SUCCESS) {
36800 +- sp->free(sp);
36801 ++ /* ref: INIT */
36802 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36803 + return QLA_FUNCTION_FAILED;
36804 + }
36805 +
36806 +@@ -2717,7 +2748,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
36807 +
36808 + wait_for_completion(&elsio->u.els_logo.comp);
36809 +
36810 +- sp->free(sp);
36811 ++ /* ref: INIT */
36812 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36813 + return rval;
36814 + }
36815 +
36816 +@@ -2850,7 +2882,6 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
36817 + sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
36818 +
36819 + fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
36820 +- del_timer(&sp->u.iocb_cmd.timer);
36821 +
36822 + if (sp->flags & SRB_WAKEUP_ON_COMP)
36823 + complete(&lio->u.els_plogi.comp);
36824 +@@ -2927,6 +2958,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
36825 + set_bit(ISP_ABORT_NEEDED,
36826 + &vha->dpc_flags);
36827 + qla2xxx_wake_dpc(vha);
36828 ++ break;
36829 + }
36830 + fallthrough;
36831 + default:
36832 +@@ -2936,9 +2968,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
36833 + fw_status[0], fw_status[1], fw_status[2]);
36834 +
36835 + fcport->flags &= ~FCF_ASYNC_SENT;
36836 +- qla2x00_set_fcport_disc_state(fcport,
36837 +- DSC_LOGIN_FAILED);
36838 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36839 ++ qlt_schedule_sess_for_deletion(fcport);
36840 + break;
36841 + }
36842 + break;
36843 +@@ -2950,8 +2980,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
36844 + fw_status[0], fw_status[1], fw_status[2]);
36845 +
36846 + sp->fcport->flags &= ~FCF_ASYNC_SENT;
36847 +- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
36848 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
36849 ++ qlt_schedule_sess_for_deletion(fcport);
36850 + break;
36851 + }
36852 +
36853 +@@ -2960,7 +2989,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
36854 + struct srb_iocb *elsio = &sp->u.iocb_cmd;
36855 +
36856 + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
36857 +- sp->free(sp);
36858 ++ /* ref: INIT */
36859 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36860 + return;
36861 + }
36862 + e->u.iosb.sp = sp;
36863 +@@ -2978,7 +3008,9 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
36864 + int rval = QLA_SUCCESS;
36865 + void *ptr, *resp_ptr;
36866 +
36867 +- /* Alloc SRB structure */
36868 ++ /* Alloc SRB structure
36869 ++ * ref: INIT
36870 ++ */
36871 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
36872 + if (!sp) {
36873 + ql_log(ql_log_info, vha, 0x70e6,
36874 +@@ -2993,17 +3025,16 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
36875 + ql_dbg(ql_dbg_io, vha, 0x3073,
36876 + "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
36877 +
36878 +- sp->type = SRB_ELS_DCMD;
36879 +- sp->name = "ELS_DCMD";
36880 +- sp->fcport = fcport;
36881 +-
36882 +- elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
36883 + if (wait)
36884 + sp->flags = SRB_WAKEUP_ON_COMP;
36885 +
36886 +- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
36887 ++ sp->type = SRB_ELS_DCMD;
36888 ++ sp->name = "ELS_DCMD";
36889 ++ sp->fcport = fcport;
36890 ++ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
36891 ++ qla2x00_els_dcmd2_sp_done);
36892 ++ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
36893 +
36894 +- sp->done = qla2x00_els_dcmd2_sp_done;
36895 + elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
36896 +
36897 + ptr = elsio->u.els_plogi.els_plogi_pyld =
36898 +@@ -3068,7 +3099,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
36899 + out:
36900 + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
36901 + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
36902 +- sp->free(sp);
36903 ++ /* ref: INIT */
36904 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
36905 + done:
36906 + return rval;
36907 + }
36908 +@@ -3879,8 +3911,15 @@ qla2x00_start_sp(srb_t *sp)
36909 + break;
36910 + }
36911 +
36912 +- if (sp->start_timer)
36913 ++ if (sp->start_timer) {
36914 ++ /* ref: TMR timer ref
36915 ++ * this code should be just before start_iocbs function
36916 ++ * This will make sure that caller function don't to do
36917 ++ * kref_put even on failure
36918 ++ */
36919 ++ kref_get(&sp->cmd_kref);
36920 + add_timer(&sp->u.iocb_cmd.timer);
36921 ++ }
36922 +
36923 + wmb();
36924 + qla2x00_start_iocbs(vha, qp->req);
36925 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
36926 +index aaf6504570fdd..198b782d77901 100644
36927 +--- a/drivers/scsi/qla2xxx/qla_isr.c
36928 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
36929 +@@ -2498,6 +2498,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
36930 + iocb->u.tmf.data = QLA_FUNCTION_FAILED;
36931 + } else if ((le16_to_cpu(sts->scsi_status) &
36932 + SS_RESPONSE_INFO_LEN_VALID)) {
36933 ++ host_to_fcp_swap(sts->data, sizeof(sts->data));
36934 + if (le32_to_cpu(sts->rsp_data_len) < 4) {
36935 + ql_log(ql_log_warn, fcport->vha, 0x503b,
36936 + "Async-%s error - hdl=%x not enough response(%d).\n",
36937 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
36938 +index 10d2655ef6767..7f236db058869 100644
36939 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
36940 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
36941 +@@ -9,6 +9,12 @@
36942 + #include <linux/delay.h>
36943 + #include <linux/gfp.h>
36944 +
36945 ++#ifdef CONFIG_PPC
36946 ++#define IS_PPCARCH true
36947 ++#else
36948 ++#define IS_PPCARCH false
36949 ++#endif
36950 ++
36951 + static struct mb_cmd_name {
36952 + uint16_t cmd;
36953 + const char *str;
36954 +@@ -728,6 +734,9 @@ again:
36955 + vha->min_supported_speed =
36956 + nv->min_supported_speed;
36957 + }
36958 ++
36959 ++ if (IS_PPCARCH)
36960 ++ mcp->mb[11] |= BIT_4;
36961 + }
36962 +
36963 + if (ha->flags.exlogins_enabled)
36964 +@@ -3029,8 +3038,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
36965 + ha->orig_fw_iocb_count = mcp->mb[10];
36966 + if (ha->flags.npiv_supported)
36967 + ha->max_npiv_vports = mcp->mb[11];
36968 +- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
36969 +- IS_QLA28XX(ha))
36970 ++ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
36971 + ha->fw_max_fcf_count = mcp->mb[12];
36972 + }
36973 +
36974 +@@ -5621,7 +5629,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
36975 + mcp->out_mb = MBX_1|MBX_0;
36976 + mcp->in_mb = MBX_2|MBX_1|MBX_0;
36977 + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
36978 +- mcp->in_mb |= MBX_3;
36979 ++ mcp->in_mb |= MBX_4|MBX_3;
36980 + mcp->tov = MBX_TOV_SECONDS;
36981 + mcp->flags = 0;
36982 + rval = qla2x00_mailbox_command(vha, mcp);
36983 +@@ -6479,23 +6487,21 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
36984 + if (!vha->hw->flags.fw_started)
36985 + goto done;
36986 +
36987 ++ /* ref: INIT */
36988 + sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
36989 + if (!sp)
36990 + goto done;
36991 +
36992 +- sp->type = SRB_MB_IOCB;
36993 +- sp->name = mb_to_str(mcp->mb[0]);
36994 +-
36995 + c = &sp->u.iocb_cmd;
36996 +- c->timeout = qla2x00_async_iocb_timeout;
36997 + init_completion(&c->u.mbx.comp);
36998 +
36999 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
37000 ++ sp->type = SRB_MB_IOCB;
37001 ++ sp->name = mb_to_str(mcp->mb[0]);
37002 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
37003 ++ qla2x00_async_mb_sp_done);
37004 +
37005 + memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
37006 +
37007 +- sp->done = qla2x00_async_mb_sp_done;
37008 +-
37009 + rval = qla2x00_start_sp(sp);
37010 + if (rval != QLA_SUCCESS) {
37011 + ql_dbg(ql_dbg_mbx, vha, 0x1018,
37012 +@@ -6527,7 +6533,8 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
37013 + }
37014 +
37015 + done_free_sp:
37016 +- sp->free(sp);
37017 ++ /* ref: INIT */
37018 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37019 + done:
37020 + return rval;
37021 + }
37022 +diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
37023 +index 1c024055f8c50..e6b5c4ccce97b 100644
37024 +--- a/drivers/scsi/qla2xxx/qla_mid.c
37025 ++++ b/drivers/scsi/qla2xxx/qla_mid.c
37026 +@@ -965,6 +965,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
37027 + if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
37028 + return QLA_PARAMETER_ERROR;
37029 +
37030 ++ /* ref: INIT */
37031 + sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
37032 + if (!sp)
37033 + return rval;
37034 +@@ -972,9 +973,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
37035 + sp->type = SRB_CTRL_VP;
37036 + sp->name = "ctrl_vp";
37037 + sp->comp = &comp;
37038 +- sp->done = qla_ctrlvp_sp_done;
37039 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
37040 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
37041 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
37042 ++ qla_ctrlvp_sp_done);
37043 + sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
37044 + sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
37045 +
37046 +@@ -1008,6 +1008,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
37047 + break;
37048 + }
37049 + done:
37050 +- sp->free(sp);
37051 ++ /* ref: INIT */
37052 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37053 + return rval;
37054 + }
37055 +diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
37056 +index 350b0c4346fb6..f726eb8449c5e 100644
37057 +--- a/drivers/scsi/qla2xxx/qla_mr.c
37058 ++++ b/drivers/scsi/qla2xxx/qla_mr.c
37059 +@@ -1787,17 +1787,18 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
37060 + struct register_host_info *preg_hsi;
37061 + struct new_utsname *p_sysid = NULL;
37062 +
37063 ++ /* ref: INIT */
37064 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
37065 + if (!sp)
37066 + goto done;
37067 +
37068 + sp->type = SRB_FXIOCB_DCMD;
37069 + sp->name = "fxdisc";
37070 ++ qla2x00_init_async_sp(sp, FXDISC_TIMEOUT,
37071 ++ qla2x00_fxdisc_sp_done);
37072 ++ sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout;
37073 +
37074 + fdisc = &sp->u.iocb_cmd;
37075 +- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
37076 +- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
37077 +-
37078 + switch (fx_type) {
37079 + case FXDISC_GET_CONFIG_INFO:
37080 + fdisc->u.fxiocb.flags =
37081 +@@ -1898,7 +1899,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
37082 + }
37083 +
37084 + fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
37085 +- sp->done = qla2x00_fxdisc_sp_done;
37086 +
37087 + rval = qla2x00_start_sp(sp);
37088 + if (rval != QLA_SUCCESS)
37089 +@@ -1974,7 +1974,8 @@ done_unmap_req:
37090 + dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
37091 + fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
37092 + done_free_sp:
37093 +- sp->free(sp);
37094 ++ /* ref: INIT */
37095 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37096 + done:
37097 + return rval;
37098 + }
37099 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
37100 +index e22ec7cb65db5..4cfc2efdf7766 100644
37101 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
37102 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
37103 +@@ -37,6 +37,11 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
37104 + (fcport->nvme_flag & NVME_FLAG_REGISTERED))
37105 + return 0;
37106 +
37107 ++ if (atomic_read(&fcport->state) == FCS_ONLINE)
37108 ++ return 0;
37109 ++
37110 ++ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
37111 ++
37112 + fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
37113 +
37114 + memset(&req, 0, sizeof(struct nvme_fc_port_info));
37115 +@@ -170,6 +175,18 @@ out:
37116 + qla2xxx_rel_qpair_sp(sp->qpair, sp);
37117 + }
37118 +
37119 ++static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
37120 ++{
37121 ++ if (sp->flags & SRB_DMA_VALID) {
37122 ++ struct srb_iocb *nvme = &sp->u.iocb_cmd;
37123 ++ struct qla_hw_data *ha = sp->fcport->vha->hw;
37124 ++
37125 ++ dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
37126 ++ fd->rqstlen, DMA_TO_DEVICE);
37127 ++ sp->flags &= ~SRB_DMA_VALID;
37128 ++ }
37129 ++}
37130 ++
37131 + static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
37132 + {
37133 + struct srb *sp = container_of(kref, struct srb, cmd_kref);
37134 +@@ -186,6 +203,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
37135 + spin_unlock_irqrestore(&priv->cmd_lock, flags);
37136 +
37137 + fd = priv->fd;
37138 ++
37139 ++ qla_nvme_ls_unmap(sp, fd);
37140 + fd->done(fd, priv->comp_status);
37141 + out:
37142 + qla2x00_rel_sp(sp);
37143 +@@ -356,6 +375,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
37144 + dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
37145 + fd->rqstlen, DMA_TO_DEVICE);
37146 +
37147 ++ sp->flags |= SRB_DMA_VALID;
37148 ++
37149 + rval = qla2x00_start_sp(sp);
37150 + if (rval != QLA_SUCCESS) {
37151 + ql_log(ql_log_warn, vha, 0x700e,
37152 +@@ -363,6 +384,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
37153 + wake_up(&sp->nvme_ls_waitq);
37154 + sp->priv = NULL;
37155 + priv->sp = NULL;
37156 ++ qla_nvme_ls_unmap(sp, fd);
37157 + qla2x00_rel_sp(sp);
37158 + return rval;
37159 + }
37160 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
37161 +index abcd309172638..6dc2189badd33 100644
37162 +--- a/drivers/scsi/qla2xxx/qla_os.c
37163 ++++ b/drivers/scsi/qla2xxx/qla_os.c
37164 +@@ -728,7 +728,8 @@ void qla2x00_sp_compl(srb_t *sp, int res)
37165 + struct scsi_cmnd *cmd = GET_CMD_SP(sp);
37166 + struct completion *comp = sp->comp;
37167 +
37168 +- sp->free(sp);
37169 ++ /* kref: INIT */
37170 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37171 + cmd->result = res;
37172 + CMD_SP(cmd) = NULL;
37173 + scsi_done(cmd);
37174 +@@ -819,7 +820,8 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
37175 + struct scsi_cmnd *cmd = GET_CMD_SP(sp);
37176 + struct completion *comp = sp->comp;
37177 +
37178 +- sp->free(sp);
37179 ++ /* ref: INIT */
37180 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37181 + cmd->result = res;
37182 + CMD_SP(cmd) = NULL;
37183 + scsi_done(cmd);
37184 +@@ -919,6 +921,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
37185 + goto qc24_target_busy;
37186 +
37187 + sp = scsi_cmd_priv(cmd);
37188 ++ /* ref: INIT */
37189 + qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
37190 +
37191 + sp->u.scmd.cmd = cmd;
37192 +@@ -938,7 +941,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
37193 + return 0;
37194 +
37195 + qc24_host_busy_free_sp:
37196 +- sp->free(sp);
37197 ++ /* ref: INIT */
37198 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37199 +
37200 + qc24_target_busy:
37201 + return SCSI_MLQUEUE_TARGET_BUSY;
37202 +@@ -1008,6 +1012,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
37203 + goto qc24_target_busy;
37204 +
37205 + sp = scsi_cmd_priv(cmd);
37206 ++ /* ref: INIT */
37207 + qla2xxx_init_sp(sp, vha, qpair, fcport);
37208 +
37209 + sp->u.scmd.cmd = cmd;
37210 +@@ -1026,7 +1031,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
37211 + return 0;
37212 +
37213 + qc24_host_busy_free_sp:
37214 +- sp->free(sp);
37215 ++ /* ref: INIT */
37216 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37217 +
37218 + qc24_target_busy:
37219 + return SCSI_MLQUEUE_TARGET_BUSY;
37220 +@@ -3748,8 +3754,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
37221 + if (ha->mqiobase)
37222 + iounmap(ha->mqiobase);
37223 +
37224 +- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
37225 +- ha->msixbase)
37226 ++ if (ha->msixbase)
37227 + iounmap(ha->msixbase);
37228 + }
37229 + }
37230 +@@ -3891,6 +3896,8 @@ qla24xx_free_purex_list(struct purex_list *list)
37231 + spin_lock_irqsave(&list->lock, flags);
37232 + list_for_each_entry_safe(item, next, &list->head, list) {
37233 + list_del(&item->list);
37234 ++ if (item == &item->vha->default_item)
37235 ++ continue;
37236 + kfree(item);
37237 + }
37238 + spin_unlock_irqrestore(&list->lock, flags);
37239 +@@ -5526,6 +5533,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
37240 + memset(&ea, 0, sizeof(ea));
37241 + ea.fcport = fcport;
37242 + qla24xx_handle_relogin_event(vha, &ea);
37243 ++ } else if (vha->hw->current_topology ==
37244 ++ ISP_CFG_NL &&
37245 ++ IS_QLA2XXX_MIDTYPE(vha->hw)) {
37246 ++ (void)qla24xx_fcport_handle_login(vha,
37247 ++ fcport);
37248 + } else if (vha->hw->current_topology ==
37249 + ISP_CFG_NL) {
37250 + fcport->login_retry--;
37251 +@@ -7199,7 +7211,7 @@ skip:
37252 + return do_heartbeat;
37253 + }
37254 +
37255 +-static void qla_heart_beat(struct scsi_qla_host *vha)
37256 ++static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
37257 + {
37258 + struct qla_hw_data *ha = vha->hw;
37259 +
37260 +@@ -7209,8 +7221,19 @@ static void qla_heart_beat(struct scsi_qla_host *vha)
37261 + if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
37262 + return;
37263 +
37264 +- if (qla_do_heartbeat(vha))
37265 ++ /*
37266 ++ * dpc thread cannot run if heartbeat is running at the same time.
37267 ++ * We also do not want to starve heartbeat task. Therefore, do
37268 ++ * heartbeat task at least once every 5 seconds.
37269 ++ */
37270 ++ if (dpc_started &&
37271 ++ time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
37272 ++ return;
37273 ++
37274 ++ if (qla_do_heartbeat(vha)) {
37275 ++ ha->last_heartbeat_run_jiffies = jiffies;
37276 + queue_work(ha->wq, &ha->heartbeat_work);
37277 ++ }
37278 + }
37279 +
37280 + /**************************************************************************
37281 +@@ -7401,6 +7424,8 @@ qla2x00_timer(struct timer_list *t)
37282 + start_dpc++;
37283 + }
37284 +
37285 ++ /* borrowing w to signify dpc will run */
37286 ++ w = 0;
37287 + /* Schedule the DPC routine if needed */
37288 + if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
37289 + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
37290 +@@ -7433,9 +7458,10 @@ qla2x00_timer(struct timer_list *t)
37291 + test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
37292 + test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
37293 + qla2xxx_wake_dpc(vha);
37294 ++ w = 1;
37295 + }
37296 +
37297 +- qla_heart_beat(vha);
37298 ++ qla_heart_beat(vha, w);
37299 +
37300 + qla2x00_restart_timer(vha, WATCH_INTERVAL);
37301 + }
37302 +@@ -7633,7 +7659,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
37303 +
37304 + switch (state) {
37305 + case pci_channel_io_normal:
37306 +- ha->flags.eeh_busy = 0;
37307 ++ qla_pci_set_eeh_busy(vha);
37308 + if (ql2xmqsupport || ql2xnvmeenable) {
37309 + set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
37310 + qla2xxx_wake_dpc(vha);
37311 +@@ -7674,9 +7700,16 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
37312 + "mmio enabled\n");
37313 +
37314 + ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
37315 ++
37316 + if (IS_QLA82XX(ha))
37317 + return PCI_ERS_RESULT_RECOVERED;
37318 +
37319 ++ if (qla2x00_isp_reg_stat(ha)) {
37320 ++ ql_log(ql_log_info, base_vha, 0x803f,
37321 ++ "During mmio enabled, PCI/Register disconnect still detected.\n");
37322 ++ goto out;
37323 ++ }
37324 ++
37325 + spin_lock_irqsave(&ha->hardware_lock, flags);
37326 + if (IS_QLA2100(ha) || IS_QLA2200(ha)){
37327 + stat = rd_reg_word(&reg->hccr);
37328 +@@ -7698,6 +7731,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
37329 + "RISC paused -- mmio_enabled, Dumping firmware.\n");
37330 + qla2xxx_dump_fw(base_vha);
37331 + }
37332 ++out:
37333 + /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
37334 + ql_dbg(ql_dbg_aer, base_vha, 0x600d,
37335 + "mmio enabled returning.\n");
37336 +diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
37337 +index a0aeba69513d4..c092a6b1ced4f 100644
37338 +--- a/drivers/scsi/qla2xxx/qla_sup.c
37339 ++++ b/drivers/scsi/qla2xxx/qla_sup.c
37340 +@@ -844,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
37341 + ha->flt_region_nvram = start;
37342 + break;
37343 + case FLT_REG_IMG_PRI_27XX:
37344 +- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
37345 ++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
37346 + ha->flt_region_img_status_pri = start;
37347 + break;
37348 + case FLT_REG_IMG_SEC_27XX:
37349 +@@ -1356,7 +1356,7 @@ next:
37350 + flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
37351 + if (ret) {
37352 + ql_dbg(ql_dbg_user, vha, 0x7006,
37353 +- "Failed slopw write %x (%x)\n", faddr, *dwptr);
37354 ++ "Failed slow write %x (%x)\n", faddr, *dwptr);
37355 + break;
37356 + }
37357 + }
37358 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
37359 +index 8993d438e0b72..b109716d44fb7 100644
37360 +--- a/drivers/scsi/qla2xxx/qla_target.c
37361 ++++ b/drivers/scsi/qla2xxx/qla_target.c
37362 +@@ -620,7 +620,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
37363 + }
37364 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
37365 +
37366 +- sp->free(sp);
37367 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37368 + }
37369 +
37370 + int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
37371 +@@ -656,12 +656,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
37372 +
37373 + sp->type = type;
37374 + sp->name = "nack";
37375 +-
37376 +- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
37377 +- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
37378 ++ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
37379 ++ qla2x00_async_nack_sp_done);
37380 +
37381 + sp->u.iocb_cmd.u.nack.ntfy = ntfy;
37382 +- sp->done = qla2x00_async_nack_sp_done;
37383 +
37384 + ql_dbg(ql_dbg_disc, vha, 0x20f4,
37385 + "Async-%s %8phC hndl %x %s\n",
37386 +@@ -674,7 +672,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
37387 + return rval;
37388 +
37389 + done_free_sp:
37390 +- sp->free(sp);
37391 ++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
37392 + done:
37393 + fcport->flags &= ~FCF_ASYNC_SENT;
37394 + return rval;
37395 +@@ -3320,6 +3318,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
37396 + "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
37397 + vha->flags.online, qla2x00_reset_active(vha),
37398 + cmd->reset_count, qpair->chip_reset);
37399 ++ res = 0;
37400 + goto out_unmap_unlock;
37401 + }
37402 +
37403 +@@ -7221,8 +7220,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
37404 + if (!QLA_TGT_MODE_ENABLED())
37405 + return;
37406 +
37407 +- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
37408 +- IS_QLA28XX(ha)) {
37409 ++ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
37410 + ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
37411 + ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
37412 + } else {
37413 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
37414 +index 26c13a953b975..b0a74b036cf4b 100644
37415 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
37416 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
37417 +@@ -435,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
37418 + {
37419 + ql_dbg(ql_dbg_misc, vha, 0xd20a,
37420 + "%s: reset risc [%lx]\n", __func__, *len);
37421 +- if (buf)
37422 +- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
37423 ++ if (buf) {
37424 ++ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
37425 ++ ql_dbg(ql_dbg_async, vha, 0x5001,
37426 ++ "%s: unable to soft reset\n", __func__);
37427 ++ return INVALID_ENTRY;
37428 ++ }
37429 ++ }
37430 +
37431 + return qla27xx_next_entry(ent);
37432 + }
37433 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
37434 +index 60a6ae9d1219f..a75499616f5ef 100644
37435 +--- a/drivers/scsi/scsi_error.c
37436 ++++ b/drivers/scsi/scsi_error.c
37437 +@@ -484,8 +484,13 @@ static void scsi_report_sense(struct scsi_device *sdev,
37438 +
37439 + if (sshdr->asc == 0x29) {
37440 + evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
37441 +- sdev_printk(KERN_WARNING, sdev,
37442 +- "Power-on or device reset occurred\n");
37443 ++ /*
37444 ++ * Do not print message if it is an expected side-effect
37445 ++ * of runtime PM.
37446 ++ */
37447 ++ if (!sdev->silence_suspend)
37448 ++ sdev_printk(KERN_WARNING, sdev,
37449 ++ "Power-on or device reset occurred\n");
37450 + }
37451 +
37452 + if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
37453 +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37454 +index 60e406bcf42a9..a2524106206db 100644
37455 +--- a/drivers/scsi/scsi_transport_fc.c
37456 ++++ b/drivers/scsi/scsi_transport_fc.c
37457 +@@ -34,7 +34,7 @@ static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
37458 + static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
37459 + static void fc_bsg_remove(struct request_queue *);
37460 + static void fc_bsg_goose_queue(struct fc_rport *);
37461 +-static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
37462 ++static void fc_li_stats_update(u16 event_type,
37463 + struct fc_fpin_stats *stats);
37464 + static void fc_delivery_stats_update(u32 reason_code,
37465 + struct fc_fpin_stats *stats);
37466 +@@ -670,42 +670,34 @@ fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
37467 + EXPORT_SYMBOL(fc_find_rport_by_wwpn);
37468 +
37469 + static void
37470 +-fc_li_stats_update(struct fc_fn_li_desc *li_desc,
37471 ++fc_li_stats_update(u16 event_type,
37472 + struct fc_fpin_stats *stats)
37473 + {
37474 +- stats->li += be32_to_cpu(li_desc->event_count);
37475 +- switch (be16_to_cpu(li_desc->event_type)) {
37476 ++ stats->li++;
37477 ++ switch (event_type) {
37478 + case FPIN_LI_UNKNOWN:
37479 +- stats->li_failure_unknown +=
37480 +- be32_to_cpu(li_desc->event_count);
37481 ++ stats->li_failure_unknown++;
37482 + break;
37483 + case FPIN_LI_LINK_FAILURE:
37484 +- stats->li_link_failure_count +=
37485 +- be32_to_cpu(li_desc->event_count);
37486 ++ stats->li_link_failure_count++;
37487 + break;
37488 + case FPIN_LI_LOSS_OF_SYNC:
37489 +- stats->li_loss_of_sync_count +=
37490 +- be32_to_cpu(li_desc->event_count);
37491 ++ stats->li_loss_of_sync_count++;
37492 + break;
37493 + case FPIN_LI_LOSS_OF_SIG:
37494 +- stats->li_loss_of_signals_count +=
37495 +- be32_to_cpu(li_desc->event_count);
37496 ++ stats->li_loss_of_signals_count++;
37497 + break;
37498 + case FPIN_LI_PRIM_SEQ_ERR:
37499 +- stats->li_prim_seq_err_count +=
37500 +- be32_to_cpu(li_desc->event_count);
37501 ++ stats->li_prim_seq_err_count++;
37502 + break;
37503 + case FPIN_LI_INVALID_TX_WD:
37504 +- stats->li_invalid_tx_word_count +=
37505 +- be32_to_cpu(li_desc->event_count);
37506 ++ stats->li_invalid_tx_word_count++;
37507 + break;
37508 + case FPIN_LI_INVALID_CRC:
37509 +- stats->li_invalid_crc_count +=
37510 +- be32_to_cpu(li_desc->event_count);
37511 ++ stats->li_invalid_crc_count++;
37512 + break;
37513 + case FPIN_LI_DEVICE_SPEC:
37514 +- stats->li_device_specific +=
37515 +- be32_to_cpu(li_desc->event_count);
37516 ++ stats->li_device_specific++;
37517 + break;
37518 + }
37519 + }
37520 +@@ -767,6 +759,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
37521 + struct fc_rport *attach_rport = NULL;
37522 + struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
37523 + struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
37524 ++ u16 event_type = be16_to_cpu(li_desc->event_type);
37525 + u64 wwpn;
37526 +
37527 + rport = fc_find_rport_by_wwpn(shost,
37528 +@@ -775,7 +768,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
37529 + (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
37530 + rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
37531 + attach_rport = rport;
37532 +- fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
37533 ++ fc_li_stats_update(event_type, &attach_rport->fpin_stats);
37534 + }
37535 +
37536 + if (be32_to_cpu(li_desc->pname_count) > 0) {
37537 +@@ -789,14 +782,14 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
37538 + rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
37539 + if (rport == attach_rport)
37540 + continue;
37541 +- fc_li_stats_update(li_desc,
37542 ++ fc_li_stats_update(event_type,
37543 + &rport->fpin_stats);
37544 + }
37545 + }
37546 + }
37547 +
37548 + if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
37549 +- fc_li_stats_update(li_desc, &fc_host->fpin_stats);
37550 ++ fc_li_stats_update(event_type, &fc_host->fpin_stats);
37551 + }
37552 +
37553 + /*
37554 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
37555 +index 62eb9921cc947..66056806159a6 100644
37556 +--- a/drivers/scsi/sd.c
37557 ++++ b/drivers/scsi/sd.c
37558 +@@ -3752,7 +3752,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
37559 + return 0;
37560 +
37561 + if (sdkp->WCE && sdkp->media_present) {
37562 +- sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
37563 ++ if (!sdkp->device->silence_suspend)
37564 ++ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
37565 + ret = sd_sync_cache(sdkp, &sshdr);
37566 +
37567 + if (ret) {
37568 +@@ -3774,7 +3775,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
37569 + }
37570 +
37571 + if (sdkp->device->manage_start_stop) {
37572 +- sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
37573 ++ if (!sdkp->device->silence_suspend)
37574 ++ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
37575 + /* an error is not worth aborting a system sleep */
37576 + ret = sd_start_stop_device(sdkp, 0);
37577 + if (ignore_stop_errors)
37578 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
37579 +index 9349557b8a01b..cb285d277201c 100644
37580 +--- a/drivers/scsi/ufs/ufshcd.c
37581 ++++ b/drivers/scsi/ufs/ufshcd.c
37582 +@@ -585,7 +585,12 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
37583 + "INVALID MODE",
37584 + };
37585 +
37586 +- dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
37587 ++ /*
37588 ++ * Using dev_dbg to avoid messages during runtime PM to avoid
37589 ++ * never-ending cycles of messages written back to storage by user space
37590 ++ * causing runtime resume, causing more messages and so on.
37591 ++ */
37592 ++ dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
37593 + __func__,
37594 + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
37595 + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
37596 +@@ -5024,6 +5029,12 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
37597 + pm_runtime_get_noresume(&sdev->sdev_gendev);
37598 + else if (ufshcd_is_rpm_autosuspend_allowed(hba))
37599 + sdev->rpm_autosuspend = 1;
37600 ++ /*
37601 ++ * Do not print messages during runtime PM to avoid never-ending cycles
37602 ++ * of messages written back to storage by user space causing runtime
37603 ++ * resume, causing more messages and so on.
37604 ++ */
37605 ++ sdev->silence_suspend = 1;
37606 +
37607 + ufshcd_crypto_register(hba, q);
37608 +
37609 +@@ -7339,7 +7350,13 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
37610 +
37611 + if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
37612 + !hba->vreg_info.vccq2) {
37613 +- dev_err(hba->dev,
37614 ++ /*
37615 ++ * Using dev_dbg to avoid messages during runtime PM to avoid
37616 ++ * never-ending cycles of messages written back to storage by
37617 ++ * user space causing runtime resume, causing more messages and
37618 ++ * so on.
37619 ++ */
37620 ++ dev_dbg(hba->dev,
37621 + "%s: Regulator capability was not set, actvIccLevel=%d",
37622 + __func__, icc_level);
37623 + goto out;
37624 +diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
37625 +index b762bc40f56bd..afd2fd74802d2 100644
37626 +--- a/drivers/soc/mediatek/mtk-pm-domains.c
37627 ++++ b/drivers/soc/mediatek/mtk-pm-domains.c
37628 +@@ -443,6 +443,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
37629 + pd->genpd.power_off = scpsys_power_off;
37630 + pd->genpd.power_on = scpsys_power_on;
37631 +
37632 ++ if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
37633 ++ pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
37634 ++
37635 + if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
37636 + pm_genpd_init(&pd->genpd, NULL, true);
37637 + else
37638 +diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
37639 +index d2dacbbaafbd1..97fd24c178f8d 100644
37640 +--- a/drivers/soc/qcom/ocmem.c
37641 ++++ b/drivers/soc/qcom/ocmem.c
37642 +@@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev)
37643 + ocmem = platform_get_drvdata(pdev);
37644 + if (!ocmem) {
37645 + dev_err(dev, "Cannot get ocmem\n");
37646 ++ put_device(&pdev->dev);
37647 + return ERR_PTR(-ENODEV);
37648 + }
37649 + return ocmem;
37650 +diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
37651 +index cbe5e39fdaeb0..a59bb34e5ebaf 100644
37652 +--- a/drivers/soc/qcom/qcom_aoss.c
37653 ++++ b/drivers/soc/qcom/qcom_aoss.c
37654 +@@ -451,7 +451,11 @@ struct qmp *qmp_get(struct device *dev)
37655 +
37656 + qmp = platform_get_drvdata(pdev);
37657 +
37658 +- return qmp ? qmp : ERR_PTR(-EPROBE_DEFER);
37659 ++ if (!qmp) {
37660 ++ put_device(&pdev->dev);
37661 ++ return ERR_PTR(-EPROBE_DEFER);
37662 ++ }
37663 ++ return qmp;
37664 + }
37665 + EXPORT_SYMBOL(qmp_get);
37666 +
37667 +@@ -497,7 +501,7 @@ static int qmp_probe(struct platform_device *pdev)
37668 + }
37669 +
37670 + irq = platform_get_irq(pdev, 0);
37671 +- ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
37672 ++ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
37673 + "aoss-qmp", qmp);
37674 + if (ret < 0) {
37675 + dev_err(&pdev->dev, "failed to request interrupt\n");
37676 +diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
37677 +index 0a8d8d24bfb77..624b5630feb87 100644
37678 +--- a/drivers/soc/qcom/rpmpd.c
37679 ++++ b/drivers/soc/qcom/rpmpd.c
37680 +@@ -610,6 +610,9 @@ static int rpmpd_probe(struct platform_device *pdev)
37681 +
37682 + data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
37683 + GFP_KERNEL);
37684 ++ if (!data->domains)
37685 ++ return -ENOMEM;
37686 ++
37687 + data->num_domains = num;
37688 +
37689 + for (i = 0; i < num; i++) {
37690 +diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
37691 +index 72386bd393fed..2f03ced0f4113 100644
37692 +--- a/drivers/soc/ti/wkup_m3_ipc.c
37693 ++++ b/drivers/soc/ti/wkup_m3_ipc.c
37694 +@@ -450,9 +450,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
37695 + return PTR_ERR(m3_ipc->ipc_mem_base);
37696 +
37697 + irq = platform_get_irq(pdev, 0);
37698 +- if (!irq) {
37699 ++ if (irq < 0) {
37700 + dev_err(&pdev->dev, "no irq resource\n");
37701 +- return -ENXIO;
37702 ++ return irq;
37703 + }
37704 +
37705 + ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
37706 +diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
37707 +index 0ca2a3e3a02e2..747983743a14b 100644
37708 +--- a/drivers/soundwire/dmi-quirks.c
37709 ++++ b/drivers/soundwire/dmi-quirks.c
37710 +@@ -59,7 +59,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
37711 + {
37712 + .matches = {
37713 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
37714 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
37715 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Conv"),
37716 + },
37717 + .driver_data = (void *)intel_tgl_bios,
37718 + },
37719 +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
37720 +index 122f7a29d8ca9..63101f1ba2713 100644
37721 +--- a/drivers/soundwire/intel.c
37722 ++++ b/drivers/soundwire/intel.c
37723 +@@ -448,8 +448,8 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
37724 +
37725 + /* Clear wake status */
37726 + wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
37727 +- wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
37728 +- intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
37729 ++ wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
37730 ++ intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
37731 + }
37732 + mutex_unlock(sdw->link_res->shim_lock);
37733 + }
37734 +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
37735 +index b6c7467f0b590..d403a7a3021d0 100644
37736 +--- a/drivers/spi/spi-fsi.c
37737 ++++ b/drivers/spi/spi-fsi.c
37738 +@@ -25,6 +25,7 @@
37739 +
37740 + #define SPI_FSI_BASE 0x70000
37741 + #define SPI_FSI_INIT_TIMEOUT_MS 1000
37742 ++#define SPI_FSI_STATUS_TIMEOUT_MS 100
37743 + #define SPI_FSI_MAX_RX_SIZE 8
37744 + #define SPI_FSI_MAX_TX_SIZE 40
37745 +
37746 +@@ -299,6 +300,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
37747 + struct spi_transfer *transfer)
37748 + {
37749 + int rc = 0;
37750 ++ unsigned long end;
37751 + u64 status = 0ULL;
37752 +
37753 + if (transfer->tx_buf) {
37754 +@@ -315,10 +317,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
37755 + if (rc)
37756 + return rc;
37757 +
37758 ++ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
37759 + do {
37760 + rc = fsi_spi_status(ctx, &status, "TX");
37761 + if (rc)
37762 + return rc;
37763 ++
37764 ++ if (time_after(jiffies, end))
37765 ++ return -ETIMEDOUT;
37766 + } while (status & SPI_FSI_STATUS_TDR_FULL);
37767 +
37768 + sent += nb;
37769 +@@ -329,10 +335,14 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
37770 + u8 *rx = transfer->rx_buf;
37771 +
37772 + while (transfer->len > recv) {
37773 ++ end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS);
37774 + do {
37775 + rc = fsi_spi_status(ctx, &status, "RX");
37776 + if (rc)
37777 + return rc;
37778 ++
37779 ++ if (time_after(jiffies, end))
37780 ++ return -ETIMEDOUT;
37781 + } while (!(status & SPI_FSI_STATUS_RDR_FULL));
37782 +
37783 + rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
37784 +diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
37785 +index 753bd313e6fda..2ca19b01948a2 100644
37786 +--- a/drivers/spi/spi-mt65xx.c
37787 ++++ b/drivers/spi/spi-mt65xx.c
37788 +@@ -43,8 +43,11 @@
37789 + #define SPI_CFG1_PACKET_LOOP_OFFSET 8
37790 + #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
37791 + #define SPI_CFG1_GET_TICK_DLY_OFFSET 29
37792 ++#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
37793 +
37794 + #define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
37795 ++#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
37796 ++
37797 + #define SPI_CFG1_CS_IDLE_MASK 0xff
37798 + #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
37799 + #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
37800 +@@ -346,9 +349,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
37801 +
37802 + /* tick delay */
37803 + reg_val = readl(mdata->base + SPI_CFG1_REG);
37804 +- reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
37805 +- reg_val |= ((chip_config->tick_delay & 0x7)
37806 +- << SPI_CFG1_GET_TICK_DLY_OFFSET);
37807 ++ if (mdata->dev_comp->enhance_timing) {
37808 ++ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
37809 ++ reg_val |= ((chip_config->tick_delay & 0x7)
37810 ++ << SPI_CFG1_GET_TICK_DLY_OFFSET);
37811 ++ } else {
37812 ++ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
37813 ++ reg_val |= ((chip_config->tick_delay & 0x3)
37814 ++ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
37815 ++ }
37816 + writel(reg_val, mdata->base + SPI_CFG1_REG);
37817 +
37818 + /* set hw cs timing */
37819 +diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
37820 +index 45889947afed8..03fce4493aa79 100644
37821 +--- a/drivers/spi/spi-mxic.c
37822 ++++ b/drivers/spi/spi-mxic.c
37823 +@@ -304,25 +304,21 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
37824 +
37825 + writel(data, mxic->regs + TXD(nbytes % 4));
37826 +
37827 ++ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
37828 ++ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
37829 ++ if (ret)
37830 ++ return ret;
37831 ++
37832 ++ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
37833 ++ sts & INT_RX_NOT_EMPTY, 0,
37834 ++ USEC_PER_SEC);
37835 ++ if (ret)
37836 ++ return ret;
37837 ++
37838 ++ data = readl(mxic->regs + RXD);
37839 + if (rxbuf) {
37840 +- ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
37841 +- sts & INT_TX_EMPTY, 0,
37842 +- USEC_PER_SEC);
37843 +- if (ret)
37844 +- return ret;
37845 +-
37846 +- ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
37847 +- sts & INT_RX_NOT_EMPTY, 0,
37848 +- USEC_PER_SEC);
37849 +- if (ret)
37850 +- return ret;
37851 +-
37852 +- data = readl(mxic->regs + RXD);
37853 + data >>= (8 * (4 - nbytes));
37854 + memcpy(rxbuf + pos, &data, nbytes);
37855 +- WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
37856 +- } else {
37857 +- readl(mxic->regs + RXD);
37858 + }
37859 + WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
37860 +
37861 +diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
37862 +index 2e134eb4bd2c9..6502fda6243e0 100644
37863 +--- a/drivers/spi/spi-pxa2xx-pci.c
37864 ++++ b/drivers/spi/spi-pxa2xx-pci.c
37865 +@@ -76,14 +76,23 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
37866 + return true;
37867 + }
37868 +
37869 ++static void lpss_dma_put_device(void *dma_dev)
37870 ++{
37871 ++ pci_dev_put(dma_dev);
37872 ++}
37873 ++
37874 + static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
37875 + {
37876 + struct pci_dev *dma_dev;
37877 ++ int ret;
37878 +
37879 + c->num_chipselect = 1;
37880 + c->max_clk_rate = 50000000;
37881 +
37882 + dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
37883 ++ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
37884 ++ if (ret)
37885 ++ return ret;
37886 +
37887 + if (c->tx_param) {
37888 + struct dw_dma_slave *slave = c->tx_param;
37889 +@@ -107,8 +116,9 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
37890 +
37891 + static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
37892 + {
37893 +- struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
37894 + struct dw_dma_slave *tx, *rx;
37895 ++ struct pci_dev *dma_dev;
37896 ++ int ret;
37897 +
37898 + switch (PCI_FUNC(dev->devfn)) {
37899 + case 0:
37900 +@@ -133,6 +143,11 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
37901 + return -ENODEV;
37902 + }
37903 +
37904 ++ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
37905 ++ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
37906 ++ if (ret)
37907 ++ return ret;
37908 ++
37909 + tx = c->tx_param;
37910 + tx->dma_dev = &dma_dev->dev;
37911 +
37912 +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
37913 +index e9de1d958bbd2..8f345247a8c32 100644
37914 +--- a/drivers/spi/spi-tegra114.c
37915 ++++ b/drivers/spi/spi-tegra114.c
37916 +@@ -1352,6 +1352,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
37917 + tspi->phys = r->start;
37918 +
37919 + spi_irq = platform_get_irq(pdev, 0);
37920 ++ if (spi_irq < 0) {
37921 ++ ret = spi_irq;
37922 ++ goto exit_free_master;
37923 ++ }
37924 + tspi->irq = spi_irq;
37925 +
37926 + tspi->clk = devm_clk_get(&pdev->dev, "spi");
37927 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
37928 +index 2a03739a0c609..80c3787deea9d 100644
37929 +--- a/drivers/spi/spi-tegra20-slink.c
37930 ++++ b/drivers/spi/spi-tegra20-slink.c
37931 +@@ -1006,14 +1006,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
37932 + struct resource *r;
37933 + int ret, spi_irq;
37934 + const struct tegra_slink_chip_data *cdata = NULL;
37935 +- const struct of_device_id *match;
37936 +
37937 +- match = of_match_device(tegra_slink_of_match, &pdev->dev);
37938 +- if (!match) {
37939 +- dev_err(&pdev->dev, "Error: No device match found\n");
37940 +- return -ENODEV;
37941 +- }
37942 +- cdata = match->data;
37943 ++ cdata = of_device_get_match_data(&pdev->dev);
37944 +
37945 + master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
37946 + if (!master) {
37947 +diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
37948 +index ce1bdb4767ea3..cb00ac2fc7d8e 100644
37949 +--- a/drivers/spi/spi-tegra210-quad.c
37950 ++++ b/drivers/spi/spi-tegra210-quad.c
37951 +@@ -1240,6 +1240,8 @@ static int tegra_qspi_probe(struct platform_device *pdev)
37952 +
37953 + tqspi->phys = r->start;
37954 + qspi_irq = platform_get_irq(pdev, 0);
37955 ++ if (qspi_irq < 0)
37956 ++ return qspi_irq;
37957 + tqspi->irq = qspi_irq;
37958 +
37959 + tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
37960 +diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
37961 +index 328b6559bb19a..2b5afae8ff7fc 100644
37962 +--- a/drivers/spi/spi-zynqmp-gqspi.c
37963 ++++ b/drivers/spi/spi-zynqmp-gqspi.c
37964 +@@ -1172,7 +1172,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
37965 + goto clk_dis_all;
37966 + }
37967 +
37968 +- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
37969 ++ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
37970 ++ if (ret)
37971 ++ goto clk_dis_all;
37972 ++
37973 + ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
37974 + ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
37975 + ctlr->mem_ops = &zynqmp_qspi_mem_ops;
37976 +diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
37977 +index fef0055b89909..20183b2ea1279 100644
37978 +--- a/drivers/staging/iio/adc/ad7280a.c
37979 ++++ b/drivers/staging/iio/adc/ad7280a.c
37980 +@@ -107,9 +107,9 @@
37981 + static unsigned int ad7280a_devaddr(unsigned int addr)
37982 + {
37983 + return ((addr & 0x1) << 4) |
37984 +- ((addr & 0x2) << 3) |
37985 ++ ((addr & 0x2) << 2) |
37986 + (addr & 0x4) |
37987 +- ((addr & 0x8) >> 3) |
37988 ++ ((addr & 0x8) >> 2) |
37989 + ((addr & 0x10) >> 4);
37990 + }
37991 +
37992 +diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
37993 +index 9a1751895ab03..28cb271663c47 100644
37994 +--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
37995 ++++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
37996 +@@ -439,6 +439,18 @@ int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
37997 + return 0;
37998 + }
37999 +
38000 ++static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd,
38001 ++ int i,
38002 ++ struct atomisp_acc_fw *acc_fw)
38003 ++{
38004 ++ while (--i >= 0) {
38005 ++ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
38006 ++ atomisp_css_unload_acc_extension(asd, acc_fw->fw,
38007 ++ acc_flag_to_pipe[i].pipe_id);
38008 ++ }
38009 ++ }
38010 ++}
38011 ++
38012 + /*
38013 + * Appends the loaded acceleration binary extensions to the
38014 + * current ISP mode. Must be called just before sh_css_start().
38015 +@@ -479,16 +491,20 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
38016 + acc_fw->fw,
38017 + acc_flag_to_pipe[i].pipe_id,
38018 + acc_fw->type);
38019 +- if (ret)
38020 ++ if (ret) {
38021 ++ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
38022 + goto error;
38023 ++ }
38024 +
38025 + ext_loaded = true;
38026 + }
38027 + }
38028 +
38029 + ret = atomisp_css_set_acc_parameters(acc_fw);
38030 +- if (ret < 0)
38031 ++ if (ret < 0) {
38032 ++ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
38033 + goto error;
38034 ++ }
38035 + }
38036 +
38037 + if (!ext_loaded)
38038 +@@ -497,6 +513,7 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
38039 + ret = atomisp_css_update_stream(asd);
38040 + if (ret) {
38041 + dev_err(isp->dev, "%s: update stream failed.\n", __func__);
38042 ++ atomisp_acc_unload_extensions(asd);
38043 + goto error;
38044 + }
38045 +
38046 +@@ -504,13 +521,6 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
38047 + return 0;
38048 +
38049 + error:
38050 +- while (--i >= 0) {
38051 +- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
38052 +- atomisp_css_unload_acc_extension(asd, acc_fw->fw,
38053 +- acc_flag_to_pipe[i].pipe_id);
38054 +- }
38055 +- }
38056 +-
38057 + list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
38058 + if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
38059 + acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
38060 +diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
38061 +index 1cc581074ba76..9a194fbb305b7 100644
38062 +--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
38063 ++++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
38064 +@@ -748,6 +748,21 @@ static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
38065 + return 0;
38066 + }
38067 +
38068 ++/*
38069 ++ * Some boards contain a hw-bug where turning eldo2 back on after having turned
38070 ++ * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus
38071 ++ * to crash, hanging the bus. Do not turn eldo2 off on these systems.
38072 ++ */
38073 ++static const struct dmi_system_id axp_leave_eldo2_on_ids[] = {
38074 ++ {
38075 ++ .matches = {
38076 ++ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
38077 ++ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
38078 ++ },
38079 ++ },
38080 ++ { }
38081 ++};
38082 ++
38083 + static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
38084 + {
38085 + int ret;
38086 +@@ -782,6 +797,9 @@ static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs)
38087 + if (ret)
38088 + return ret;
38089 +
38090 ++ if (dmi_check_system(axp_leave_eldo2_on_ids))
38091 ++ return 0;
38092 ++
38093 + ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
38094 + ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
38095 + return ret;
38096 +diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
38097 +index 6a5ee46070898..c1cda16f2dc01 100644
38098 +--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
38099 ++++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
38100 +@@ -39,7 +39,7 @@
38101 + struct hmm_bo_device bo_device;
38102 + struct hmm_pool dynamic_pool;
38103 + struct hmm_pool reserved_pool;
38104 +-static ia_css_ptr dummy_ptr;
38105 ++static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
38106 + static bool hmm_initialized;
38107 + struct _hmm_mem_stat hmm_mem_stat;
38108 +
38109 +@@ -209,7 +209,7 @@ int hmm_init(void)
38110 +
38111 + void hmm_cleanup(void)
38112 + {
38113 +- if (!dummy_ptr)
38114 ++ if (dummy_ptr == mmgr_EXCEPTION)
38115 + return;
38116 + sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
38117 +
38118 +@@ -288,7 +288,8 @@ void hmm_free(ia_css_ptr virt)
38119 +
38120 + dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
38121 +
38122 +- WARN_ON(!virt);
38123 ++ if (WARN_ON(virt == mmgr_EXCEPTION))
38124 ++ return;
38125 +
38126 + bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
38127 +
38128 +diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
38129 +index 1450013d3685d..c5d32048d90ff 100644
38130 +--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
38131 ++++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
38132 +@@ -23,7 +23,7 @@ static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
38133 +
38134 + reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
38135 + | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0)
38136 +- | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0)
38137 ++ | H1_REG_IN_IMG_CTRL_OVRFLB(0)
38138 + | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
38139 + vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
38140 + }
38141 +diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h
38142 +index d6e9825bb5c7b..30e7e7b920b55 100644
38143 +--- a/drivers/staging/media/hantro/hantro_h1_regs.h
38144 ++++ b/drivers/staging/media/hantro/hantro_h1_regs.h
38145 +@@ -47,7 +47,7 @@
38146 + #define H1_REG_IN_IMG_CTRL 0x03c
38147 + #define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
38148 + #define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
38149 +-#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
38150 ++#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6)
38151 + #define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
38152 + #define H1_REG_ENC_CTRL0 0x040
38153 + #define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
38154 +diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/staging/media/hantro/sunxi_vpu_hw.c
38155 +index 90633406c4eb8..c0edd5856a0c8 100644
38156 +--- a/drivers/staging/media/hantro/sunxi_vpu_hw.c
38157 ++++ b/drivers/staging/media/hantro/sunxi_vpu_hw.c
38158 +@@ -29,10 +29,10 @@ static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
38159 + .frmsize = {
38160 + .min_width = 48,
38161 + .max_width = 3840,
38162 +- .step_width = MB_DIM,
38163 ++ .step_width = 32,
38164 + .min_height = 48,
38165 + .max_height = 2160,
38166 +- .step_height = MB_DIM,
38167 ++ .step_height = 32,
38168 + },
38169 + },
38170 + };
38171 +diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
38172 +index 2b73fa55c938b..9ea723bb5f209 100644
38173 +--- a/drivers/staging/media/imx/imx7-mipi-csis.c
38174 ++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
38175 +@@ -32,7 +32,6 @@
38176 + #include <media/v4l2-subdev.h>
38177 +
38178 + #define CSIS_DRIVER_NAME "imx7-mipi-csis"
38179 +-#define CSIS_SUBDEV_NAME CSIS_DRIVER_NAME
38180 +
38181 + #define CSIS_PAD_SINK 0
38182 + #define CSIS_PAD_SOURCE 1
38183 +@@ -311,7 +310,6 @@ struct csi_state {
38184 + struct reset_control *mrst;
38185 + struct regulator *mipi_phy_regulator;
38186 + const struct mipi_csis_info *info;
38187 +- u8 index;
38188 +
38189 + struct v4l2_subdev sd;
38190 + struct media_pad pads[CSIS_PADS_NUM];
38191 +@@ -1303,8 +1301,8 @@ static int mipi_csis_subdev_init(struct csi_state *state)
38192 +
38193 + v4l2_subdev_init(sd, &mipi_csis_subdev_ops);
38194 + sd->owner = THIS_MODULE;
38195 +- snprintf(sd->name, sizeof(sd->name), "%s.%d",
38196 +- CSIS_SUBDEV_NAME, state->index);
38197 ++ snprintf(sd->name, sizeof(sd->name), "csis-%s",
38198 ++ dev_name(state->dev));
38199 +
38200 + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
38201 + sd->ctrl_handler = NULL;
38202 +diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
38203 +index 7adbdd14daa93..3b9fa75efac6b 100644
38204 +--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
38205 ++++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
38206 +@@ -398,9 +398,6 @@ static int imx8mq_mipi_csi_s_stream(struct v4l2_subdev *sd, int enable)
38207 + struct csi_state *state = mipi_sd_to_csi2_state(sd);
38208 + int ret = 0;
38209 +
38210 +- imx8mq_mipi_csi_write(state, CSI2RX_IRQ_MASK,
38211 +- CSI2RX_IRQ_MASK_ULPS_STATUS_CHANGE);
38212 +-
38213 + if (enable) {
38214 + ret = pm_runtime_resume_and_get(state->dev);
38215 + if (ret < 0)
38216 +@@ -696,7 +693,7 @@ err_parse:
38217 + * Suspend/resume
38218 + */
38219 +
38220 +-static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime)
38221 ++static int imx8mq_mipi_csi_pm_suspend(struct device *dev)
38222 + {
38223 + struct v4l2_subdev *sd = dev_get_drvdata(dev);
38224 + struct csi_state *state = mipi_sd_to_csi2_state(sd);
38225 +@@ -708,36 +705,21 @@ static int imx8mq_mipi_csi_pm_suspend(struct device *dev, bool runtime)
38226 + imx8mq_mipi_csi_stop_stream(state);
38227 + imx8mq_mipi_csi_clk_disable(state);
38228 + state->state &= ~ST_POWERED;
38229 +- if (!runtime)
38230 +- state->state |= ST_SUSPENDED;
38231 + }
38232 +
38233 + mutex_unlock(&state->lock);
38234 +
38235 +- ret = icc_set_bw(state->icc_path, 0, 0);
38236 +- if (ret)
38237 +- dev_err(dev, "icc_set_bw failed with %d\n", ret);
38238 +-
38239 + return ret ? -EAGAIN : 0;
38240 + }
38241 +
38242 +-static int imx8mq_mipi_csi_pm_resume(struct device *dev, bool runtime)
38243 ++static int imx8mq_mipi_csi_pm_resume(struct device *dev)
38244 + {
38245 + struct v4l2_subdev *sd = dev_get_drvdata(dev);
38246 + struct csi_state *state = mipi_sd_to_csi2_state(sd);
38247 + int ret = 0;
38248 +
38249 +- ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
38250 +- if (ret) {
38251 +- dev_err(dev, "icc_set_bw failed with %d\n", ret);
38252 +- return ret;
38253 +- }
38254 +-
38255 + mutex_lock(&state->lock);
38256 +
38257 +- if (!runtime && !(state->state & ST_SUSPENDED))
38258 +- goto unlock;
38259 +-
38260 + if (!(state->state & ST_POWERED)) {
38261 + state->state |= ST_POWERED;
38262 + ret = imx8mq_mipi_csi_clk_enable(state);
38263 +@@ -758,22 +740,60 @@ unlock:
38264 +
38265 + static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
38266 + {
38267 +- return imx8mq_mipi_csi_pm_suspend(dev, false);
38268 ++ struct v4l2_subdev *sd = dev_get_drvdata(dev);
38269 ++ struct csi_state *state = mipi_sd_to_csi2_state(sd);
38270 ++ int ret;
38271 ++
38272 ++ ret = imx8mq_mipi_csi_pm_suspend(dev);
38273 ++ if (ret)
38274 ++ return ret;
38275 ++
38276 ++ state->state |= ST_SUSPENDED;
38277 ++
38278 ++ return ret;
38279 + }
38280 +
38281 + static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
38282 + {
38283 +- return imx8mq_mipi_csi_pm_resume(dev, false);
38284 ++ struct v4l2_subdev *sd = dev_get_drvdata(dev);
38285 ++ struct csi_state *state = mipi_sd_to_csi2_state(sd);
38286 ++
38287 ++ if (!(state->state & ST_SUSPENDED))
38288 ++ return 0;
38289 ++
38290 ++ return imx8mq_mipi_csi_pm_resume(dev);
38291 + }
38292 +
38293 + static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
38294 + {
38295 +- return imx8mq_mipi_csi_pm_suspend(dev, true);
38296 ++ struct v4l2_subdev *sd = dev_get_drvdata(dev);
38297 ++ struct csi_state *state = mipi_sd_to_csi2_state(sd);
38298 ++ int ret;
38299 ++
38300 ++ ret = imx8mq_mipi_csi_pm_suspend(dev);
38301 ++ if (ret)
38302 ++ return ret;
38303 ++
38304 ++ ret = icc_set_bw(state->icc_path, 0, 0);
38305 ++ if (ret)
38306 ++ dev_err(dev, "icc_set_bw failed with %d\n", ret);
38307 ++
38308 ++ return ret;
38309 + }
38310 +
38311 + static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
38312 + {
38313 +- return imx8mq_mipi_csi_pm_resume(dev, true);
38314 ++ struct v4l2_subdev *sd = dev_get_drvdata(dev);
38315 ++ struct csi_state *state = mipi_sd_to_csi2_state(sd);
38316 ++ int ret;
38317 ++
38318 ++ ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
38319 ++ if (ret) {
38320 ++ dev_err(dev, "icc_set_bw failed with %d\n", ret);
38321 ++ return ret;
38322 ++ }
38323 ++
38324 ++ return imx8mq_mipi_csi_pm_resume(dev);
38325 + }
38326 +
38327 + static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
38328 +@@ -921,7 +941,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
38329 + /* Enable runtime PM. */
38330 + pm_runtime_enable(dev);
38331 + if (!pm_runtime_enabled(dev)) {
38332 +- ret = imx8mq_mipi_csi_pm_resume(dev, true);
38333 ++ ret = imx8mq_mipi_csi_runtime_resume(dev);
38334 + if (ret < 0)
38335 + goto icc;
38336 + }
38337 +@@ -934,7 +954,7 @@ static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
38338 +
38339 + cleanup:
38340 + pm_runtime_disable(&pdev->dev);
38341 +- imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
38342 ++ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
38343 +
38344 + media_entity_cleanup(&state->sd.entity);
38345 + v4l2_async_nf_unregister(&state->notifier);
38346 +@@ -958,7 +978,7 @@ static int imx8mq_mipi_csi_remove(struct platform_device *pdev)
38347 + v4l2_async_unregister_subdev(&state->sd);
38348 +
38349 + pm_runtime_disable(&pdev->dev);
38350 +- imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
38351 ++ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
38352 + media_entity_cleanup(&state->sd.entity);
38353 + mutex_destroy(&state->lock);
38354 + pm_runtime_set_suspended(&pdev->dev);
38355 +diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
38356 +index db7022707ff8d..86ccc8937afca 100644
38357 +--- a/drivers/staging/media/meson/vdec/esparser.c
38358 ++++ b/drivers/staging/media/meson/vdec/esparser.c
38359 +@@ -328,7 +328,12 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
38360 +
38361 + offset = esparser_get_offset(sess);
38362 +
38363 +- amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
38364 ++ ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
38365 ++ if (ret) {
38366 ++ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
38367 ++ return ret;
38368 ++ }
38369 ++
38370 + dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
38371 + vb->timestamp, payload_size, offset, vbuf->flags);
38372 +
38373 +diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
38374 +index 203d7afa085d7..7d2a756532503 100644
38375 +--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
38376 ++++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
38377 +@@ -227,13 +227,16 @@ int amvdec_set_canvases(struct amvdec_session *sess,
38378 + }
38379 + EXPORT_SYMBOL_GPL(amvdec_set_canvases);
38380 +
38381 +-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
38382 +- struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
38383 ++int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
38384 ++ struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
38385 + {
38386 + struct amvdec_timestamp *new_ts;
38387 + unsigned long flags;
38388 +
38389 + new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
38390 ++ if (!new_ts)
38391 ++ return -ENOMEM;
38392 ++
38393 + new_ts->ts = ts;
38394 + new_ts->tc = tc;
38395 + new_ts->offset = offset;
38396 +@@ -242,6 +245,7 @@ void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
38397 + spin_lock_irqsave(&sess->ts_spinlock, flags);
38398 + list_add_tail(&new_ts->list, &sess->timestamps);
38399 + spin_unlock_irqrestore(&sess->ts_spinlock, flags);
38400 ++ return 0;
38401 + }
38402 + EXPORT_SYMBOL_GPL(amvdec_add_ts);
38403 +
38404 +diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
38405 +index 88137d15aa3ad..4bf3e61d081b3 100644
38406 +--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
38407 ++++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
38408 +@@ -56,8 +56,8 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
38409 + * @offset: offset in the VIFIFO where the associated packet was written
38410 + * @flags: the vb2_v4l2_buffer flags
38411 + */
38412 +-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
38413 +- struct v4l2_timecode tc, u32 offset, u32 flags);
38414 ++int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
38415 ++ struct v4l2_timecode tc, u32 offset, u32 flags);
38416 + void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
38417 +
38418 + /**
38419 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
38420 +index b4173a8926d69..d8fb93035470e 100644
38421 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
38422 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
38423 +@@ -38,7 +38,7 @@ struct cedrus_h264_sram_ref_pic {
38424 +
38425 + #define CEDRUS_H264_FRAME_NUM 18
38426 +
38427 +-#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
38428 ++#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K)
38429 + #define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
38430 +
38431 + static void cedrus_h264_write_sram(struct cedrus_dev *dev,
38432 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
38433 +index 8829a7bab07ec..ffade5cbd2e40 100644
38434 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
38435 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
38436 +@@ -23,7 +23,7 @@
38437 + * Subsequent BSP implementations seem to double the neighbor info buffer size
38438 + * for the H6 SoC, which may be related to 10 bit H265 support.
38439 + */
38440 +-#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
38441 ++#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K)
38442 + #define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
38443 + #define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
38444 +
38445 +diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
38446 +index b1ad2a2b914cd..50d5a7acfab6c 100644
38447 +--- a/drivers/staging/media/zoran/zoran.h
38448 ++++ b/drivers/staging/media/zoran/zoran.h
38449 +@@ -313,6 +313,6 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
38450 +
38451 + #endif
38452 +
38453 +-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq);
38454 ++int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir);
38455 + void zoran_queue_exit(struct zoran *zr);
38456 + int zr_set_buf(struct zoran *zr);
38457 +diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
38458 +index f259585b06897..11d415c0c05d2 100644
38459 +--- a/drivers/staging/media/zoran/zoran_card.c
38460 ++++ b/drivers/staging/media/zoran/zoran_card.c
38461 +@@ -803,6 +803,52 @@ int zoran_check_jpg_settings(struct zoran *zr,
38462 + return 0;
38463 + }
38464 +
38465 ++static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir)
38466 ++{
38467 ++ int err;
38468 ++
38469 ++ /* Now add the template and register the device unit. */
38470 ++ *video_dev = zoran_template;
38471 ++ video_dev->v4l2_dev = &zr->v4l2_dev;
38472 ++ video_dev->lock = &zr->lock;
38473 ++ video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | dir;
38474 ++
38475 ++ strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name));
38476 ++ /*
38477 ++ * It's not a mem2mem device, but you can both capture and output from one and the same
38478 ++ * device. This should really be split up into two device nodes, but that's a job for
38479 ++ * another day.
38480 ++ */
38481 ++ video_dev->vfl_dir = VFL_DIR_M2M;
38482 ++ zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE);
38483 ++
38484 ++ err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
38485 ++ if (err < 0)
38486 ++ return err;
38487 ++ video_set_drvdata(video_dev, zr);
38488 ++ return 0;
38489 ++}
38490 ++
38491 ++static void zoran_exit_video_devices(struct zoran *zr)
38492 ++{
38493 ++ video_unregister_device(zr->video_dev);
38494 ++ kfree(zr->video_dev);
38495 ++}
38496 ++
38497 ++static int zoran_init_video_devices(struct zoran *zr)
38498 ++{
38499 ++ int err;
38500 ++
38501 ++ zr->video_dev = video_device_alloc();
38502 ++ if (!zr->video_dev)
38503 ++ return -ENOMEM;
38504 ++
38505 ++ err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE);
38506 ++ if (err)
38507 ++ kfree(zr->video_dev);
38508 ++ return err;
38509 ++}
38510 ++
38511 + void zoran_open_init_params(struct zoran *zr)
38512 + {
38513 + int i;
38514 +@@ -874,17 +920,11 @@ static int zr36057_init(struct zoran *zr)
38515 + zoran_open_init_params(zr);
38516 +
38517 + /* allocate memory *before* doing anything to the hardware in case allocation fails */
38518 +- zr->video_dev = video_device_alloc();
38519 +- if (!zr->video_dev) {
38520 +- err = -ENOMEM;
38521 +- goto exit;
38522 +- }
38523 + zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev,
38524 + BUZ_NUM_STAT_COM * sizeof(u32),
38525 + &zr->p_sc, GFP_KERNEL);
38526 + if (!zr->stat_com) {
38527 +- err = -ENOMEM;
38528 +- goto exit_video;
38529 ++ return -ENOMEM;
38530 + }
38531 + for (j = 0; j < BUZ_NUM_STAT_COM; j++)
38532 + zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
38533 +@@ -897,26 +937,9 @@ static int zr36057_init(struct zoran *zr)
38534 + goto exit_statcom;
38535 + }
38536 +
38537 +- /* Now add the template and register the device unit. */
38538 +- *zr->video_dev = zoran_template;
38539 +- zr->video_dev->v4l2_dev = &zr->v4l2_dev;
38540 +- zr->video_dev->lock = &zr->lock;
38541 +- zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
38542 +-
38543 +- strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name));
38544 +- /*
38545 +- * It's not a mem2mem device, but you can both capture and output from one and the same
38546 +- * device. This should really be split up into two device nodes, but that's a job for
38547 +- * another day.
38548 +- */
38549 +- zr->video_dev->vfl_dir = VFL_DIR_M2M;
38550 +-
38551 +- zoran_queue_init(zr, &zr->vq);
38552 +-
38553 +- err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
38554 +- if (err < 0)
38555 ++ err = zoran_init_video_devices(zr);
38556 ++ if (err)
38557 + goto exit_statcomb;
38558 +- video_set_drvdata(zr->video_dev, zr);
38559 +
38560 + zoran_init_hardware(zr);
38561 + if (!pass_through) {
38562 +@@ -931,9 +954,6 @@ exit_statcomb:
38563 + dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
38564 + exit_statcom:
38565 + dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
38566 +-exit_video:
38567 +- kfree(zr->video_dev);
38568 +-exit:
38569 + return err;
38570 + }
38571 +
38572 +@@ -965,7 +985,7 @@ static void zoran_remove(struct pci_dev *pdev)
38573 + dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
38574 + pci_release_regions(pdev);
38575 + pci_disable_device(zr->pci_dev);
38576 +- video_unregister_device(zr->video_dev);
38577 ++ zoran_exit_video_devices(zr);
38578 + exit_free:
38579 + v4l2_ctrl_handler_free(&zr->hdl);
38580 + v4l2_device_unregister(&zr->v4l2_dev);
38581 +@@ -1069,8 +1089,10 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
38582 +
38583 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
38584 + if (err)
38585 +- return -ENODEV;
38586 +- vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
38587 ++ return err;
38588 ++ err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX);
38589 ++ if (err)
38590 ++ return err;
38591 +
38592 + nr = zoran_num++;
38593 + if (nr >= BUZ_MAX) {
38594 +diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
38595 +index 5b12a730a2290..fb1f0465ca87f 100644
38596 +--- a/drivers/staging/media/zoran/zoran_device.c
38597 ++++ b/drivers/staging/media/zoran/zoran_device.c
38598 +@@ -814,7 +814,7 @@ static void zoran_reap_stat_com(struct zoran *zr)
38599 + if (zr->jpg_settings.tmp_dcm == 1)
38600 + i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
38601 + else
38602 +- i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1;
38603 ++ i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2;
38604 +
38605 + stat_com = le32_to_cpu(zr->stat_com[i]);
38606 + if ((stat_com & 1) == 0) {
38607 +@@ -826,6 +826,11 @@ static void zoran_reap_stat_com(struct zoran *zr)
38608 + size = (stat_com & GENMASK(22, 1)) >> 1;
38609 +
38610 + buf = zr->inuse[i];
38611 ++ if (!buf) {
38612 ++ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
38613 ++ pci_err(zr->pci_dev, "No buffer at slot %d\n", i);
38614 ++ return;
38615 ++ }
38616 + buf->vbuf.vb2_buf.timestamp = ktime_get_ns();
38617 +
38618 + if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
38619 +diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
38620 +index 46382e43f1bf7..84665637ebb79 100644
38621 +--- a/drivers/staging/media/zoran/zoran_driver.c
38622 ++++ b/drivers/staging/media/zoran/zoran_driver.c
38623 +@@ -255,8 +255,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
38624 + strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card));
38625 + strscpy(cap->driver, "zoran", sizeof(cap->driver));
38626 + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev));
38627 +- cap->device_caps = zr->video_dev->device_caps;
38628 +- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
38629 + return 0;
38630 + }
38631 +
38632 +@@ -582,6 +580,9 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
38633 + struct zoran *zr = video_drvdata(file);
38634 + int res = 0;
38635 +
38636 ++ if (zr->norm == std)
38637 ++ return 0;
38638 ++
38639 + if (zr->running != ZORAN_MAP_MODE_NONE)
38640 + return -EBUSY;
38641 +
38642 +@@ -739,6 +740,7 @@ static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p
38643 + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
38644 + return -EINVAL;
38645 +
38646 ++ parm->parm.capture.readbuffers = 9;
38647 + return 0;
38648 + }
38649 +
38650 +@@ -869,6 +871,10 @@ int zr_set_buf(struct zoran *zr)
38651 + vbuf = &buf->vbuf;
38652 +
38653 + buf->vbuf.field = V4L2_FIELD_INTERLACED;
38654 ++ if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2))
38655 ++ buf->vbuf.field = V4L2_FIELD_INTERLACED;
38656 ++ else
38657 ++ buf->vbuf.field = V4L2_FIELD_TOP;
38658 + vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size);
38659 + vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
38660 + zr->inuse[0] = NULL;
38661 +@@ -928,6 +934,7 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
38662 + zr->stat_com[j] = cpu_to_le32(1);
38663 + zr->inuse[j] = NULL;
38664 + }
38665 ++ zr->vbseq = 0;
38666 +
38667 + if (zr->map_mode != ZORAN_MAP_MODE_RAW) {
38668 + pci_info(zr->pci_dev, "START JPG\n");
38669 +@@ -1008,7 +1015,7 @@ static const struct vb2_ops zr_video_qops = {
38670 + .wait_finish = vb2_ops_wait_finish,
38671 + };
38672 +
38673 +-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
38674 ++int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir)
38675 + {
38676 + int err;
38677 +
38678 +@@ -1016,8 +1023,9 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
38679 + INIT_LIST_HEAD(&zr->queued_bufs);
38680 +
38681 + vq->dev = &zr->pci_dev->dev;
38682 +- vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
38683 +- vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
38684 ++ vq->type = dir;
38685 ++
38686 ++ vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
38687 + vq->drv_priv = zr;
38688 + vq->buf_struct_size = sizeof(struct zr_buffer);
38689 + vq->ops = &zr_video_qops;
38690 +diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
38691 +index e38a083811e54..5ae94b1ad5998 100644
38692 +--- a/drivers/staging/mt7621-dts/gbpc1.dts
38693 ++++ b/drivers/staging/mt7621-dts/gbpc1.dts
38694 +@@ -12,7 +12,8 @@
38695 +
38696 + memory@0 {
38697 + device_type = "memory";
38698 +- reg = <0x0 0x1c000000>, <0x20000000 0x4000000>;
38699 ++ reg = <0x00000000 0x1c000000>,
38700 ++ <0x20000000 0x04000000>;
38701 + };
38702 +
38703 + chosen {
38704 +@@ -38,24 +39,16 @@
38705 + gpio-leds {
38706 + compatible = "gpio-leds";
38707 +
38708 +- system {
38709 +- label = "gb-pc1:green:system";
38710 ++ power {
38711 ++ label = "green:power";
38712 + gpios = <&gpio 6 GPIO_ACTIVE_LOW>;
38713 ++ linux,default-trigger = "default-on";
38714 + };
38715 +
38716 +- status {
38717 +- label = "gb-pc1:green:status";
38718 ++ system {
38719 ++ label = "green:system";
38720 + gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
38721 +- };
38722 +-
38723 +- lan1 {
38724 +- label = "gb-pc1:green:lan1";
38725 +- gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
38726 +- };
38727 +-
38728 +- lan2 {
38729 +- label = "gb-pc1:green:lan2";
38730 +- gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
38731 ++ linux,default-trigger = "disk-activity";
38732 + };
38733 + };
38734 + };
38735 +@@ -95,9 +88,8 @@
38736 +
38737 + partition@50000 {
38738 + label = "firmware";
38739 +- reg = <0x50000 0x1FB0000>;
38740 ++ reg = <0x50000 0x1fb0000>;
38741 + };
38742 +-
38743 + };
38744 + };
38745 +
38746 +@@ -106,9 +98,12 @@
38747 + };
38748 +
38749 + &pinctrl {
38750 +- state_default: pinctrl0 {
38751 +- default_gpio: gpio {
38752 +- groups = "wdt", "rgmii2", "uart3";
38753 ++ pinctrl-names = "default";
38754 ++ pinctrl-0 = <&state_default>;
38755 ++
38756 ++ state_default: state-default {
38757 ++ gpio-pinmux {
38758 ++ groups = "rgmii2", "uart3", "wdt";
38759 + function = "gpio";
38760 + };
38761 + };
38762 +@@ -117,12 +112,13 @@
38763 + &switch0 {
38764 + ports {
38765 + port@0 {
38766 ++ status = "okay";
38767 + label = "ethblack";
38768 +- status = "ok";
38769 + };
38770 ++
38771 + port@4 {
38772 ++ status = "okay";
38773 + label = "ethblue";
38774 +- status = "ok";
38775 + };
38776 + };
38777 + };
38778 +diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts
38779 +index 6fe603c7711d7..a7fce8de61472 100644
38780 +--- a/drivers/staging/mt7621-dts/gbpc2.dts
38781 ++++ b/drivers/staging/mt7621-dts/gbpc2.dts
38782 +@@ -1,22 +1,122 @@
38783 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
38784 + /dts-v1/;
38785 +
38786 +-#include "gbpc1.dts"
38787 ++#include "mt7621.dtsi"
38788 ++
38789 ++#include <dt-bindings/gpio/gpio.h>
38790 ++#include <dt-bindings/input/input.h>
38791 +
38792 + / {
38793 + compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc";
38794 + model = "GB-PC2";
38795 ++
38796 ++ memory@0 {
38797 ++ device_type = "memory";
38798 ++ reg = <0x00000000 0x1c000000>,
38799 ++ <0x20000000 0x04000000>;
38800 ++ };
38801 ++
38802 ++ chosen {
38803 ++ bootargs = "console=ttyS0,57600";
38804 ++ };
38805 ++
38806 ++ palmbus: palmbus@1e000000 {
38807 ++ i2c@900 {
38808 ++ status = "okay";
38809 ++ };
38810 ++ };
38811 ++
38812 ++ gpio-keys {
38813 ++ compatible = "gpio-keys";
38814 ++
38815 ++ reset {
38816 ++ label = "reset";
38817 ++ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
38818 ++ linux,code = <KEY_RESTART>;
38819 ++ };
38820 ++ };
38821 ++};
38822 ++
38823 ++&sdhci {
38824 ++ status = "okay";
38825 ++};
38826 ++
38827 ++&spi0 {
38828 ++ status = "okay";
38829 ++
38830 ++ m25p80@0 {
38831 ++ #address-cells = <1>;
38832 ++ #size-cells = <1>;
38833 ++ compatible = "jedec,spi-nor";
38834 ++ reg = <0>;
38835 ++ spi-max-frequency = <50000000>;
38836 ++ broken-flash-reset;
38837 ++
38838 ++ partition@0 {
38839 ++ label = "u-boot";
38840 ++ reg = <0x0 0x30000>;
38841 ++ read-only;
38842 ++ };
38843 ++
38844 ++ partition@30000 {
38845 ++ label = "u-boot-env";
38846 ++ reg = <0x30000 0x10000>;
38847 ++ read-only;
38848 ++ };
38849 ++
38850 ++ factory: partition@40000 {
38851 ++ label = "factory";
38852 ++ reg = <0x40000 0x10000>;
38853 ++ read-only;
38854 ++ };
38855 ++
38856 ++ partition@50000 {
38857 ++ label = "firmware";
38858 ++ reg = <0x50000 0x1fb0000>;
38859 ++ };
38860 ++ };
38861 + };
38862 +
38863 +-&default_gpio {
38864 +- groups = "wdt", "uart3";
38865 +- function = "gpio";
38866 ++&pcie {
38867 ++ status = "okay";
38868 + };
38869 +
38870 +-&gmac1 {
38871 +- status = "ok";
38872 ++&pinctrl {
38873 ++ pinctrl-names = "default";
38874 ++ pinctrl-0 = <&state_default>;
38875 ++
38876 ++ state_default: state-default {
38877 ++ gpio-pinmux {
38878 ++ groups = "wdt";
38879 ++ function = "gpio";
38880 ++ };
38881 ++ };
38882 + };
38883 +
38884 +-&phy_external {
38885 +- status = "ok";
38886 ++&ethernet {
38887 ++ gmac1: mac@1 {
38888 ++ status = "okay";
38889 ++ phy-handle = <&ethphy7>;
38890 ++ };
38891 ++
38892 ++ mdio-bus {
38893 ++ ethphy7: ethernet-phy@7 {
38894 ++ reg = <7>;
38895 ++ phy-mode = "rgmii-rxid";
38896 ++ };
38897 ++ };
38898 ++};
38899 ++
38900 ++&switch0 {
38901 ++ ports {
38902 ++ port@0 {
38903 ++ status = "okay";
38904 ++ label = "ethblack";
38905 ++ };
38906 ++
38907 ++ port@4 {
38908 ++ status = "okay";
38909 ++ label = "ethblue";
38910 ++ };
38911 ++ };
38912 + };
38913 +diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
38914 +index 644a65d1a6a16..786cdb5fc4da1 100644
38915 +--- a/drivers/staging/mt7621-dts/mt7621.dtsi
38916 ++++ b/drivers/staging/mt7621-dts/mt7621.dtsi
38917 +@@ -44,9 +44,9 @@
38918 + regulator-max-microvolt = <3300000>;
38919 + enable-active-high;
38920 + regulator-always-on;
38921 +- };
38922 ++ };
38923 +
38924 +- mmc_fixed_1v8_io: fixedregulator@1 {
38925 ++ mmc_fixed_1v8_io: fixedregulator@1 {
38926 + compatible = "regulator-fixed";
38927 + regulator-name = "mmc_io";
38928 + regulator-min-microvolt = <1800000>;
38929 +@@ -325,37 +325,32 @@
38930 +
38931 + mediatek,ethsys = <&sysc>;
38932 +
38933 ++ pinctrl-names = "default";
38934 ++ pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>;
38935 +
38936 + gmac0: mac@0 {
38937 + compatible = "mediatek,eth-mac";
38938 + reg = <0>;
38939 + phy-mode = "rgmii";
38940 ++
38941 + fixed-link {
38942 + speed = <1000>;
38943 + full-duplex;
38944 + pause;
38945 + };
38946 + };
38947 ++
38948 + gmac1: mac@1 {
38949 + compatible = "mediatek,eth-mac";
38950 + reg = <1>;
38951 + status = "off";
38952 + phy-mode = "rgmii-rxid";
38953 +- phy-handle = <&phy_external>;
38954 + };
38955 ++
38956 + mdio-bus {
38957 + #address-cells = <1>;
38958 + #size-cells = <0>;
38959 +
38960 +- phy_external: ethernet-phy@5 {
38961 +- status = "off";
38962 +- reg = <5>;
38963 +- phy-mode = "rgmii-rxid";
38964 +-
38965 +- pinctrl-names = "default";
38966 +- pinctrl-0 = <&rgmii2_pins>;
38967 +- };
38968 +-
38969 + switch0: switch0@0 {
38970 + compatible = "mediatek,mt7621";
38971 + #address-cells = <1>;
38972 +@@ -373,36 +368,43 @@
38973 + #address-cells = <1>;
38974 + #size-cells = <0>;
38975 + reg = <0>;
38976 ++
38977 + port@0 {
38978 + status = "off";
38979 + reg = <0>;
38980 + label = "lan0";
38981 + };
38982 ++
38983 + port@1 {
38984 + status = "off";
38985 + reg = <1>;
38986 + label = "lan1";
38987 + };
38988 ++
38989 + port@2 {
38990 + status = "off";
38991 + reg = <2>;
38992 + label = "lan2";
38993 + };
38994 ++
38995 + port@3 {
38996 + status = "off";
38997 + reg = <3>;
38998 + label = "lan3";
38999 + };
39000 ++
39001 + port@4 {
39002 + status = "off";
39003 + reg = <4>;
39004 + label = "lan4";
39005 + };
39006 ++
39007 + port@6 {
39008 + reg = <6>;
39009 + label = "cpu";
39010 + ethernet = <&gmac0>;
39011 + phy-mode = "trgmii";
39012 ++
39013 + fixed-link {
39014 + speed = <1000>;
39015 + full-duplex;
39016 +diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
39017 +index 9873bb2a9ee4f..113a3efd12e95 100644
39018 +--- a/drivers/staging/qlge/qlge_main.c
39019 ++++ b/drivers/staging/qlge/qlge_main.c
39020 +@@ -4605,14 +4605,12 @@ static int qlge_probe(struct pci_dev *pdev,
39021 + err = register_netdev(ndev);
39022 + if (err) {
39023 + dev_err(&pdev->dev, "net device registration failed.\n");
39024 +- qlge_release_all(pdev);
39025 +- pci_disable_device(pdev);
39026 +- goto netdev_free;
39027 ++ goto cleanup_pdev;
39028 + }
39029 +
39030 + err = qlge_health_create_reporters(qdev);
39031 + if (err)
39032 +- goto netdev_free;
39033 ++ goto unregister_netdev;
39034 +
39035 + /* Start up the timer to trigger EEH if
39036 + * the bus goes dead
39037 +@@ -4626,6 +4624,11 @@ static int qlge_probe(struct pci_dev *pdev,
39038 + devlink_register(devlink);
39039 + return 0;
39040 +
39041 ++unregister_netdev:
39042 ++ unregister_netdev(ndev);
39043 ++cleanup_pdev:
39044 ++ qlge_release_all(pdev);
39045 ++ pci_disable_device(pdev);
39046 + netdev_free:
39047 + free_netdev(ndev);
39048 + devlink_free:
39049 +diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
39050 +index 51a13262a226f..d120d61454a35 100644
39051 +--- a/drivers/staging/r8188eu/core/rtw_recv.c
39052 ++++ b/drivers/staging/r8188eu/core/rtw_recv.c
39053 +@@ -1853,8 +1853,7 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
39054 + struct recv_frame *pending_frame;
39055 + int cnt = 0;
39056 +
39057 +- pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue);
39058 +- while (pending_frame) {
39059 ++ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
39060 + cnt++;
39061 + recv_func_posthandle(padapter, pending_frame);
39062 + }
39063 +diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
39064 +index b818872e0d194..31a9b7500a7b6 100644
39065 +--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
39066 ++++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
39067 +@@ -538,10 +538,10 @@ static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
39068 + }
39069 + memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
39070 + pFirmware->ulFwLength = fw->size;
39071 +- release_firmware(fw);
39072 + dev_dbg(device, "!bUsedWoWLANFw, FmrmwareLen:%d+\n", pFirmware->ulFwLength);
39073 +
39074 + Exit:
39075 ++ release_firmware(fw);
39076 + return rtStatus;
39077 + }
39078 +
39079 +diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
39080 +index 4f478812cb514..a0b599100106b 100644
39081 +--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
39082 ++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
39083 +@@ -53,7 +53,7 @@ struct int3400_thermal_priv {
39084 + struct art *arts;
39085 + int trt_count;
39086 + struct trt *trts;
39087 +- u8 uuid_bitmap;
39088 ++ u32 uuid_bitmap;
39089 + int rel_misc_dev_res;
39090 + int current_uuid_index;
39091 + char *data_vault;
39092 +@@ -468,6 +468,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
39093 + priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
39094 + obj->package.elements[0].buffer.length,
39095 + GFP_KERNEL);
39096 ++ if (!priv->data_vault) {
39097 ++ kfree(buffer.pointer);
39098 ++ return;
39099 ++ }
39100 ++
39101 + bin_attr_data_vault.private = priv->data_vault;
39102 + bin_attr_data_vault.size = obj->package.elements[0].buffer.length;
39103 + kfree(buffer.pointer);
39104 +diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
39105 +index 82a76cac94deb..32366caca6623 100644
39106 +--- a/drivers/tty/hvc/hvc_iucv.c
39107 ++++ b/drivers/tty/hvc/hvc_iucv.c
39108 +@@ -1417,7 +1417,9 @@ out_error:
39109 + */
39110 + static int __init hvc_iucv_config(char *val)
39111 + {
39112 +- return kstrtoul(val, 10, &hvc_iucv_devices);
39113 ++ if (kstrtoul(val, 10, &hvc_iucv_devices))
39114 ++ pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
39115 ++ return 1;
39116 + }
39117 +
39118 +
39119 +diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
39120 +index c858aff721c41..fbb796f837532 100644
39121 +--- a/drivers/tty/mxser.c
39122 ++++ b/drivers/tty/mxser.c
39123 +@@ -744,6 +744,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
39124 + struct mxser_port *info = container_of(port, struct mxser_port, port);
39125 + unsigned long page;
39126 + unsigned long flags;
39127 ++ int ret;
39128 +
39129 + page = __get_free_page(GFP_KERNEL);
39130 + if (!page)
39131 +@@ -753,9 +754,9 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
39132 +
39133 + if (!info->type) {
39134 + set_bit(TTY_IO_ERROR, &tty->flags);
39135 +- free_page(page);
39136 + spin_unlock_irqrestore(&info->slock, flags);
39137 +- return 0;
39138 ++ ret = 0;
39139 ++ goto err_free_xmit;
39140 + }
39141 + info->port.xmit_buf = (unsigned char *) page;
39142 +
39143 +@@ -775,8 +776,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
39144 + if (capable(CAP_SYS_ADMIN)) {
39145 + set_bit(TTY_IO_ERROR, &tty->flags);
39146 + return 0;
39147 +- } else
39148 +- return -ENODEV;
39149 ++ }
39150 ++
39151 ++ ret = -ENODEV;
39152 ++ goto err_free_xmit;
39153 + }
39154 +
39155 + /*
39156 +@@ -821,6 +824,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
39157 + spin_unlock_irqrestore(&info->slock, flags);
39158 +
39159 + return 0;
39160 ++err_free_xmit:
39161 ++ free_page(page);
39162 ++ info->port.xmit_buf = NULL;
39163 ++ return ret;
39164 + }
39165 +
39166 + /*
39167 +diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
39168 +index 2350fb3bb5e4c..c2cecc6f47db4 100644
39169 +--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
39170 ++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
39171 +@@ -487,7 +487,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
39172 + port.port.irq = irq_of_parse_and_map(np, 0);
39173 + port.port.handle_irq = aspeed_vuart_handle_irq;
39174 + port.port.iotype = UPIO_MEM;
39175 +- port.port.type = PORT_16550A;
39176 ++ port.port.type = PORT_ASPEED_VUART;
39177 + port.port.uartclk = clk;
39178 + port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
39179 + | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST;
39180 +diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
39181 +index 890fa7ddaa7f3..b3c3f7e5851ab 100644
39182 +--- a/drivers/tty/serial/8250/8250_dma.c
39183 ++++ b/drivers/tty/serial/8250/8250_dma.c
39184 +@@ -64,10 +64,19 @@ int serial8250_tx_dma(struct uart_8250_port *p)
39185 + struct uart_8250_dma *dma = p->dma;
39186 + struct circ_buf *xmit = &p->port.state->xmit;
39187 + struct dma_async_tx_descriptor *desc;
39188 ++ struct uart_port *up = &p->port;
39189 + int ret;
39190 +
39191 +- if (dma->tx_running)
39192 ++ if (dma->tx_running) {
39193 ++ if (up->x_char) {
39194 ++ dmaengine_pause(dma->txchan);
39195 ++ uart_xchar_out(up, UART_TX);
39196 ++ dmaengine_resume(dma->txchan);
39197 ++ }
39198 + return 0;
39199 ++ } else if (up->x_char) {
39200 ++ uart_xchar_out(up, UART_TX);
39201 ++ }
39202 +
39203 + if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
39204 + /* We have been called from __dma_tx_complete() */
39205 +diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
39206 +index d3bafec7619da..0f5af061e0b45 100644
39207 +--- a/drivers/tty/serial/8250/8250_lpss.c
39208 ++++ b/drivers/tty/serial/8250/8250_lpss.c
39209 +@@ -117,8 +117,7 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
39210 + {
39211 + struct dw_dma_slave *param = &lpss->dma_param;
39212 + struct pci_dev *pdev = to_pci_dev(port->dev);
39213 +- unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
39214 +- struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn);
39215 ++ struct pci_dev *dma_dev;
39216 +
39217 + switch (pdev->device) {
39218 + case PCI_DEVICE_ID_INTEL_BYT_UART1:
39219 +@@ -137,6 +136,8 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
39220 + return -EINVAL;
39221 + }
39222 +
39223 ++ dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
39224 ++
39225 + param->dma_dev = &dma_dev->dev;
39226 + param->m_master = 0;
39227 + param->p_master = 1;
39228 +@@ -152,6 +153,14 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
39229 + return 0;
39230 + }
39231 +
39232 ++static void byt_serial_exit(struct lpss8250 *lpss)
39233 ++{
39234 ++ struct dw_dma_slave *param = &lpss->dma_param;
39235 ++
39236 ++ /* Paired with pci_get_slot() in the byt_serial_setup() above */
39237 ++ put_device(param->dma_dev);
39238 ++}
39239 ++
39240 + static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
39241 + {
39242 + struct uart_8250_dma *dma = &lpss->data.dma;
39243 +@@ -170,6 +179,13 @@ static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
39244 + return 0;
39245 + }
39246 +
39247 ++static void ehl_serial_exit(struct lpss8250 *lpss)
39248 ++{
39249 ++ struct uart_8250_port *up = serial8250_get_port(lpss->data.line);
39250 ++
39251 ++ up->dma = NULL;
39252 ++}
39253 ++
39254 + #ifdef CONFIG_SERIAL_8250_DMA
39255 + static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
39256 + .nr_channels = 2,
39257 +@@ -344,8 +360,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39258 + return 0;
39259 +
39260 + err_exit:
39261 +- if (lpss->board->exit)
39262 +- lpss->board->exit(lpss);
39263 ++ lpss->board->exit(lpss);
39264 + pci_free_irq_vectors(pdev);
39265 + return ret;
39266 + }
39267 +@@ -356,8 +371,7 @@ static void lpss8250_remove(struct pci_dev *pdev)
39268 +
39269 + serial8250_unregister_port(lpss->data.line);
39270 +
39271 +- if (lpss->board->exit)
39272 +- lpss->board->exit(lpss);
39273 ++ lpss->board->exit(lpss);
39274 + pci_free_irq_vectors(pdev);
39275 + }
39276 +
39277 +@@ -365,12 +379,14 @@ static const struct lpss8250_board byt_board = {
39278 + .freq = 100000000,
39279 + .base_baud = 2764800,
39280 + .setup = byt_serial_setup,
39281 ++ .exit = byt_serial_exit,
39282 + };
39283 +
39284 + static const struct lpss8250_board ehl_board = {
39285 + .freq = 200000000,
39286 + .base_baud = 12500000,
39287 + .setup = ehl_serial_setup,
39288 ++ .exit = ehl_serial_exit,
39289 + };
39290 +
39291 + static const struct lpss8250_board qrk_board = {
39292 +diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
39293 +index efa0515139f8e..e6c1791609ddf 100644
39294 +--- a/drivers/tty/serial/8250/8250_mid.c
39295 ++++ b/drivers/tty/serial/8250/8250_mid.c
39296 +@@ -73,6 +73,11 @@ static int pnw_setup(struct mid8250 *mid, struct uart_port *p)
39297 + return 0;
39298 + }
39299 +
39300 ++static void pnw_exit(struct mid8250 *mid)
39301 ++{
39302 ++ pci_dev_put(mid->dma_dev);
39303 ++}
39304 ++
39305 + static int tng_handle_irq(struct uart_port *p)
39306 + {
39307 + struct mid8250 *mid = p->private_data;
39308 +@@ -124,6 +129,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
39309 + return 0;
39310 + }
39311 +
39312 ++static void tng_exit(struct mid8250 *mid)
39313 ++{
39314 ++ pci_dev_put(mid->dma_dev);
39315 ++}
39316 ++
39317 + static int dnv_handle_irq(struct uart_port *p)
39318 + {
39319 + struct mid8250 *mid = p->private_data;
39320 +@@ -330,9 +340,9 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39321 +
39322 + pci_set_drvdata(pdev, mid);
39323 + return 0;
39324 ++
39325 + err:
39326 +- if (mid->board->exit)
39327 +- mid->board->exit(mid);
39328 ++ mid->board->exit(mid);
39329 + return ret;
39330 + }
39331 +
39332 +@@ -342,8 +352,7 @@ static void mid8250_remove(struct pci_dev *pdev)
39333 +
39334 + serial8250_unregister_port(mid->line);
39335 +
39336 +- if (mid->board->exit)
39337 +- mid->board->exit(mid);
39338 ++ mid->board->exit(mid);
39339 + }
39340 +
39341 + static const struct mid8250_board pnw_board = {
39342 +@@ -351,6 +360,7 @@ static const struct mid8250_board pnw_board = {
39343 + .freq = 50000000,
39344 + .base_baud = 115200,
39345 + .setup = pnw_setup,
39346 ++ .exit = pnw_exit,
39347 + };
39348 +
39349 + static const struct mid8250_board tng_board = {
39350 +@@ -358,6 +368,7 @@ static const struct mid8250_board tng_board = {
39351 + .freq = 38400000,
39352 + .base_baud = 1843200,
39353 + .setup = tng_setup,
39354 ++ .exit = tng_exit,
39355 + };
39356 +
39357 + static const struct mid8250_board dnv_board = {
39358 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
39359 +index 3b12bfc1ed67b..9f116e75956e2 100644
39360 +--- a/drivers/tty/serial/8250/8250_port.c
39361 ++++ b/drivers/tty/serial/8250/8250_port.c
39362 +@@ -307,6 +307,14 @@ static const struct serial8250_config uart_config[] = {
39363 + .rxtrig_bytes = {1, 32, 64, 112},
39364 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
39365 + },
39366 ++ [PORT_ASPEED_VUART] = {
39367 ++ .name = "ASPEED VUART",
39368 ++ .fifo_size = 16,
39369 ++ .tx_loadsz = 16,
39370 ++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
39371 ++ .rxtrig_bytes = {1, 4, 8, 14},
39372 ++ .flags = UART_CAP_FIFO,
39373 ++ },
39374 + };
39375 +
39376 + /* Uart divisor latch read */
39377 +@@ -1615,6 +1623,18 @@ static inline void start_tx_rs485(struct uart_port *port)
39378 + struct uart_8250_port *up = up_to_u8250p(port);
39379 + struct uart_8250_em485 *em485 = up->em485;
39380 +
39381 ++ /*
39382 ++ * While serial8250_em485_handle_stop_tx() is a noop if
39383 ++ * em485->active_timer != &em485->stop_tx_timer, it might happen that
39384 ++ * the timer is still armed and triggers only after the current bunch of
39385 ++ * chars is send and em485->active_timer == &em485->stop_tx_timer again.
39386 ++ * So cancel the timer. There is still a theoretical race condition if
39387 ++ * the timer is already running and only comes around to check for
39388 ++ * em485->active_timer when &em485->stop_tx_timer is armed again.
39389 ++ */
39390 ++ if (em485->active_timer == &em485->stop_tx_timer)
39391 ++ hrtimer_try_to_cancel(&em485->stop_tx_timer);
39392 ++
39393 + em485->active_timer = NULL;
39394 +
39395 + if (em485->tx_stopped) {
39396 +@@ -1799,9 +1819,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
39397 + int count;
39398 +
39399 + if (port->x_char) {
39400 +- serial_out(up, UART_TX, port->x_char);
39401 +- port->icount.tx++;
39402 +- port->x_char = 0;
39403 ++ uart_xchar_out(port, UART_TX);
39404 + return;
39405 + }
39406 + if (uart_tx_stopped(port)) {
39407 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
39408 +index 49d0c7f2b29b8..79b7db8580e05 100644
39409 +--- a/drivers/tty/serial/kgdboc.c
39410 ++++ b/drivers/tty/serial/kgdboc.c
39411 +@@ -403,16 +403,16 @@ static int kgdboc_option_setup(char *opt)
39412 + {
39413 + if (!opt) {
39414 + pr_err("config string not provided\n");
39415 +- return -EINVAL;
39416 ++ return 1;
39417 + }
39418 +
39419 + if (strlen(opt) >= MAX_CONFIG_LEN) {
39420 + pr_err("config string too long\n");
39421 +- return -ENOSPC;
39422 ++ return 1;
39423 + }
39424 + strcpy(config, opt);
39425 +
39426 +- return 0;
39427 ++ return 1;
39428 + }
39429 +
39430 + __setup("kgdboc=", kgdboc_option_setup);
39431 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
39432 +index 0db90be4c3bc3..f67540ae2a883 100644
39433 +--- a/drivers/tty/serial/serial_core.c
39434 ++++ b/drivers/tty/serial/serial_core.c
39435 +@@ -644,6 +644,20 @@ static void uart_flush_buffer(struct tty_struct *tty)
39436 + tty_port_tty_wakeup(&state->port);
39437 + }
39438 +
39439 ++/*
39440 ++ * This function performs low-level write of high-priority XON/XOFF
39441 ++ * character and accounting for it.
39442 ++ *
39443 ++ * Requires uart_port to implement .serial_out().
39444 ++ */
39445 ++void uart_xchar_out(struct uart_port *uport, int offset)
39446 ++{
39447 ++ serial_port_out(uport, offset, uport->x_char);
39448 ++ uport->icount.tx++;
39449 ++ uport->x_char = 0;
39450 ++}
39451 ++EXPORT_SYMBOL_GPL(uart_xchar_out);
39452 ++
39453 + /*
39454 + * This function is used to send a high-priority XON/XOFF character to
39455 + * the device
39456 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
39457 +index df3522dab31b5..1e7dc130c39a6 100644
39458 +--- a/drivers/usb/host/xhci-hub.c
39459 ++++ b/drivers/usb/host/xhci-hub.c
39460 +@@ -762,7 +762,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
39461 + }
39462 + pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
39463 + xhci->test_mode = 0;
39464 +- return xhci_reset(xhci);
39465 ++ return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
39466 + }
39467 +
39468 + void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
39469 +@@ -1088,6 +1088,9 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
39470 + if (link_state == XDEV_U2)
39471 + *status |= USB_PORT_STAT_L1;
39472 + if (link_state == XDEV_U0) {
39473 ++ if (bus_state->resume_done[portnum])
39474 ++ usb_hcd_end_port_resume(&port->rhub->hcd->self,
39475 ++ portnum);
39476 + bus_state->resume_done[portnum] = 0;
39477 + clear_bit(portnum, &bus_state->resuming_ports);
39478 + if (bus_state->suspended_ports & (1 << portnum)) {
39479 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
39480 +index 0e312066c5c63..b398d3fdabf61 100644
39481 +--- a/drivers/usb/host/xhci-mem.c
39482 ++++ b/drivers/usb/host/xhci-mem.c
39483 +@@ -2583,7 +2583,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
39484 +
39485 + fail:
39486 + xhci_halt(xhci);
39487 +- xhci_reset(xhci);
39488 ++ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
39489 + xhci_mem_cleanup(xhci);
39490 + return -ENOMEM;
39491 + }
39492 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
39493 +index 2d378543bc3aa..7d1ad8d654cbb 100644
39494 +--- a/drivers/usb/host/xhci.c
39495 ++++ b/drivers/usb/host/xhci.c
39496 +@@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
39497 + * handshake done). There are two failure modes: "usec" have passed (major
39498 + * hardware flakeout), or the register reads as all-ones (hardware removed).
39499 + */
39500 +-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
39501 ++int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
39502 + {
39503 + u32 result;
39504 + int ret;
39505 +@@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
39506 + ret = readl_poll_timeout_atomic(ptr, result,
39507 + (result & mask) == done ||
39508 + result == U32_MAX,
39509 +- 1, usec);
39510 ++ 1, timeout_us);
39511 + if (result == U32_MAX) /* card removed */
39512 + return -ENODEV;
39513 +
39514 +@@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
39515 + * Transactions will be terminated immediately, and operational registers
39516 + * will be set to their defaults.
39517 + */
39518 +-int xhci_reset(struct xhci_hcd *xhci)
39519 ++int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
39520 + {
39521 + u32 command;
39522 + u32 state;
39523 +@@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci)
39524 + if (xhci->quirks & XHCI_INTEL_HOST)
39525 + udelay(1000);
39526 +
39527 +- ret = xhci_handshake(&xhci->op_regs->command,
39528 +- CMD_RESET, 0, 10 * 1000 * 1000);
39529 ++ ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
39530 + if (ret)
39531 + return ret;
39532 +
39533 +@@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci)
39534 + * xHCI cannot write to any doorbells or operational registers other
39535 + * than status until the "Controller Not Ready" flag is cleared.
39536 + */
39537 +- ret = xhci_handshake(&xhci->op_regs->status,
39538 +- STS_CNR, 0, 10 * 1000 * 1000);
39539 ++ ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
39540 +
39541 + xhci->usb2_rhub.bus_state.port_c_suspend = 0;
39542 + xhci->usb2_rhub.bus_state.suspended_ports = 0;
39543 +@@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
39544 + xhci->xhc_state |= XHCI_STATE_HALTED;
39545 + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
39546 + xhci_halt(xhci);
39547 +- xhci_reset(xhci);
39548 ++ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
39549 + spin_unlock_irq(&xhci->lock);
39550 +
39551 + xhci_cleanup_msix(xhci);
39552 +@@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
39553 + xhci_halt(xhci);
39554 + /* Workaround for spurious wakeups at shutdown with HSW */
39555 + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
39556 +- xhci_reset(xhci);
39557 ++ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
39558 + spin_unlock_irq(&xhci->lock);
39559 +
39560 + xhci_cleanup_msix(xhci);
39561 +@@ -1170,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
39562 + xhci_dbg(xhci, "Stop HCD\n");
39563 + xhci_halt(xhci);
39564 + xhci_zero_64b_regs(xhci);
39565 +- retval = xhci_reset(xhci);
39566 ++ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
39567 + spin_unlock_irq(&xhci->lock);
39568 + if (retval)
39569 + return retval;
39570 +@@ -5316,7 +5314,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
39571 +
39572 + xhci_dbg(xhci, "Resetting HCD\n");
39573 + /* Reset the internal HC memory state and registers. */
39574 +- retval = xhci_reset(xhci);
39575 ++ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
39576 + if (retval)
39577 + return retval;
39578 + xhci_dbg(xhci, "Reset complete\n");
39579 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
39580 +index 5a75fe5631238..bc0789229527f 100644
39581 +--- a/drivers/usb/host/xhci.h
39582 ++++ b/drivers/usb/host/xhci.h
39583 +@@ -229,6 +229,9 @@ struct xhci_op_regs {
39584 + #define CMD_ETE (1 << 14)
39585 + /* bits 15:31 are reserved (and should be preserved on writes). */
39586 +
39587 ++#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000)
39588 ++#define XHCI_RESET_SHORT_USEC (250 * 1000)
39589 ++
39590 + /* IMAN - Interrupt Management Register */
39591 + #define IMAN_IE (1 << 1)
39592 + #define IMAN_IP (1 << 0)
39593 +@@ -2083,11 +2086,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
39594 +
39595 + /* xHCI host controller glue */
39596 + typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
39597 +-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
39598 ++int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
39599 + void xhci_quiesce(struct xhci_hcd *xhci);
39600 + int xhci_halt(struct xhci_hcd *xhci);
39601 + int xhci_start(struct xhci_hcd *xhci);
39602 +-int xhci_reset(struct xhci_hcd *xhci);
39603 ++int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
39604 + int xhci_run(struct usb_hcd *hcd);
39605 + int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
39606 + void xhci_shutdown(struct usb_hcd *hcd);
39607 +@@ -2467,6 +2470,8 @@ static inline const char *xhci_decode_ctrl_ctx(char *str,
39608 + unsigned int bit;
39609 + int ret = 0;
39610 +
39611 ++ str[0] = '\0';
39612 ++
39613 + if (drop) {
39614 + ret = sprintf(str, "Drop:");
39615 + for_each_set_bit(bit, &drop, 32)
39616 +@@ -2624,8 +2629,11 @@ static inline const char *xhci_decode_usbsts(char *str, u32 usbsts)
39617 + {
39618 + int ret = 0;
39619 +
39620 ++ ret = sprintf(str, " 0x%08x", usbsts);
39621 ++
39622 + if (usbsts == ~(u32)0)
39623 +- return " 0xffffffff";
39624 ++ return str;
39625 ++
39626 + if (usbsts & STS_HALT)
39627 + ret += sprintf(str + ret, " HCHalted");
39628 + if (usbsts & STS_FATAL)
39629 +diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
39630 +index de5c012570603..ef8d1c73c7545 100644
39631 +--- a/drivers/usb/serial/Kconfig
39632 ++++ b/drivers/usb/serial/Kconfig
39633 +@@ -66,6 +66,7 @@ config USB_SERIAL_SIMPLE
39634 + - Libtransistor USB console
39635 + - a number of Motorola phones
39636 + - Motorola Tetra devices
39637 ++ - Nokia mobile phones
39638 + - Novatel Wireless GPS receivers
39639 + - Siemens USB/MPI adapter.
39640 + - ViVOtech ViVOpay USB device.
39641 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
39642 +index a70fd86f735ca..88b284d61681a 100644
39643 +--- a/drivers/usb/serial/pl2303.c
39644 ++++ b/drivers/usb/serial/pl2303.c
39645 +@@ -116,6 +116,7 @@ static const struct usb_device_id id_table[] = {
39646 + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
39647 + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
39648 + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
39649 ++ { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
39650 + { } /* Terminating entry */
39651 + };
39652 +
39653 +@@ -435,6 +436,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
39654 + case 0x105:
39655 + case 0x305:
39656 + case 0x405:
39657 ++ case 0x605:
39658 + /*
39659 + * Assume it's an HXN-type if the device doesn't
39660 + * support the old read request value.
39661 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
39662 +index 6097ee8fccb25..c5406452b774e 100644
39663 +--- a/drivers/usb/serial/pl2303.h
39664 ++++ b/drivers/usb/serial/pl2303.h
39665 +@@ -35,6 +35,9 @@
39666 + #define ATEN_PRODUCT_UC232B 0x2022
39667 + #define ATEN_PRODUCT_ID2 0x2118
39668 +
39669 ++#define IBM_VENDOR_ID 0x04b3
39670 ++#define IBM_PRODUCT_ID 0x4016
39671 ++
39672 + #define IODATA_VENDOR_ID 0x04bb
39673 + #define IODATA_PRODUCT_ID 0x0a03
39674 + #define IODATA_PRODUCT_ID_RSAQ5 0x0a0e
39675 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
39676 +index bd23a7cb1be2b..4c6747889a194 100644
39677 +--- a/drivers/usb/serial/usb-serial-simple.c
39678 ++++ b/drivers/usb/serial/usb-serial-simple.c
39679 +@@ -91,6 +91,11 @@ DEVICE(moto_modem, MOTO_IDS);
39680 + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
39681 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
39682 +
39683 ++/* Nokia mobile phone driver */
39684 ++#define NOKIA_IDS() \
39685 ++ { USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */
39686 ++DEVICE(nokia, NOKIA_IDS);
39687 ++
39688 + /* Novatel Wireless GPS driver */
39689 + #define NOVATEL_IDS() \
39690 + { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
39691 +@@ -123,6 +128,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
39692 + &vivopay_device,
39693 + &moto_modem_device,
39694 + &motorola_tetra_device,
39695 ++ &nokia_device,
39696 + &novatel_gps_device,
39697 + &hp4x_device,
39698 + &suunto_device,
39699 +@@ -140,6 +146,7 @@ static const struct usb_device_id id_table[] = {
39700 + VIVOPAY_IDS(),
39701 + MOTO_IDS(),
39702 + MOTOROLA_TETRA_IDS(),
39703 ++ NOKIA_IDS(),
39704 + NOVATEL_IDS(),
39705 + HP4X_IDS(),
39706 + SUUNTO_IDS(),
39707 +diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
39708 +index 5f7d678502be4..6012603f3630e 100644
39709 +--- a/drivers/usb/storage/ene_ub6250.c
39710 ++++ b/drivers/usb/storage/ene_ub6250.c
39711 +@@ -237,36 +237,33 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
39712 + #define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0))
39713 +
39714 +
39715 +-struct SD_STATUS {
39716 +- u8 Insert:1;
39717 +- u8 Ready:1;
39718 +- u8 MediaChange:1;
39719 +- u8 IsMMC:1;
39720 +- u8 HiCapacity:1;
39721 +- u8 HiSpeed:1;
39722 +- u8 WtP:1;
39723 +- u8 Reserved:1;
39724 +-};
39725 +-
39726 +-struct MS_STATUS {
39727 +- u8 Insert:1;
39728 +- u8 Ready:1;
39729 +- u8 MediaChange:1;
39730 +- u8 IsMSPro:1;
39731 +- u8 IsMSPHG:1;
39732 +- u8 Reserved1:1;
39733 +- u8 WtP:1;
39734 +- u8 Reserved2:1;
39735 +-};
39736 +-
39737 +-struct SM_STATUS {
39738 +- u8 Insert:1;
39739 +- u8 Ready:1;
39740 +- u8 MediaChange:1;
39741 +- u8 Reserved:3;
39742 +- u8 WtP:1;
39743 +- u8 IsMS:1;
39744 +-};
39745 ++/* SD_STATUS bits */
39746 ++#define SD_Insert BIT(0)
39747 ++#define SD_Ready BIT(1)
39748 ++#define SD_MediaChange BIT(2)
39749 ++#define SD_IsMMC BIT(3)
39750 ++#define SD_HiCapacity BIT(4)
39751 ++#define SD_HiSpeed BIT(5)
39752 ++#define SD_WtP BIT(6)
39753 ++ /* Bit 7 reserved */
39754 ++
39755 ++/* MS_STATUS bits */
39756 ++#define MS_Insert BIT(0)
39757 ++#define MS_Ready BIT(1)
39758 ++#define MS_MediaChange BIT(2)
39759 ++#define MS_IsMSPro BIT(3)
39760 ++#define MS_IsMSPHG BIT(4)
39761 ++ /* Bit 5 reserved */
39762 ++#define MS_WtP BIT(6)
39763 ++ /* Bit 7 reserved */
39764 ++
39765 ++/* SM_STATUS bits */
39766 ++#define SM_Insert BIT(0)
39767 ++#define SM_Ready BIT(1)
39768 ++#define SM_MediaChange BIT(2)
39769 ++ /* Bits 3-5 reserved */
39770 ++#define SM_WtP BIT(6)
39771 ++#define SM_IsMS BIT(7)
39772 +
39773 + struct ms_bootblock_cis {
39774 + u8 bCistplDEVICE[6]; /* 0 */
39775 +@@ -437,9 +434,9 @@ struct ene_ub6250_info {
39776 + u8 *bbuf;
39777 +
39778 + /* for 6250 code */
39779 +- struct SD_STATUS SD_Status;
39780 +- struct MS_STATUS MS_Status;
39781 +- struct SM_STATUS SM_Status;
39782 ++ u8 SD_Status;
39783 ++ u8 MS_Status;
39784 ++ u8 SM_Status;
39785 +
39786 + /* ----- SD Control Data ---------------- */
39787 + /*SD_REGISTER SD_Regs; */
39788 +@@ -602,7 +599,7 @@ static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
39789 + {
39790 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
39791 +
39792 +- if (info->SD_Status.Insert && info->SD_Status.Ready)
39793 ++ if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready))
39794 + return USB_STOR_TRANSPORT_GOOD;
39795 + else {
39796 + ene_sd_init(us);
39797 +@@ -622,7 +619,7 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
39798 + 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
39799 + 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
39800 +
39801 +- if (info->SD_Status.WtP)
39802 ++ if (info->SD_Status & SD_WtP)
39803 + usb_stor_set_xfer_buf(mediaWP, 12, srb);
39804 + else
39805 + usb_stor_set_xfer_buf(mediaNoWP, 12, srb);
39806 +@@ -641,9 +638,9 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
39807 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
39808 +
39809 + usb_stor_dbg(us, "sd_scsi_read_capacity\n");
39810 +- if (info->SD_Status.HiCapacity) {
39811 ++ if (info->SD_Status & SD_HiCapacity) {
39812 + bl_len = 0x200;
39813 +- if (info->SD_Status.IsMMC)
39814 ++ if (info->SD_Status & SD_IsMMC)
39815 + bl_num = info->HC_C_SIZE-1;
39816 + else
39817 + bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
39818 +@@ -693,7 +690,7 @@ static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
39819 + return USB_STOR_TRANSPORT_ERROR;
39820 + }
39821 +
39822 +- if (info->SD_Status.HiCapacity)
39823 ++ if (info->SD_Status & SD_HiCapacity)
39824 + bnByte = bn;
39825 +
39826 + /* set up the command wrapper */
39827 +@@ -733,7 +730,7 @@ static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
39828 + return USB_STOR_TRANSPORT_ERROR;
39829 + }
39830 +
39831 +- if (info->SD_Status.HiCapacity)
39832 ++ if (info->SD_Status & SD_HiCapacity)
39833 + bnByte = bn;
39834 +
39835 + /* set up the command wrapper */
39836 +@@ -1456,7 +1453,7 @@ static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
39837 + struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
39838 +
39839 + /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
39840 +- if (info->MS_Status.Insert && info->MS_Status.Ready) {
39841 ++ if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) {
39842 + return USB_STOR_TRANSPORT_GOOD;
39843 + } else {
39844 + ene_ms_init(us);
39845 +@@ -1476,7 +1473,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
39846 + 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
39847 + 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
39848 +
39849 +- if (info->MS_Status.WtP)
39850 ++ if (info->MS_Status & MS_WtP)
39851 + usb_stor_set_xfer_buf(mediaWP, 12, srb);
39852 + else
39853 + usb_stor_set_xfer_buf(mediaNoWP, 12, srb);
39854 +@@ -1495,7 +1492,7 @@ static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
39855 +
39856 + usb_stor_dbg(us, "ms_scsi_read_capacity\n");
39857 + bl_len = 0x200;
39858 +- if (info->MS_Status.IsMSPro)
39859 ++ if (info->MS_Status & MS_IsMSPro)
39860 + bl_num = info->MSP_TotalBlock - 1;
39861 + else
39862 + bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1;
39863 +@@ -1650,7 +1647,7 @@ static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
39864 + if (bn > info->bl_num)
39865 + return USB_STOR_TRANSPORT_ERROR;
39866 +
39867 +- if (info->MS_Status.IsMSPro) {
39868 ++ if (info->MS_Status & MS_IsMSPro) {
39869 + result = ene_load_bincode(us, MSP_RW_PATTERN);
39870 + if (result != USB_STOR_XFER_GOOD) {
39871 + usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n");
39872 +@@ -1751,7 +1748,7 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
39873 + if (bn > info->bl_num)
39874 + return USB_STOR_TRANSPORT_ERROR;
39875 +
39876 +- if (info->MS_Status.IsMSPro) {
39877 ++ if (info->MS_Status & MS_IsMSPro) {
39878 + result = ene_load_bincode(us, MSP_RW_PATTERN);
39879 + if (result != USB_STOR_XFER_GOOD) {
39880 + pr_info("Load MSP RW pattern Fail !!\n");
39881 +@@ -1859,12 +1856,12 @@ static int ene_get_card_status(struct us_data *us, u8 *buf)
39882 +
39883 + tmpreg = (u16) reg4b;
39884 + reg4b = *(u32 *)(&buf[0x14]);
39885 +- if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC)
39886 ++ if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC))
39887 + info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff;
39888 +
39889 + info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22);
39890 + info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07;
39891 +- if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC)
39892 ++ if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC))
39893 + info->HC_C_SIZE = *(u32 *)(&buf[0x100]);
39894 +
39895 + if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) {
39896 +@@ -2076,6 +2073,7 @@ static int ene_ms_init(struct us_data *us)
39897 + u16 MSP_BlockSize, MSP_UserAreaBlocks;
39898 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
39899 + u8 *bbuf = info->bbuf;
39900 ++ unsigned int s;
39901 +
39902 + printk(KERN_INFO "transport --- ENE_MSInit\n");
39903 +
39904 +@@ -2100,15 +2098,16 @@ static int ene_ms_init(struct us_data *us)
39905 + return USB_STOR_TRANSPORT_ERROR;
39906 + }
39907 + /* the same part to test ENE */
39908 +- info->MS_Status = *(struct MS_STATUS *) bbuf;
39909 +-
39910 +- if (info->MS_Status.Insert && info->MS_Status.Ready) {
39911 +- printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
39912 +- printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready);
39913 +- printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro);
39914 +- printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
39915 +- printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
39916 +- if (info->MS_Status.IsMSPro) {
39917 ++ info->MS_Status = bbuf[0];
39918 ++
39919 ++ s = info->MS_Status;
39920 ++ if ((s & MS_Insert) && (s & MS_Ready)) {
39921 ++ printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert));
39922 ++ printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready));
39923 ++ printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro));
39924 ++ printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG));
39925 ++ printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP));
39926 ++ if (s & MS_IsMSPro) {
39927 + MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
39928 + MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
39929 + info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
39930 +@@ -2169,17 +2168,17 @@ static int ene_sd_init(struct us_data *us)
39931 + return USB_STOR_TRANSPORT_ERROR;
39932 + }
39933 +
39934 +- info->SD_Status = *(struct SD_STATUS *) bbuf;
39935 +- if (info->SD_Status.Insert && info->SD_Status.Ready) {
39936 +- struct SD_STATUS *s = &info->SD_Status;
39937 ++ info->SD_Status = bbuf[0];
39938 ++ if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) {
39939 ++ unsigned int s = info->SD_Status;
39940 +
39941 + ene_get_card_status(us, bbuf);
39942 +- usb_stor_dbg(us, "Insert = %x\n", s->Insert);
39943 +- usb_stor_dbg(us, "Ready = %x\n", s->Ready);
39944 +- usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
39945 +- usb_stor_dbg(us, "HiCapacity = %x\n", s->HiCapacity);
39946 +- usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
39947 +- usb_stor_dbg(us, "WtP = %x\n", s->WtP);
39948 ++ usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert));
39949 ++ usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready));
39950 ++ usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC));
39951 ++ usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity));
39952 ++ usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed));
39953 ++ usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP));
39954 + } else {
39955 + usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
39956 + return USB_STOR_TRANSPORT_ERROR;
39957 +@@ -2201,14 +2200,14 @@ static int ene_init(struct us_data *us)
39958 +
39959 + misc_reg03 = bbuf[0];
39960 + if (misc_reg03 & 0x01) {
39961 +- if (!info->SD_Status.Ready) {
39962 ++ if (!(info->SD_Status & SD_Ready)) {
39963 + result = ene_sd_init(us);
39964 + if (result != USB_STOR_XFER_GOOD)
39965 + return USB_STOR_TRANSPORT_ERROR;
39966 + }
39967 + }
39968 + if (misc_reg03 & 0x02) {
39969 +- if (!info->MS_Status.Ready) {
39970 ++ if (!(info->MS_Status & MS_Ready)) {
39971 + result = ene_ms_init(us);
39972 + if (result != USB_STOR_XFER_GOOD)
39973 + return USB_STOR_TRANSPORT_ERROR;
39974 +@@ -2307,14 +2306,14 @@ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
39975 +
39976 + /*US_DEBUG(usb_stor_show_command(us, srb)); */
39977 + scsi_set_resid(srb, 0);
39978 +- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
39979 ++ if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready)))
39980 + result = ene_init(us);
39981 + if (result == USB_STOR_XFER_GOOD) {
39982 + result = USB_STOR_TRANSPORT_ERROR;
39983 +- if (info->SD_Status.Ready)
39984 ++ if (info->SD_Status & SD_Ready)
39985 + result = sd_scsi_irp(us, srb);
39986 +
39987 +- if (info->MS_Status.Ready)
39988 ++ if (info->MS_Status & MS_Ready)
39989 + result = ms_scsi_irp(us, srb);
39990 + }
39991 + return result;
39992 +@@ -2378,7 +2377,6 @@ static int ene_ub6250_probe(struct usb_interface *intf,
39993 +
39994 + static int ene_ub6250_resume(struct usb_interface *iface)
39995 + {
39996 +- u8 tmp = 0;
39997 + struct us_data *us = usb_get_intfdata(iface);
39998 + struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
39999 +
40000 +@@ -2390,17 +2388,16 @@ static int ene_ub6250_resume(struct usb_interface *iface)
40001 + mutex_unlock(&us->dev_mutex);
40002 +
40003 + info->Power_IsResum = true;
40004 +- /*info->SD_Status.Ready = 0; */
40005 +- info->SD_Status = *(struct SD_STATUS *)&tmp;
40006 +- info->MS_Status = *(struct MS_STATUS *)&tmp;
40007 +- info->SM_Status = *(struct SM_STATUS *)&tmp;
40008 ++ /* info->SD_Status &= ~SD_Ready; */
40009 ++ info->SD_Status = 0;
40010 ++ info->MS_Status = 0;
40011 ++ info->SM_Status = 0;
40012 +
40013 + return 0;
40014 + }
40015 +
40016 + static int ene_ub6250_reset_resume(struct usb_interface *iface)
40017 + {
40018 +- u8 tmp = 0;
40019 + struct us_data *us = usb_get_intfdata(iface);
40020 + struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
40021 +
40022 +@@ -2412,10 +2409,10 @@ static int ene_ub6250_reset_resume(struct usb_interface *iface)
40023 + * the device
40024 + */
40025 + info->Power_IsResum = true;
40026 +- /*info->SD_Status.Ready = 0; */
40027 +- info->SD_Status = *(struct SD_STATUS *)&tmp;
40028 +- info->MS_Status = *(struct MS_STATUS *)&tmp;
40029 +- info->SM_Status = *(struct SM_STATUS *)&tmp;
40030 ++ /* info->SD_Status &= ~SD_Ready; */
40031 ++ info->SD_Status = 0;
40032 ++ info->MS_Status = 0;
40033 ++ info->SM_Status = 0;
40034 +
40035 + return 0;
40036 + }
40037 +diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
40038 +index 3789698d9d3c6..0c423916d7bfa 100644
40039 +--- a/drivers/usb/storage/realtek_cr.c
40040 ++++ b/drivers/usb/storage/realtek_cr.c
40041 +@@ -365,7 +365,7 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
40042 +
40043 + buf = kmalloc(len, GFP_NOIO);
40044 + if (buf == NULL)
40045 +- return USB_STOR_TRANSPORT_ERROR;
40046 ++ return -ENOMEM;
40047 +
40048 + usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len);
40049 +
40050 +diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
40051 +index 7ffcda94d323a..16b4560216ba6 100644
40052 +--- a/drivers/usb/typec/tipd/core.c
40053 ++++ b/drivers/usb/typec/tipd/core.c
40054 +@@ -256,6 +256,10 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
40055 + typec_set_pwr_opmode(tps->port, mode);
40056 + typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status));
40057 + typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status));
40058 ++ if (TPS_STATUS_TO_UPSIDE_DOWN(status))
40059 ++ typec_set_orientation(tps->port, TYPEC_ORIENTATION_REVERSE);
40060 ++ else
40061 ++ typec_set_orientation(tps->port, TYPEC_ORIENTATION_NORMAL);
40062 + tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), true);
40063 +
40064 + tps->partner = typec_register_partner(tps->port, &desc);
40065 +@@ -278,6 +282,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
40066 + typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
40067 + typec_set_pwr_role(tps->port, TPS_STATUS_TO_TYPEC_PORTROLE(status));
40068 + typec_set_vconn_role(tps->port, TPS_STATUS_TO_TYPEC_VCONN(status));
40069 ++ typec_set_orientation(tps->port, TYPEC_ORIENTATION_NONE);
40070 + tps6598x_set_data_role(tps, TPS_STATUS_TO_TYPEC_DATAROLE(status), false);
40071 +
40072 + power_supply_changed(tps->psy);
40073 +diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
40074 +index 3dae84c524fb5..527857549d699 100644
40075 +--- a/drivers/usb/typec/tipd/tps6598x.h
40076 ++++ b/drivers/usb/typec/tipd/tps6598x.h
40077 +@@ -17,6 +17,7 @@
40078 + /* TPS_REG_STATUS bits */
40079 + #define TPS_STATUS_PLUG_PRESENT BIT(0)
40080 + #define TPS_STATUS_PLUG_UPSIDE_DOWN BIT(4)
40081 ++#define TPS_STATUS_TO_UPSIDE_DOWN(s) (!!((s) & TPS_STATUS_PLUG_UPSIDE_DOWN))
40082 + #define TPS_STATUS_PORTROLE BIT(5)
40083 + #define TPS_STATUS_TO_TYPEC_PORTROLE(s) (!!((s) & TPS_STATUS_PORTROLE))
40084 + #define TPS_STATUS_DATAROLE BIT(6)
40085 +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
40086 +index d0f91078600e9..9fe1071a96444 100644
40087 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
40088 ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
40089 +@@ -1669,7 +1669,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
40090 + return;
40091 +
40092 + if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
40093 +- if (!mvdev->cvq.ready)
40094 ++ if (!mvdev->wq || !mvdev->cvq.ready)
40095 + return;
40096 +
40097 + wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
40098 +@@ -2707,9 +2707,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
40099 + struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
40100 + struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
40101 + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
40102 ++ struct workqueue_struct *wq;
40103 +
40104 + mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
40105 +- destroy_workqueue(mvdev->wq);
40106 ++ wq = mvdev->wq;
40107 ++ mvdev->wq = NULL;
40108 ++ destroy_workqueue(wq);
40109 + _vdpa_unregister_device(dev);
40110 + mgtdev->ndev = NULL;
40111 + }
40112 +diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
40113 +index f948e6cd29939..2e6409cc11ada 100644
40114 +--- a/drivers/vfio/pci/vfio_pci_core.c
40115 ++++ b/drivers/vfio/pci/vfio_pci_core.c
40116 +@@ -228,6 +228,19 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat
40117 + if (!ret) {
40118 + /* D3 might be unsupported via quirk, skip unless in D3 */
40119 + if (needs_save && pdev->current_state >= PCI_D3hot) {
40120 ++ /*
40121 ++ * The current PCI state will be saved locally in
40122 ++ * 'pm_save' during the D3hot transition. When the
40123 ++ * device state is changed to D0 again with the current
40124 ++ * function, then pci_store_saved_state() will restore
40125 ++ * the state and will free the memory pointed by
40126 ++ * 'pm_save'. There are few cases where the PCI power
40127 ++ * state can be changed to D0 without the involvement
40128 ++ * of the driver. For these cases, free the earlier
40129 ++ * allocated memory first before overwriting 'pm_save'
40130 ++ * to prevent the memory leak.
40131 ++ */
40132 ++ kfree(vdev->pm_save);
40133 + vdev->pm_save = pci_store_saved_state(pdev);
40134 + } else if (needs_restore) {
40135 + pci_load_and_free_saved_state(pdev, &vdev->pm_save);
40136 +@@ -322,6 +335,17 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
40137 + /* For needs_reset */
40138 + lockdep_assert_held(&vdev->vdev.dev_set->lock);
40139 +
40140 ++ /*
40141 ++ * This function can be invoked while the power state is non-D0.
40142 ++ * This function calls __pci_reset_function_locked() which internally
40143 ++ * can use pci_pm_reset() for the function reset. pci_pm_reset() will
40144 ++ * fail if the power state is non-D0. Also, for the devices which
40145 ++ * have NoSoftRst-, the reset function can cause the PCI config space
40146 ++ * reset without restoring the original state (saved locally in
40147 ++ * 'vdev->pm_save').
40148 ++ */
40149 ++ vfio_pci_set_power_state(vdev, PCI_D0);
40150 ++
40151 + /* Stop the device from further DMA */
40152 + pci_clear_master(pdev);
40153 +
40154 +@@ -921,6 +945,19 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
40155 + return -EINVAL;
40156 +
40157 + vfio_pci_zap_and_down_write_memory_lock(vdev);
40158 ++
40159 ++ /*
40160 ++ * This function can be invoked while the power state is non-D0.
40161 ++ * If pci_try_reset_function() has been called while the power
40162 ++ * state is non-D0, then pci_try_reset_function() will
40163 ++ * internally set the power state to D0 without vfio driver
40164 ++ * involvement. For the devices which have NoSoftRst-, the
40165 ++ * reset function can cause the PCI config space reset without
40166 ++ * restoring the original state (saved locally in
40167 ++ * 'vdev->pm_save').
40168 ++ */
40169 ++ vfio_pci_set_power_state(vdev, PCI_D0);
40170 ++
40171 + ret = pci_try_reset_function(vdev->pdev);
40172 + up_write(&vdev->memory_lock);
40173 +
40174 +@@ -2055,6 +2092,18 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
40175 + }
40176 + cur_mem = NULL;
40177 +
40178 ++ /*
40179 ++ * The pci_reset_bus() will reset all the devices in the bus.
40180 ++ * The power state can be non-D0 for some of the devices in the bus.
40181 ++ * For these devices, the pci_reset_bus() will internally set
40182 ++ * the power state to D0 without vfio driver involvement.
40183 ++ * For the devices which have NoSoftRst-, the reset function can
40184 ++ * cause the PCI config space reset without restoring the original
40185 ++ * state (saved locally in 'vdev->pm_save').
40186 ++ */
40187 ++ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
40188 ++ vfio_pci_set_power_state(cur, PCI_D0);
40189 ++
40190 + ret = pci_reset_bus(pdev);
40191 +
40192 + err_undo:
40193 +@@ -2108,6 +2157,18 @@ static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
40194 + if (!pdev)
40195 + return false;
40196 +
40197 ++ /*
40198 ++ * The pci_reset_bus() will reset all the devices in the bus.
40199 ++ * The power state can be non-D0 for some of the devices in the bus.
40200 ++ * For these devices, the pci_reset_bus() will internally set
40201 ++ * the power state to D0 without vfio driver involvement.
40202 ++ * For the devices which have NoSoftRst-, the reset function can
40203 ++ * cause the PCI config space reset without restoring the original
40204 ++ * state (saved locally in 'vdev->pm_save').
40205 ++ */
40206 ++ list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
40207 ++ vfio_pci_set_power_state(cur, PCI_D0);
40208 ++
40209 + ret = pci_reset_bus(pdev);
40210 + if (ret)
40211 + return false;
40212 +diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
40213 +index 40b098320b2a7..5829cf2d0552d 100644
40214 +--- a/drivers/vhost/iotlb.c
40215 ++++ b/drivers/vhost/iotlb.c
40216 +@@ -62,8 +62,12 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
40217 + */
40218 + if (start == 0 && last == ULONG_MAX) {
40219 + u64 mid = last / 2;
40220 ++ int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
40221 ++ perm, opaque);
40222 ++
40223 ++ if (err)
40224 ++ return err;
40225 +
40226 +- vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
40227 + addr += mid + 1;
40228 + start = mid + 1;
40229 + }
40230 +diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
40231 +index e3812a8ff55a4..29e650ecfceb1 100644
40232 +--- a/drivers/video/fbdev/atafb.c
40233 ++++ b/drivers/video/fbdev/atafb.c
40234 +@@ -1683,9 +1683,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red,
40235 + ((blue & 0xfc00) >> 8));
40236 + if (regno < 16) {
40237 + shifter_tt.color_reg[regno] =
40238 +- (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
40239 +- (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
40240 +- ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
40241 ++ ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) |
40242 ++ ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) |
40243 ++ ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
40244 + ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
40245 + ((green & 0xfc00) >> 5) |
40246 + ((blue & 0xf800) >> 11));
40247 +@@ -1971,9 +1971,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red,
40248 + green >>= 12;
40249 + if (ATARIHW_PRESENT(EXTD_SHIFTER))
40250 + shifter_tt.color_reg[regno] =
40251 +- (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) |
40252 +- (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) |
40253 +- ((blue & 0xe) >> 1) | ((blue & 1) << 3);
40254 ++ ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) |
40255 ++ ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) |
40256 ++ ((blue & 0xe) >> 1) | ((blue & 1) << 3);
40257 + else
40258 + shifter_tt.color_reg[regno] =
40259 + ((red & 0xe) << 7) |
40260 +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
40261 +index 355b6120dc4f0..1fc8de4ecbebf 100644
40262 +--- a/drivers/video/fbdev/atmel_lcdfb.c
40263 ++++ b/drivers/video/fbdev/atmel_lcdfb.c
40264 +@@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
40265 +
40266 + INIT_LIST_HEAD(&info->modelist);
40267 +
40268 +- if (pdev->dev.of_node) {
40269 +- ret = atmel_lcdfb_of_init(sinfo);
40270 +- if (ret)
40271 +- goto free_info;
40272 +- } else {
40273 ++ if (!pdev->dev.of_node) {
40274 + dev_err(dev, "cannot get default configuration\n");
40275 + goto free_info;
40276 + }
40277 +
40278 ++ ret = atmel_lcdfb_of_init(sinfo);
40279 ++ if (ret)
40280 ++ goto free_info;
40281 ++
40282 ++ ret = -ENODEV;
40283 + if (!sinfo->config)
40284 + goto free_info;
40285 +
40286 +diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
40287 +index 93802abbbc72a..3d47c347b8970 100644
40288 +--- a/drivers/video/fbdev/cirrusfb.c
40289 ++++ b/drivers/video/fbdev/cirrusfb.c
40290 +@@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq)
40291 + return 0;
40292 + }
40293 +
40294 +-static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
40295 ++static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var,
40296 + struct fb_info *info)
40297 + {
40298 + long freq;
40299 +@@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
40300 + unsigned maxclockidx = var->bits_per_pixel >> 3;
40301 +
40302 + /* convert from ps to kHz */
40303 +- freq = PICOS2KHZ(var->pixclock);
40304 +-
40305 +- dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
40306 ++ freq = PICOS2KHZ(var->pixclock ? : 1);
40307 +
40308 + maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx];
40309 + cinfo->multiplexing = 0;
40310 +@@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
40311 + /* If the frequency is greater than we can support, we might be able
40312 + * to use multiplexing for the video mode */
40313 + if (freq > maxclock) {
40314 +- dev_err(info->device,
40315 +- "Frequency greater than maxclock (%ld kHz)\n",
40316 +- maxclock);
40317 +- return -EINVAL;
40318 ++ var->pixclock = KHZ2PICOS(maxclock);
40319 ++
40320 ++ while ((freq = PICOS2KHZ(var->pixclock)) > maxclock)
40321 ++ var->pixclock++;
40322 + }
40323 ++ dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
40324 ++
40325 + /*
40326 + * Additional constraint: 8bpp uses DAC clock doubling to allow maximum
40327 + * pixel clock
40328 +diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
40329 +index 509311471d515..bd59e7b11ed53 100644
40330 +--- a/drivers/video/fbdev/controlfb.c
40331 ++++ b/drivers/video/fbdev/controlfb.c
40332 +@@ -67,7 +67,9 @@
40333 + #define out_8(addr, val) (void)(val)
40334 + #define in_le32(addr) 0
40335 + #define out_le32(addr, val) (void)(val)
40336 ++#ifndef pgprot_cached_wthru
40337 + #define pgprot_cached_wthru(prot) (prot)
40338 ++#endif
40339 + #else
40340 + static void invalid_vram_cache(void __force *addr)
40341 + {
40342 +diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
40343 +index 55d2bd0ce5c02..64843464c6613 100644
40344 +--- a/drivers/video/fbdev/core/fbcvt.c
40345 ++++ b/drivers/video/fbdev/core/fbcvt.c
40346 +@@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt)
40347 + static void fb_cvt_print_name(struct fb_cvt_data *cvt)
40348 + {
40349 + u32 pixcount, pixcount_mod;
40350 +- int cnt = 255, offset = 0, read = 0;
40351 +- u8 *buf = kzalloc(256, GFP_KERNEL);
40352 ++ int size = 256;
40353 ++ int off = 0;
40354 ++ u8 *buf;
40355 +
40356 ++ buf = kzalloc(size, GFP_KERNEL);
40357 + if (!buf)
40358 + return;
40359 +
40360 +@@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt)
40361 + pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
40362 + pixcount_mod /= 1000;
40363 +
40364 +- read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
40365 +- cvt->xres, cvt->yres, cvt->refresh);
40366 +- offset += read;
40367 +- cnt -= read;
40368 ++ off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ",
40369 ++ cvt->xres, cvt->yres, cvt->refresh);
40370 +
40371 +- if (cvt->status)
40372 +- snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
40373 +- "Pixel Image\n", pixcount, pixcount_mod);
40374 +- else {
40375 +- if (pixcount) {
40376 +- read = snprintf(buf+offset, cnt, "%d", pixcount);
40377 +- cnt -= read;
40378 +- offset += read;
40379 +- }
40380 ++ if (cvt->status) {
40381 ++ off += scnprintf(buf + off, size - off,
40382 ++ "Not a CVT standard - %d.%03d Mega Pixel Image\n",
40383 ++ pixcount, pixcount_mod);
40384 ++ } else {
40385 ++ if (pixcount)
40386 ++ off += scnprintf(buf + off, size - off, "%d", pixcount);
40387 +
40388 +- read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
40389 +- cnt -= read;
40390 +- offset += read;
40391 ++ off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod);
40392 +
40393 + if (cvt->aspect_ratio == 0)
40394 +- read = snprintf(buf+offset, cnt, "3");
40395 ++ off += scnprintf(buf + off, size - off, "3");
40396 + else if (cvt->aspect_ratio == 3)
40397 +- read = snprintf(buf+offset, cnt, "4");
40398 ++ off += scnprintf(buf + off, size - off, "4");
40399 + else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
40400 +- read = snprintf(buf+offset, cnt, "9");
40401 ++ off += scnprintf(buf + off, size - off, "9");
40402 + else if (cvt->aspect_ratio == 2)
40403 +- read = snprintf(buf+offset, cnt, "A");
40404 +- else
40405 +- read = 0;
40406 +- cnt -= read;
40407 +- offset += read;
40408 +-
40409 +- if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
40410 +- read = snprintf(buf+offset, cnt, "-R");
40411 +- cnt -= read;
40412 +- offset += read;
40413 +- }
40414 ++ off += scnprintf(buf + off, size - off, "A");
40415 ++
40416 ++ if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
40417 ++ off += scnprintf(buf + off, size - off, "-R");
40418 + }
40419 +
40420 + printk(KERN_INFO "%s\n", buf);
40421 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
40422 +index 13083ad8d7515..ad9aac06427a0 100644
40423 +--- a/drivers/video/fbdev/core/fbmem.c
40424 ++++ b/drivers/video/fbdev/core/fbmem.c
40425 +@@ -25,6 +25,7 @@
40426 + #include <linux/init.h>
40427 + #include <linux/linux_logo.h>
40428 + #include <linux/proc_fs.h>
40429 ++#include <linux/platform_device.h>
40430 + #include <linux/seq_file.h>
40431 + #include <linux/console.h>
40432 + #include <linux/kmod.h>
40433 +@@ -1559,18 +1560,36 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
40434 + /* check all firmware fbs and kick off if the base addr overlaps */
40435 + for_each_registered_fb(i) {
40436 + struct apertures_struct *gen_aper;
40437 ++ struct device *device;
40438 +
40439 + if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
40440 + continue;
40441 +
40442 + gen_aper = registered_fb[i]->apertures;
40443 ++ device = registered_fb[i]->device;
40444 + if (fb_do_apertures_overlap(gen_aper, a) ||
40445 + (primary && gen_aper && gen_aper->count &&
40446 + gen_aper->ranges[0].base == VGA_FB_PHYS)) {
40447 +
40448 + printk(KERN_INFO "fb%d: switching to %s from %s\n",
40449 + i, name, registered_fb[i]->fix.id);
40450 +- do_unregister_framebuffer(registered_fb[i]);
40451 ++
40452 ++ /*
40453 ++ * If we kick-out a firmware driver, we also want to remove
40454 ++ * the underlying platform device, such as simple-framebuffer,
40455 ++ * VESA, EFI, etc. A native driver will then be able to
40456 ++ * allocate the memory range.
40457 ++ *
40458 ++ * If it's not a platform device, at least print a warning. A
40459 ++ * fix would add code to remove the device from the system.
40460 ++ */
40461 ++ if (dev_is_platform(device)) {
40462 ++ registered_fb[i]->forced_out = true;
40463 ++ platform_device_unregister(to_platform_device(device));
40464 ++ } else {
40465 ++ pr_warn("fb%d: cannot remove device\n", i);
40466 ++ do_unregister_framebuffer(registered_fb[i]);
40467 ++ }
40468 + }
40469 + }
40470 + }
40471 +@@ -1900,9 +1919,13 @@ EXPORT_SYMBOL(register_framebuffer);
40472 + void
40473 + unregister_framebuffer(struct fb_info *fb_info)
40474 + {
40475 +- mutex_lock(&registration_lock);
40476 ++ bool forced_out = fb_info->forced_out;
40477 ++
40478 ++ if (!forced_out)
40479 ++ mutex_lock(&registration_lock);
40480 + do_unregister_framebuffer(fb_info);
40481 +- mutex_unlock(&registration_lock);
40482 ++ if (!forced_out)
40483 ++ mutex_unlock(&registration_lock);
40484 + }
40485 + EXPORT_SYMBOL(unregister_framebuffer);
40486 +
40487 +diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
40488 +index 5c82611e93d99..236521b19daf7 100644
40489 +--- a/drivers/video/fbdev/matrox/matroxfb_base.c
40490 ++++ b/drivers/video/fbdev/matrox/matroxfb_base.c
40491 +@@ -1377,7 +1377,7 @@ static struct video_board vbG200 = {
40492 + .lowlevel = &matrox_G100
40493 + };
40494 + static struct video_board vbG200eW = {
40495 +- .maxvram = 0x800000,
40496 ++ .maxvram = 0x100000,
40497 + .maxdisplayable = 0x800000,
40498 + .accelID = FB_ACCEL_MATROX_MGAG200,
40499 + .lowlevel = &matrox_G100
40500 +diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c
40501 +index d7994a1732459..0b48965a6420c 100644
40502 +--- a/drivers/video/fbdev/nvidia/nv_i2c.c
40503 ++++ b/drivers/video/fbdev/nvidia/nv_i2c.c
40504 +@@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
40505 + {
40506 + int rc;
40507 +
40508 +- strcpy(chan->adapter.name, name);
40509 ++ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
40510 + chan->adapter.owner = THIS_MODULE;
40511 + chan->adapter.class = i2c_class;
40512 + chan->adapter.algo_data = &chan->algo;
40513 +diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
40514 +index 2fa436475b406..c8ad3ef42bd31 100644
40515 +--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
40516 ++++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
40517 +@@ -246,6 +246,7 @@ static int dvic_probe_of(struct platform_device *pdev)
40518 + adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
40519 + if (adapter_node) {
40520 + adapter = of_get_i2c_adapter_by_node(adapter_node);
40521 ++ of_node_put(adapter_node);
40522 + if (adapter == NULL) {
40523 + dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
40524 + omap_dss_put_device(ddata->in);
40525 +diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
40526 +index 4b0793abdd84b..a2c7c5cb15234 100644
40527 +--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
40528 ++++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
40529 +@@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
40530 + if (r)
40531 + return r;
40532 +
40533 +- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
40534 ++ return sysfs_emit(buf, "%d\n", errors);
40535 + }
40536 +
40537 + static ssize_t dsicm_hw_revision_show(struct device *dev,
40538 +@@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
40539 + if (r)
40540 + return r;
40541 +
40542 +- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
40543 ++ return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
40544 + }
40545 +
40546 + static ssize_t dsicm_store_ulps(struct device *dev,
40547 +@@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
40548 + t = ddata->ulps_enabled;
40549 + mutex_unlock(&ddata->lock);
40550 +
40551 +- return snprintf(buf, PAGE_SIZE, "%u\n", t);
40552 ++ return sysfs_emit(buf, "%u\n", t);
40553 + }
40554 +
40555 + static ssize_t dsicm_store_ulps_timeout(struct device *dev,
40556 +@@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
40557 + t = ddata->ulps_timeout;
40558 + mutex_unlock(&ddata->lock);
40559 +
40560 +- return snprintf(buf, PAGE_SIZE, "%u\n", t);
40561 ++ return sysfs_emit(buf, "%u\n", t);
40562 + }
40563 +
40564 + static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
40565 +diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
40566 +index 8d8b5ff7d43c8..3696eb09b69b4 100644
40567 +--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
40568 ++++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
40569 +@@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev,
40570 + int i;
40571 +
40572 + if (!ddata->has_cabc)
40573 +- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
40574 ++ return sysfs_emit(buf, "%s\n", cabc_modes[0]);
40575 +
40576 + for (i = 0, len = 0;
40577 + len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
40578 +diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
40579 +index afac1d9445aa2..57b7d1f490962 100644
40580 +--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
40581 ++++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
40582 +@@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev,
40583 + {
40584 + struct panel_drv_data *ddata = dev_get_drvdata(dev);
40585 +
40586 +- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
40587 ++ return sysfs_emit(buf, "%d\n", ddata->vmirror);
40588 + }
40589 +
40590 + static ssize_t tpo_td043_vmirror_store(struct device *dev,
40591 +@@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev,
40592 + {
40593 + struct panel_drv_data *ddata = dev_get_drvdata(dev);
40594 +
40595 +- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
40596 ++ return sysfs_emit(buf, "%d\n", ddata->mode);
40597 + }
40598 +
40599 + static ssize_t tpo_td043_mode_store(struct device *dev,
40600 +diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
40601 +index 0dbc6bf8268ac..092a1caa1208e 100644
40602 +--- a/drivers/video/fbdev/sm712fb.c
40603 ++++ b/drivers/video/fbdev/sm712fb.c
40604 +@@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
40605 + if (count + p > total_size)
40606 + count = total_size - p;
40607 +
40608 +- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
40609 ++ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
40610 + if (!buffer)
40611 + return -ENOMEM;
40612 +
40613 +@@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
40614 + while (count) {
40615 + c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
40616 + dst = buffer;
40617 +- for (i = c >> 2; i--;) {
40618 +- *dst = fb_readl(src++);
40619 +- *dst = big_swap(*dst);
40620 ++ for (i = (c + 3) >> 2; i--;) {
40621 ++ u32 val;
40622 ++
40623 ++ val = fb_readl(src);
40624 ++ *dst = big_swap(val);
40625 ++ src++;
40626 + dst++;
40627 + }
40628 +- if (c & 3) {
40629 +- u8 *dst8 = (u8 *)dst;
40630 +- u8 __iomem *src8 = (u8 __iomem *)src;
40631 +-
40632 +- for (i = c & 3; i--;) {
40633 +- if (i & 1) {
40634 +- *dst8++ = fb_readb(++src8);
40635 +- } else {
40636 +- *dst8++ = fb_readb(--src8);
40637 +- src8 += 2;
40638 +- }
40639 +- }
40640 +- src = (u32 __iomem *)src8;
40641 +- }
40642 +
40643 + if (copy_to_user(buf, buffer, c)) {
40644 + err = -EFAULT;
40645 +@@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
40646 + count = total_size - p;
40647 + }
40648 +
40649 +- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
40650 ++ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
40651 + if (!buffer)
40652 + return -ENOMEM;
40653 +
40654 +@@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
40655 + break;
40656 + }
40657 +
40658 +- for (i = c >> 2; i--;) {
40659 +- fb_writel(big_swap(*src), dst++);
40660 ++ for (i = (c + 3) >> 2; i--;) {
40661 ++ fb_writel(big_swap(*src), dst);
40662 ++ dst++;
40663 + src++;
40664 + }
40665 +- if (c & 3) {
40666 +- u8 *src8 = (u8 *)src;
40667 +- u8 __iomem *dst8 = (u8 __iomem *)dst;
40668 +-
40669 +- for (i = c & 3; i--;) {
40670 +- if (i & 1) {
40671 +- fb_writeb(*src8++, ++dst8);
40672 +- } else {
40673 +- fb_writeb(*src8++, --dst8);
40674 +- dst8 += 2;
40675 +- }
40676 +- }
40677 +- dst = (u32 __iomem *)dst8;
40678 +- }
40679 +
40680 + *ppos += c;
40681 + buf += c;
40682 +diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
40683 +index bfac3ee4a6422..28768c272b73d 100644
40684 +--- a/drivers/video/fbdev/smscufx.c
40685 ++++ b/drivers/video/fbdev/smscufx.c
40686 +@@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
40687 + info->par = dev;
40688 + info->pseudo_palette = dev->pseudo_palette;
40689 + info->fbops = &ufx_ops;
40690 ++ INIT_LIST_HEAD(&info->modelist);
40691 +
40692 + retval = fb_alloc_cmap(&info->cmap, 256, 0);
40693 + if (retval < 0) {
40694 +@@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
40695 + INIT_DELAYED_WORK(&dev->free_framebuffer_work,
40696 + ufx_free_framebuffer_work);
40697 +
40698 +- INIT_LIST_HEAD(&info->modelist);
40699 +-
40700 + retval = ufx_reg_read(dev, 0x3000, &id_rev);
40701 + check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
40702 + dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
40703 +diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
40704 +index b9cdd02c10009..90f48b71fd8f7 100644
40705 +--- a/drivers/video/fbdev/udlfb.c
40706 ++++ b/drivers/video/fbdev/udlfb.c
40707 +@@ -1426,7 +1426,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40708 + struct device_attribute *a, char *buf) {
40709 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
40710 + struct dlfb_data *dlfb = fb_info->par;
40711 +- return snprintf(buf, PAGE_SIZE, "%u\n",
40712 ++ return sysfs_emit(buf, "%u\n",
40713 + atomic_read(&dlfb->bytes_rendered));
40714 + }
40715 +
40716 +@@ -1434,7 +1434,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40717 + struct device_attribute *a, char *buf) {
40718 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
40719 + struct dlfb_data *dlfb = fb_info->par;
40720 +- return snprintf(buf, PAGE_SIZE, "%u\n",
40721 ++ return sysfs_emit(buf, "%u\n",
40722 + atomic_read(&dlfb->bytes_identical));
40723 + }
40724 +
40725 +@@ -1442,7 +1442,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40726 + struct device_attribute *a, char *buf) {
40727 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
40728 + struct dlfb_data *dlfb = fb_info->par;
40729 +- return snprintf(buf, PAGE_SIZE, "%u\n",
40730 ++ return sysfs_emit(buf, "%u\n",
40731 + atomic_read(&dlfb->bytes_sent));
40732 + }
40733 +
40734 +@@ -1450,7 +1450,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40735 + struct device_attribute *a, char *buf) {
40736 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
40737 + struct dlfb_data *dlfb = fb_info->par;
40738 +- return snprintf(buf, PAGE_SIZE, "%u\n",
40739 ++ return sysfs_emit(buf, "%u\n",
40740 + atomic_read(&dlfb->cpu_kcycles_used));
40741 + }
40742 +
40743 +diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
40744 +index d96ab28f8ce4a..4e641a780726e 100644
40745 +--- a/drivers/video/fbdev/w100fb.c
40746 ++++ b/drivers/video/fbdev/w100fb.c
40747 +@@ -770,12 +770,18 @@ out:
40748 + fb_dealloc_cmap(&info->cmap);
40749 + kfree(info->pseudo_palette);
40750 + }
40751 +- if (remapped_fbuf != NULL)
40752 ++ if (remapped_fbuf != NULL) {
40753 + iounmap(remapped_fbuf);
40754 +- if (remapped_regs != NULL)
40755 ++ remapped_fbuf = NULL;
40756 ++ }
40757 ++ if (remapped_regs != NULL) {
40758 + iounmap(remapped_regs);
40759 +- if (remapped_base != NULL)
40760 ++ remapped_regs = NULL;
40761 ++ }
40762 ++ if (remapped_base != NULL) {
40763 + iounmap(remapped_base);
40764 ++ remapped_base = NULL;
40765 ++ }
40766 + if (info)
40767 + framebuffer_release(info);
40768 + return err;
40769 +@@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev)
40770 + fb_dealloc_cmap(&info->cmap);
40771 +
40772 + iounmap(remapped_base);
40773 ++ remapped_base = NULL;
40774 + iounmap(remapped_regs);
40775 ++ remapped_regs = NULL;
40776 + iounmap(remapped_fbuf);
40777 ++ remapped_fbuf = NULL;
40778 +
40779 + framebuffer_release(info);
40780 +
40781 +diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
40782 +index 5419794fccf1e..423ea888d79af 100644
40783 +--- a/drivers/virt/acrn/hsm.c
40784 ++++ b/drivers/virt/acrn/hsm.c
40785 +@@ -136,8 +136,10 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
40786 + if (IS_ERR(vm_param))
40787 + return PTR_ERR(vm_param);
40788 +
40789 +- if ((vm_param->reserved0 | vm_param->reserved1) != 0)
40790 ++ if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
40791 ++ kfree(vm_param);
40792 + return -EINVAL;
40793 ++ }
40794 +
40795 + vm = acrn_vm_create(vm, vm_param);
40796 + if (!vm) {
40797 +@@ -182,21 +184,29 @@ static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
40798 + return PTR_ERR(cpu_regs);
40799 +
40800 + for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
40801 +- if (cpu_regs->reserved[i])
40802 ++ if (cpu_regs->reserved[i]) {
40803 ++ kfree(cpu_regs);
40804 + return -EINVAL;
40805 ++ }
40806 +
40807 + for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
40808 +- if (cpu_regs->vcpu_regs.reserved_32[i])
40809 ++ if (cpu_regs->vcpu_regs.reserved_32[i]) {
40810 ++ kfree(cpu_regs);
40811 + return -EINVAL;
40812 ++ }
40813 +
40814 + for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
40815 +- if (cpu_regs->vcpu_regs.reserved_64[i])
40816 ++ if (cpu_regs->vcpu_regs.reserved_64[i]) {
40817 ++ kfree(cpu_regs);
40818 + return -EINVAL;
40819 ++ }
40820 +
40821 + for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
40822 + if (cpu_regs->vcpu_regs.gdt.reserved[i] |
40823 +- cpu_regs->vcpu_regs.idt.reserved[i])
40824 ++ cpu_regs->vcpu_regs.idt.reserved[i]) {
40825 ++ kfree(cpu_regs);
40826 + return -EINVAL;
40827 ++ }
40828 +
40829 + ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
40830 + if (ret < 0)
40831 +diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
40832 +index c4f2e15c8a2ba..3b1b1e7a844b4 100644
40833 +--- a/drivers/virt/acrn/mm.c
40834 ++++ b/drivers/virt/acrn/mm.c
40835 +@@ -162,10 +162,34 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
40836 + void *remap_vaddr;
40837 + int ret, pinned;
40838 + u64 user_vm_pa;
40839 ++ unsigned long pfn;
40840 ++ struct vm_area_struct *vma;
40841 +
40842 + if (!vm || !memmap)
40843 + return -EINVAL;
40844 +
40845 ++ mmap_read_lock(current->mm);
40846 ++ vma = vma_lookup(current->mm, memmap->vma_base);
40847 ++ if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
40848 ++ if ((memmap->vma_base + memmap->len) > vma->vm_end) {
40849 ++ mmap_read_unlock(current->mm);
40850 ++ return -EINVAL;
40851 ++ }
40852 ++
40853 ++ ret = follow_pfn(vma, memmap->vma_base, &pfn);
40854 ++ mmap_read_unlock(current->mm);
40855 ++ if (ret < 0) {
40856 ++ dev_dbg(acrn_dev.this_device,
40857 ++ "Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
40858 ++ return ret;
40859 ++ }
40860 ++
40861 ++ return acrn_mm_region_add(vm, memmap->user_vm_pa,
40862 ++ PFN_PHYS(pfn), memmap->len,
40863 ++ ACRN_MEM_TYPE_WB, memmap->attr);
40864 ++ }
40865 ++ mmap_read_unlock(current->mm);
40866 ++
40867 + /* Get the page number of the map region */
40868 + nr_pages = memmap->len >> PAGE_SHIFT;
40869 + pages = vzalloc(nr_pages * sizeof(struct page *));
40870 +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
40871 +index 22f15f444f757..75c8d560bbd36 100644
40872 +--- a/drivers/virtio/virtio.c
40873 ++++ b/drivers/virtio/virtio.c
40874 +@@ -526,8 +526,9 @@ int virtio_device_restore(struct virtio_device *dev)
40875 + goto err;
40876 + }
40877 +
40878 +- /* Finally, tell the device we're all set */
40879 +- virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
40880 ++ /* If restore didn't do it, mark device DRIVER_OK ourselves. */
40881 ++ if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
40882 ++ virtio_device_ready(dev);
40883 +
40884 + virtio_config_enable(dev);
40885 +
40886 +diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
40887 +index fdbde1db5ec59..d724f676608ba 100644
40888 +--- a/drivers/virtio/virtio_pci_common.c
40889 ++++ b/drivers/virtio/virtio_pci_common.c
40890 +@@ -24,46 +24,17 @@ MODULE_PARM_DESC(force_legacy,
40891 + "Force legacy mode for transitional virtio 1 devices");
40892 + #endif
40893 +
40894 +-/* disable irq handlers */
40895 +-void vp_disable_cbs(struct virtio_device *vdev)
40896 ++/* wait for pending irq handlers */
40897 ++void vp_synchronize_vectors(struct virtio_device *vdev)
40898 + {
40899 + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
40900 + int i;
40901 +
40902 +- if (vp_dev->intx_enabled) {
40903 +- /*
40904 +- * The below synchronize() guarantees that any
40905 +- * interrupt for this line arriving after
40906 +- * synchronize_irq() has completed is guaranteed to see
40907 +- * intx_soft_enabled == false.
40908 +- */
40909 +- WRITE_ONCE(vp_dev->intx_soft_enabled, false);
40910 ++ if (vp_dev->intx_enabled)
40911 + synchronize_irq(vp_dev->pci_dev->irq);
40912 +- }
40913 +-
40914 +- for (i = 0; i < vp_dev->msix_vectors; ++i)
40915 +- disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
40916 +-}
40917 +-
40918 +-/* enable irq handlers */
40919 +-void vp_enable_cbs(struct virtio_device *vdev)
40920 +-{
40921 +- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
40922 +- int i;
40923 +-
40924 +- if (vp_dev->intx_enabled) {
40925 +- disable_irq(vp_dev->pci_dev->irq);
40926 +- /*
40927 +- * The above disable_irq() provides TSO ordering and
40928 +- * as such promotes the below store to store-release.
40929 +- */
40930 +- WRITE_ONCE(vp_dev->intx_soft_enabled, true);
40931 +- enable_irq(vp_dev->pci_dev->irq);
40932 +- return;
40933 +- }
40934 +
40935 + for (i = 0; i < vp_dev->msix_vectors; ++i)
40936 +- enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
40937 ++ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
40938 + }
40939 +
40940 + /* the notify function used when creating a virt queue */
40941 +@@ -113,9 +84,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
40942 + struct virtio_pci_device *vp_dev = opaque;
40943 + u8 isr;
40944 +
40945 +- if (!READ_ONCE(vp_dev->intx_soft_enabled))
40946 +- return IRQ_NONE;
40947 +-
40948 + /* reading the ISR has the effect of also clearing it so it's very
40949 + * important to save off the value. */
40950 + isr = ioread8(vp_dev->isr);
40951 +@@ -173,8 +141,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
40952 + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
40953 + "%s-config", name);
40954 + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
40955 +- vp_config_changed, IRQF_NO_AUTOEN,
40956 +- vp_dev->msix_names[v],
40957 ++ vp_config_changed, 0, vp_dev->msix_names[v],
40958 + vp_dev);
40959 + if (err)
40960 + goto error;
40961 +@@ -193,8 +160,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
40962 + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
40963 + "%s-virtqueues", name);
40964 + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
40965 +- vp_vring_interrupt, IRQF_NO_AUTOEN,
40966 +- vp_dev->msix_names[v],
40967 ++ vp_vring_interrupt, 0, vp_dev->msix_names[v],
40968 + vp_dev);
40969 + if (err)
40970 + goto error;
40971 +@@ -371,7 +337,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
40972 + "%s-%s",
40973 + dev_name(&vp_dev->vdev.dev), names[i]);
40974 + err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
40975 +- vring_interrupt, IRQF_NO_AUTOEN,
40976 ++ vring_interrupt, 0,
40977 + vp_dev->msix_names[msix_vec],
40978 + vqs[i]);
40979 + if (err)
40980 +diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
40981 +index 23f6c5c678d5e..eb17a29fc7ef1 100644
40982 +--- a/drivers/virtio/virtio_pci_common.h
40983 ++++ b/drivers/virtio/virtio_pci_common.h
40984 +@@ -63,7 +63,6 @@ struct virtio_pci_device {
40985 + /* MSI-X support */
40986 + int msix_enabled;
40987 + int intx_enabled;
40988 +- bool intx_soft_enabled;
40989 + cpumask_var_t *msix_affinity_masks;
40990 + /* Name strings for interrupts. This size should be enough,
40991 + * and I'm too lazy to allocate each name separately. */
40992 +@@ -102,10 +101,8 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
40993 + return container_of(vdev, struct virtio_pci_device, vdev);
40994 + }
40995 +
40996 +-/* disable irq handlers */
40997 +-void vp_disable_cbs(struct virtio_device *vdev);
40998 +-/* enable irq handlers */
40999 +-void vp_enable_cbs(struct virtio_device *vdev);
41000 ++/* wait for pending irq handlers */
41001 ++void vp_synchronize_vectors(struct virtio_device *vdev);
41002 + /* the notify function used when creating a virt queue */
41003 + bool vp_notify(struct virtqueue *vq);
41004 + /* the config->del_vqs() implementation */
41005 +diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
41006 +index 34141b9abe278..6f4e34ce96b81 100644
41007 +--- a/drivers/virtio/virtio_pci_legacy.c
41008 ++++ b/drivers/virtio/virtio_pci_legacy.c
41009 +@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
41010 + /* Flush out the status write, and flush in device writes,
41011 + * including MSi-X interrupts, if any. */
41012 + vp_legacy_get_status(&vp_dev->ldev);
41013 +- /* Disable VQ/configuration callbacks. */
41014 +- vp_disable_cbs(vdev);
41015 ++ /* Flush pending VQ/configuration callbacks. */
41016 ++ vp_synchronize_vectors(vdev);
41017 + }
41018 +
41019 + static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
41020 +@@ -185,7 +185,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
41021 + }
41022 +
41023 + static const struct virtio_config_ops virtio_pci_config_ops = {
41024 +- .enable_cbs = vp_enable_cbs,
41025 + .get = vp_get,
41026 + .set = vp_set,
41027 + .get_status = vp_get_status,
41028 +diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
41029 +index 5455bc041fb69..30654d3a0b41e 100644
41030 +--- a/drivers/virtio/virtio_pci_modern.c
41031 ++++ b/drivers/virtio/virtio_pci_modern.c
41032 +@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
41033 + */
41034 + while (vp_modern_get_status(mdev))
41035 + msleep(1);
41036 +- /* Disable VQ/configuration callbacks. */
41037 +- vp_disable_cbs(vdev);
41038 ++ /* Flush pending VQ/configuration callbacks. */
41039 ++ vp_synchronize_vectors(vdev);
41040 + }
41041 +
41042 + static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
41043 +@@ -380,7 +380,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
41044 + }
41045 +
41046 + static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
41047 +- .enable_cbs = vp_enable_cbs,
41048 + .get = NULL,
41049 + .set = NULL,
41050 + .generation = vp_generation,
41051 +@@ -398,7 +397,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
41052 + };
41053 +
41054 + static const struct virtio_config_ops virtio_pci_config_ops = {
41055 +- .enable_cbs = vp_enable_cbs,
41056 + .get = vp_get,
41057 + .set = vp_set,
41058 + .generation = vp_generation,
41059 +diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
41060 +index 117bc2a8eb0a4..db843f8258602 100644
41061 +--- a/drivers/watchdog/rti_wdt.c
41062 ++++ b/drivers/watchdog/rti_wdt.c
41063 +@@ -228,6 +228,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
41064 + ret = pm_runtime_get_sync(dev);
41065 + if (ret) {
41066 + pm_runtime_put_noidle(dev);
41067 ++ pm_runtime_disable(&pdev->dev);
41068 + return dev_err_probe(dev, ret, "runtime pm failed\n");
41069 + }
41070 +
41071 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41072 +index d61543fbd6528..11273b70271d1 100644
41073 +--- a/fs/binfmt_elf.c
41074 ++++ b/fs/binfmt_elf.c
41075 +@@ -170,8 +170,8 @@ static int padzero(unsigned long elf_bss)
41076 +
41077 + static int
41078 + create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
41079 +- unsigned long load_addr, unsigned long interp_load_addr,
41080 +- unsigned long e_entry)
41081 ++ unsigned long interp_load_addr,
41082 ++ unsigned long e_entry, unsigned long phdr_addr)
41083 + {
41084 + struct mm_struct *mm = current->mm;
41085 + unsigned long p = bprm->p;
41086 +@@ -257,7 +257,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
41087 + NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
41088 + NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
41089 + NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
41090 +- NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
41091 ++ NEW_AUX_ENT(AT_PHDR, phdr_addr);
41092 + NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
41093 + NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
41094 + NEW_AUX_ENT(AT_BASE, interp_load_addr);
41095 +@@ -823,7 +823,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
41096 + static int load_elf_binary(struct linux_binprm *bprm)
41097 + {
41098 + struct file *interpreter = NULL; /* to shut gcc up */
41099 +- unsigned long load_addr = 0, load_bias = 0;
41100 ++ unsigned long load_addr, load_bias = 0, phdr_addr = 0;
41101 + int load_addr_set = 0;
41102 + unsigned long error;
41103 + struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
41104 +@@ -1180,6 +1180,17 @@ out_free_interp:
41105 + reloc_func_desc = load_bias;
41106 + }
41107 + }
41108 ++
41109 ++ /*
41110 ++ * Figure out which segment in the file contains the Program
41111 ++ * Header table, and map to the associated memory address.
41112 ++ */
41113 ++ if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
41114 ++ elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
41115 ++ phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
41116 ++ elf_ppnt->p_vaddr;
41117 ++ }
41118 ++
41119 + k = elf_ppnt->p_vaddr;
41120 + if ((elf_ppnt->p_flags & PF_X) && k < start_code)
41121 + start_code = k;
41122 +@@ -1215,6 +1226,7 @@ out_free_interp:
41123 + }
41124 +
41125 + e_entry = elf_ex->e_entry + load_bias;
41126 ++ phdr_addr += load_bias;
41127 + elf_bss += load_bias;
41128 + elf_brk += load_bias;
41129 + start_code += load_bias;
41130 +@@ -1278,8 +1290,8 @@ out_free_interp:
41131 + goto out;
41132 + #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
41133 +
41134 +- retval = create_elf_tables(bprm, elf_ex,
41135 +- load_addr, interp_load_addr, e_entry);
41136 ++ retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
41137 ++ e_entry, phdr_addr);
41138 + if (retval < 0)
41139 + goto out;
41140 +
41141 +@@ -1630,17 +1642,16 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
41142 + * long file_ofs
41143 + * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
41144 + */
41145 +-static int fill_files_note(struct memelfnote *note)
41146 ++static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
41147 + {
41148 +- struct mm_struct *mm = current->mm;
41149 +- struct vm_area_struct *vma;
41150 + unsigned count, size, names_ofs, remaining, n;
41151 + user_long_t *data;
41152 + user_long_t *start_end_ofs;
41153 + char *name_base, *name_curpos;
41154 ++ int i;
41155 +
41156 + /* *Estimated* file count and total data size needed */
41157 +- count = mm->map_count;
41158 ++ count = cprm->vma_count;
41159 + if (count > UINT_MAX / 64)
41160 + return -EINVAL;
41161 + size = count * 64;
41162 +@@ -1662,11 +1673,12 @@ static int fill_files_note(struct memelfnote *note)
41163 + name_base = name_curpos = ((char *)data) + names_ofs;
41164 + remaining = size - names_ofs;
41165 + count = 0;
41166 +- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
41167 ++ for (i = 0; i < cprm->vma_count; i++) {
41168 ++ struct core_vma_metadata *m = &cprm->vma_meta[i];
41169 + struct file *file;
41170 + const char *filename;
41171 +
41172 +- file = vma->vm_file;
41173 ++ file = m->file;
41174 + if (!file)
41175 + continue;
41176 + filename = file_path(file, name_curpos, remaining);
41177 +@@ -1686,9 +1698,9 @@ static int fill_files_note(struct memelfnote *note)
41178 + memmove(name_curpos, filename, n);
41179 + name_curpos += n;
41180 +
41181 +- *start_end_ofs++ = vma->vm_start;
41182 +- *start_end_ofs++ = vma->vm_end;
41183 +- *start_end_ofs++ = vma->vm_pgoff;
41184 ++ *start_end_ofs++ = m->start;
41185 ++ *start_end_ofs++ = m->end;
41186 ++ *start_end_ofs++ = m->pgoff;
41187 + count++;
41188 + }
41189 +
41190 +@@ -1699,7 +1711,7 @@ static int fill_files_note(struct memelfnote *note)
41191 + * Count usually is less than mm->map_count,
41192 + * we need to move filenames down.
41193 + */
41194 +- n = mm->map_count - count;
41195 ++ n = cprm->vma_count - count;
41196 + if (n != 0) {
41197 + unsigned shift_bytes = n * 3 * sizeof(data[0]);
41198 + memmove(name_base - shift_bytes, name_base,
41199 +@@ -1811,7 +1823,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
41200 +
41201 + static int fill_note_info(struct elfhdr *elf, int phdrs,
41202 + struct elf_note_info *info,
41203 +- const kernel_siginfo_t *siginfo, struct pt_regs *regs)
41204 ++ struct coredump_params *cprm)
41205 + {
41206 + struct task_struct *dump_task = current;
41207 + const struct user_regset_view *view = task_user_regset_view(dump_task);
41208 +@@ -1883,7 +1895,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
41209 + * Now fill in each thread's information.
41210 + */
41211 + for (t = info->thread; t != NULL; t = t->next)
41212 +- if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
41213 ++ if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, &info->size))
41214 + return 0;
41215 +
41216 + /*
41217 +@@ -1892,13 +1904,13 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
41218 + fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
41219 + info->size += notesize(&info->psinfo);
41220 +
41221 +- fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
41222 ++ fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
41223 + info->size += notesize(&info->signote);
41224 +
41225 + fill_auxv_note(&info->auxv, current->mm);
41226 + info->size += notesize(&info->auxv);
41227 +
41228 +- if (fill_files_note(&info->files) == 0)
41229 ++ if (fill_files_note(&info->files, cprm) == 0)
41230 + info->size += notesize(&info->files);
41231 +
41232 + return 1;
41233 +@@ -2040,7 +2052,7 @@ static int elf_note_info_init(struct elf_note_info *info)
41234 +
41235 + static int fill_note_info(struct elfhdr *elf, int phdrs,
41236 + struct elf_note_info *info,
41237 +- const kernel_siginfo_t *siginfo, struct pt_regs *regs)
41238 ++ struct coredump_params *cprm)
41239 + {
41240 + struct core_thread *ct;
41241 + struct elf_thread_status *ets;
41242 +@@ -2061,13 +2073,13 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
41243 + list_for_each_entry(ets, &info->thread_list, list) {
41244 + int sz;
41245 +
41246 +- sz = elf_dump_thread_status(siginfo->si_signo, ets);
41247 ++ sz = elf_dump_thread_status(cprm->siginfo->si_signo, ets);
41248 + info->thread_status_size += sz;
41249 + }
41250 + /* now collect the dump for the current */
41251 + memset(info->prstatus, 0, sizeof(*info->prstatus));
41252 +- fill_prstatus(&info->prstatus->common, current, siginfo->si_signo);
41253 +- elf_core_copy_regs(&info->prstatus->pr_reg, regs);
41254 ++ fill_prstatus(&info->prstatus->common, current, cprm->siginfo->si_signo);
41255 ++ elf_core_copy_regs(&info->prstatus->pr_reg, cprm->regs);
41256 +
41257 + /* Set up header */
41258 + fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
41259 +@@ -2083,18 +2095,18 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
41260 + fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
41261 + sizeof(*info->psinfo), info->psinfo);
41262 +
41263 +- fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
41264 ++ fill_siginfo_note(info->notes + 2, &info->csigdata, cprm->siginfo);
41265 + fill_auxv_note(info->notes + 3, current->mm);
41266 + info->numnote = 4;
41267 +
41268 +- if (fill_files_note(info->notes + info->numnote) == 0) {
41269 ++ if (fill_files_note(info->notes + info->numnote, cprm) == 0) {
41270 + info->notes_files = info->notes + info->numnote;
41271 + info->numnote++;
41272 + }
41273 +
41274 + /* Try to dump the FPU. */
41275 +- info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
41276 +- info->fpu);
41277 ++ info->prstatus->pr_fpvalid =
41278 ++ elf_core_copy_task_fpregs(current, cprm->regs, info->fpu);
41279 + if (info->prstatus->pr_fpvalid)
41280 + fill_note(info->notes + info->numnote++,
41281 + "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
41282 +@@ -2180,8 +2192,7 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41283 + static int elf_core_dump(struct coredump_params *cprm)
41284 + {
41285 + int has_dumped = 0;
41286 +- int vma_count, segs, i;
41287 +- size_t vma_data_size;
41288 ++ int segs, i;
41289 + struct elfhdr elf;
41290 + loff_t offset = 0, dataoff;
41291 + struct elf_note_info info = { };
41292 +@@ -2189,16 +2200,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41293 + struct elf_shdr *shdr4extnum = NULL;
41294 + Elf_Half e_phnum;
41295 + elf_addr_t e_shoff;
41296 +- struct core_vma_metadata *vma_meta;
41297 +-
41298 +- if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
41299 +- return 0;
41300 +
41301 + /*
41302 + * The number of segs are recored into ELF header as 16bit value.
41303 + * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
41304 + */
41305 +- segs = vma_count + elf_core_extra_phdrs();
41306 ++ segs = cprm->vma_count + elf_core_extra_phdrs();
41307 +
41308 + /* for notes section */
41309 + segs++;
41310 +@@ -2212,7 +2219,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41311 + * Collect all the non-memory information about the process for the
41312 + * notes. This also sets up the file header.
41313 + */
41314 +- if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
41315 ++ if (!fill_note_info(&elf, e_phnum, &info, cprm))
41316 + goto end_coredump;
41317 +
41318 + has_dumped = 1;
41319 +@@ -2237,7 +2244,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41320 +
41321 + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41322 +
41323 +- offset += vma_data_size;
41324 ++ offset += cprm->vma_data_size;
41325 + offset += elf_core_extra_data_size();
41326 + e_shoff = offset;
41327 +
41328 +@@ -2257,8 +2264,8 @@ static int elf_core_dump(struct coredump_params *cprm)
41329 + goto end_coredump;
41330 +
41331 + /* Write program headers for segments dump */
41332 +- for (i = 0; i < vma_count; i++) {
41333 +- struct core_vma_metadata *meta = vma_meta + i;
41334 ++ for (i = 0; i < cprm->vma_count; i++) {
41335 ++ struct core_vma_metadata *meta = cprm->vma_meta + i;
41336 + struct elf_phdr phdr;
41337 +
41338 + phdr.p_type = PT_LOAD;
41339 +@@ -2295,8 +2302,8 @@ static int elf_core_dump(struct coredump_params *cprm)
41340 + /* Align to page */
41341 + dump_skip_to(cprm, dataoff);
41342 +
41343 +- for (i = 0; i < vma_count; i++) {
41344 +- struct core_vma_metadata *meta = vma_meta + i;
41345 ++ for (i = 0; i < cprm->vma_count; i++) {
41346 ++ struct core_vma_metadata *meta = cprm->vma_meta + i;
41347 +
41348 + if (!dump_user_range(cprm, meta->start, meta->dump_size))
41349 + goto end_coredump;
41350 +@@ -2313,7 +2320,6 @@ static int elf_core_dump(struct coredump_params *cprm)
41351 + end_coredump:
41352 + free_note_info(&info);
41353 + kfree(shdr4extnum);
41354 +- kvfree(vma_meta);
41355 + kfree(phdr4note);
41356 + return has_dumped;
41357 + }
41358 +diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
41359 +index c6f588dc4a9db..1a25536b01201 100644
41360 +--- a/fs/binfmt_elf_fdpic.c
41361 ++++ b/fs/binfmt_elf_fdpic.c
41362 +@@ -1465,7 +1465,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm,
41363 + static int elf_fdpic_core_dump(struct coredump_params *cprm)
41364 + {
41365 + int has_dumped = 0;
41366 +- int vma_count, segs;
41367 ++ int segs;
41368 + int i;
41369 + struct elfhdr *elf = NULL;
41370 + loff_t offset = 0, dataoff;
41371 +@@ -1480,8 +1480,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41372 + elf_addr_t e_shoff;
41373 + struct core_thread *ct;
41374 + struct elf_thread_status *tmp;
41375 +- struct core_vma_metadata *vma_meta = NULL;
41376 +- size_t vma_data_size;
41377 +
41378 + /* alloc memory for large data structures: too large to be on stack */
41379 + elf = kmalloc(sizeof(*elf), GFP_KERNEL);
41380 +@@ -1491,9 +1489,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41381 + if (!psinfo)
41382 + goto end_coredump;
41383 +
41384 +- if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
41385 +- goto end_coredump;
41386 +-
41387 + for (ct = current->signal->core_state->dumper.next;
41388 + ct; ct = ct->next) {
41389 + tmp = elf_dump_thread_status(cprm->siginfo->si_signo,
41390 +@@ -1513,7 +1508,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41391 + tmp->next = thread_list;
41392 + thread_list = tmp;
41393 +
41394 +- segs = vma_count + elf_core_extra_phdrs();
41395 ++ segs = cprm->vma_count + elf_core_extra_phdrs();
41396 +
41397 + /* for notes section */
41398 + segs++;
41399 +@@ -1558,7 +1553,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41400 + /* Page-align dumped data */
41401 + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41402 +
41403 +- offset += vma_data_size;
41404 ++ offset += cprm->vma_data_size;
41405 + offset += elf_core_extra_data_size();
41406 + e_shoff = offset;
41407 +
41408 +@@ -1578,8 +1573,8 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41409 + goto end_coredump;
41410 +
41411 + /* write program headers for segments dump */
41412 +- for (i = 0; i < vma_count; i++) {
41413 +- struct core_vma_metadata *meta = vma_meta + i;
41414 ++ for (i = 0; i < cprm->vma_count; i++) {
41415 ++ struct core_vma_metadata *meta = cprm->vma_meta + i;
41416 + struct elf_phdr phdr;
41417 + size_t sz;
41418 +
41419 +@@ -1628,7 +1623,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
41420 +
41421 + dump_skip_to(cprm, dataoff);
41422 +
41423 +- if (!elf_fdpic_dump_segments(cprm, vma_meta, vma_count))
41424 ++ if (!elf_fdpic_dump_segments(cprm, cprm->vma_meta, cprm->vma_count))
41425 + goto end_coredump;
41426 +
41427 + if (!elf_core_write_extra_data(cprm))
41428 +@@ -1652,7 +1647,6 @@ end_coredump:
41429 + thread_list = thread_list->next;
41430 + kfree(tmp);
41431 + }
41432 +- kvfree(vma_meta);
41433 + kfree(phdr4note);
41434 + kfree(elf);
41435 + kfree(psinfo);
41436 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
41437 +index 8202ad6aa1317..a0aa6c7e23351 100644
41438 +--- a/fs/btrfs/block-group.c
41439 ++++ b/fs/btrfs/block-group.c
41440 +@@ -1522,8 +1522,12 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
41441 + if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
41442 + return;
41443 +
41444 +- if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
41445 ++ sb_start_write(fs_info->sb);
41446 ++
41447 ++ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
41448 ++ sb_end_write(fs_info->sb);
41449 + return;
41450 ++ }
41451 +
41452 + /*
41453 + * Long running balances can keep us blocked here for eternity, so
41454 +@@ -1531,6 +1535,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
41455 + */
41456 + if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
41457 + btrfs_exclop_finish(fs_info);
41458 ++ sb_end_write(fs_info->sb);
41459 + return;
41460 + }
41461 +
41462 +@@ -1605,6 +1610,7 @@ next:
41463 + spin_unlock(&fs_info->unused_bgs_lock);
41464 + mutex_unlock(&fs_info->reclaim_bgs_lock);
41465 + btrfs_exclop_finish(fs_info);
41466 ++ sb_end_write(fs_info->sb);
41467 + }
41468 +
41469 + void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
41470 +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
41471 +index 71e5b2e9a1ba8..6158b870a269d 100644
41472 +--- a/fs/btrfs/compression.c
41473 ++++ b/fs/btrfs/compression.c
41474 +@@ -808,7 +808,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
41475 + u64 em_len;
41476 + u64 em_start;
41477 + struct extent_map *em;
41478 +- blk_status_t ret = BLK_STS_RESOURCE;
41479 ++ blk_status_t ret;
41480 + int faili = 0;
41481 + u8 *sums;
41482 +
41483 +@@ -821,14 +821,18 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
41484 + read_lock(&em_tree->lock);
41485 + em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
41486 + read_unlock(&em_tree->lock);
41487 +- if (!em)
41488 +- return BLK_STS_IOERR;
41489 ++ if (!em) {
41490 ++ ret = BLK_STS_IOERR;
41491 ++ goto out;
41492 ++ }
41493 +
41494 + ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
41495 + compressed_len = em->block_len;
41496 + cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
41497 +- if (!cb)
41498 ++ if (!cb) {
41499 ++ ret = BLK_STS_RESOURCE;
41500 + goto out;
41501 ++ }
41502 +
41503 + refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
41504 + cb->errors = 0;
41505 +@@ -851,8 +855,10 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
41506 + nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
41507 + cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
41508 + GFP_NOFS);
41509 +- if (!cb->compressed_pages)
41510 ++ if (!cb->compressed_pages) {
41511 ++ ret = BLK_STS_RESOURCE;
41512 + goto fail1;
41513 ++ }
41514 +
41515 + for (pg_index = 0; pg_index < nr_pages; pg_index++) {
41516 + cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
41517 +@@ -938,7 +944,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
41518 + comp_bio = NULL;
41519 + }
41520 + }
41521 +- return 0;
41522 ++ return BLK_STS_OK;
41523 +
41524 + fail2:
41525 + while (faili >= 0) {
41526 +@@ -951,6 +957,8 @@ fail1:
41527 + kfree(cb);
41528 + out:
41529 + free_extent_map(em);
41530 ++ bio->bi_status = ret;
41531 ++ bio_endio(bio);
41532 + return ret;
41533 + finish_cb:
41534 + if (comp_bio) {
41535 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
41536 +index 48590a3807621..117afcda5affb 100644
41537 +--- a/fs/btrfs/disk-io.c
41538 ++++ b/fs/btrfs/disk-io.c
41539 +@@ -441,17 +441,31 @@ static int csum_one_extent_buffer(struct extent_buffer *eb)
41540 + else
41541 + ret = btrfs_check_leaf_full(eb);
41542 +
41543 +- if (ret < 0) {
41544 +- btrfs_print_tree(eb, 0);
41545 ++ if (ret < 0)
41546 ++ goto error;
41547 ++
41548 ++ /*
41549 ++ * Also check the generation, the eb reached here must be newer than
41550 ++ * last committed. Or something seriously wrong happened.
41551 ++ */
41552 ++ if (unlikely(btrfs_header_generation(eb) <= fs_info->last_trans_committed)) {
41553 ++ ret = -EUCLEAN;
41554 + btrfs_err(fs_info,
41555 +- "block=%llu write time tree block corruption detected",
41556 +- eb->start);
41557 +- WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
41558 +- return ret;
41559 ++ "block=%llu bad generation, have %llu expect > %llu",
41560 ++ eb->start, btrfs_header_generation(eb),
41561 ++ fs_info->last_trans_committed);
41562 ++ goto error;
41563 + }
41564 + write_extent_buffer(eb, result, 0, fs_info->csum_size);
41565 +
41566 + return 0;
41567 ++
41568 ++error:
41569 ++ btrfs_print_tree(eb, 0);
41570 ++ btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
41571 ++ eb->start);
41572 ++ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
41573 ++ return ret;
41574 + }
41575 +
41576 + /* Checksum all dirty extent buffers in one bio_vec */
41577 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
41578 +index 4c91060d103ae..99028984340aa 100644
41579 +--- a/fs/btrfs/extent_io.c
41580 ++++ b/fs/btrfs/extent_io.c
41581 +@@ -2639,7 +2639,6 @@ int btrfs_repair_one_sector(struct inode *inode,
41582 + const int icsum = bio_offset >> fs_info->sectorsize_bits;
41583 + struct bio *repair_bio;
41584 + struct btrfs_bio *repair_bbio;
41585 +- blk_status_t status;
41586 +
41587 + btrfs_debug(fs_info,
41588 + "repair read error: read error at %llu", start);
41589 +@@ -2678,13 +2677,13 @@ int btrfs_repair_one_sector(struct inode *inode,
41590 + "repair read error: submitting new read to mirror %d",
41591 + failrec->this_mirror);
41592 +
41593 +- status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
41594 +- failrec->bio_flags);
41595 +- if (status) {
41596 +- free_io_failure(failure_tree, tree, failrec);
41597 +- bio_put(repair_bio);
41598 +- }
41599 +- return blk_status_to_errno(status);
41600 ++ /*
41601 ++ * At this point we have a bio, so any errors from submit_bio_hook()
41602 ++ * will be handled by the endio on the repair_bio, so we can't return an
41603 ++ * error here.
41604 ++ */
41605 ++ submit_bio_hook(inode, repair_bio, failrec->this_mirror, failrec->bio_flags);
41606 ++ return BLK_STS_OK;
41607 + }
41608 +
41609 + static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
41610 +@@ -4780,11 +4779,12 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
41611 + return ret;
41612 + }
41613 + if (cache) {
41614 +- /* Impiles write in zoned mode */
41615 +- btrfs_put_block_group(cache);
41616 +- /* Mark the last eb in a block group */
41617 ++ /*
41618 ++ * Implies write in zoned mode. Mark the last eb in a block group.
41619 ++ */
41620 + if (cache->seq_zone && eb->start + eb->len == cache->zone_capacity)
41621 + set_bit(EXTENT_BUFFER_ZONE_FINISH, &eb->bflags);
41622 ++ btrfs_put_block_group(cache);
41623 + }
41624 + ret = write_one_eb(eb, wbc, epd);
41625 + free_extent_buffer(eb);
41626 +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
41627 +index 90c5c38836ab3..77c8f298f52e2 100644
41628 +--- a/fs/btrfs/file-item.c
41629 ++++ b/fs/btrfs/file-item.c
41630 +@@ -305,7 +305,7 @@ found:
41631 + read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
41632 + ret * csum_size);
41633 + out:
41634 +- if (ret == -ENOENT)
41635 ++ if (ret == -ENOENT || ret == -EFBIG)
41636 + ret = 0;
41637 + return ret;
41638 + }
41639 +@@ -368,6 +368,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
41640 + {
41641 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
41642 + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
41643 ++ struct btrfs_bio *bbio = NULL;
41644 + struct btrfs_path *path;
41645 + const u32 sectorsize = fs_info->sectorsize;
41646 + const u32 csum_size = fs_info->csum_size;
41647 +@@ -377,6 +378,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
41648 + u8 *csum;
41649 + const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
41650 + int count = 0;
41651 ++ blk_status_t ret = BLK_STS_OK;
41652 +
41653 + if ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
41654 + test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
41655 +@@ -400,7 +402,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
41656 + return BLK_STS_RESOURCE;
41657 +
41658 + if (!dst) {
41659 +- struct btrfs_bio *bbio = btrfs_bio(bio);
41660 ++ bbio = btrfs_bio(bio);
41661 +
41662 + if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
41663 + bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
41664 +@@ -456,21 +458,27 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
41665 +
41666 + count = search_csum_tree(fs_info, path, cur_disk_bytenr,
41667 + search_len, csum_dst);
41668 +- if (count <= 0) {
41669 +- /*
41670 +- * Either we hit a critical error or we didn't find
41671 +- * the csum.
41672 +- * Either way, we put zero into the csums dst, and skip
41673 +- * to the next sector.
41674 +- */
41675 ++ if (count < 0) {
41676 ++ ret = errno_to_blk_status(count);
41677 ++ if (bbio)
41678 ++ btrfs_bio_free_csum(bbio);
41679 ++ break;
41680 ++ }
41681 ++
41682 ++ /*
41683 ++ * We didn't find a csum for this range. We need to make sure
41684 ++ * we complain loudly about this, because we are not NODATASUM.
41685 ++ *
41686 ++ * However for the DATA_RELOC inode we could potentially be
41687 ++ * relocating data extents for a NODATASUM inode, so the inode
41688 ++ * itself won't be marked with NODATASUM, but the extent we're
41689 ++ * copying is in fact NODATASUM. If we don't find a csum we
41690 ++ * assume this is the case.
41691 ++ */
41692 ++ if (count == 0) {
41693 + memset(csum_dst, 0, csum_size);
41694 + count = 1;
41695 +
41696 +- /*
41697 +- * For data reloc inode, we need to mark the range
41698 +- * NODATASUM so that balance won't report false csum
41699 +- * error.
41700 +- */
41701 + if (BTRFS_I(inode)->root->root_key.objectid ==
41702 + BTRFS_DATA_RELOC_TREE_OBJECTID) {
41703 + u64 file_offset;
41704 +@@ -491,7 +499,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
41705 + }
41706 +
41707 + btrfs_free_path(path);
41708 +- return BLK_STS_OK;
41709 ++ return ret;
41710 + }
41711 +
41712 + int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
41713 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
41714 +index 5bbea5ec31fc5..0f4408f9daddc 100644
41715 +--- a/fs/btrfs/inode.c
41716 ++++ b/fs/btrfs/inode.c
41717 +@@ -2538,10 +2538,15 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
41718 + goto out;
41719 +
41720 + if (bio_flags & EXTENT_BIO_COMPRESSED) {
41721 ++ /*
41722 ++ * btrfs_submit_compressed_read will handle completing
41723 ++ * the bio if there were any errors, so just return
41724 ++ * here.
41725 ++ */
41726 + ret = btrfs_submit_compressed_read(inode, bio,
41727 + mirror_num,
41728 + bio_flags);
41729 +- goto out;
41730 ++ goto out_no_endio;
41731 + } else {
41732 + /*
41733 + * Lookup bio sums does extra checks around whether we
41734 +@@ -2575,6 +2580,7 @@ out:
41735 + bio->bi_status = ret;
41736 + bio_endio(bio);
41737 + }
41738 ++out_no_endio:
41739 + return ret;
41740 + }
41741 +
41742 +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
41743 +index a3930da4eb3fb..e437238cc603e 100644
41744 +--- a/fs/btrfs/reflink.c
41745 ++++ b/fs/btrfs/reflink.c
41746 +@@ -505,8 +505,11 @@ process_slot:
41747 + */
41748 + ASSERT(key.offset == 0);
41749 + ASSERT(datal <= fs_info->sectorsize);
41750 +- if (key.offset != 0 || datal > fs_info->sectorsize)
41751 +- return -EUCLEAN;
41752 ++ if (WARN_ON(key.offset != 0) ||
41753 ++ WARN_ON(datal > fs_info->sectorsize)) {
41754 ++ ret = -EUCLEAN;
41755 ++ goto out;
41756 ++ }
41757 +
41758 + ret = clone_copy_inline_extent(inode, path, &new_key,
41759 + drop_start, datal, size,
41760 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
41761 +index 294242c194d80..62382ae1eb02a 100644
41762 +--- a/fs/btrfs/space-info.c
41763 ++++ b/fs/btrfs/space-info.c
41764 +@@ -1061,7 +1061,6 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
41765 + trans_rsv->reserved;
41766 + if (block_rsv_size < space_info->bytes_may_use)
41767 + delalloc_size = space_info->bytes_may_use - block_rsv_size;
41768 +- spin_unlock(&space_info->lock);
41769 +
41770 + /*
41771 + * We don't want to include the global_rsv in our calculation,
41772 +@@ -1092,6 +1091,8 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
41773 + flush = FLUSH_DELAYED_REFS_NR;
41774 + }
41775 +
41776 ++ spin_unlock(&space_info->lock);
41777 ++
41778 + /*
41779 + * We don't want to reclaim everything, just a portion, so scale
41780 + * down the to_reclaim by 1/4. If it takes us down to 0,
41781 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
41782 +index b07d382d53a86..f5b11c1a000aa 100644
41783 +--- a/fs/btrfs/volumes.c
41784 ++++ b/fs/btrfs/volumes.c
41785 +@@ -534,15 +534,48 @@ error:
41786 + return ret;
41787 + }
41788 +
41789 +-static bool device_path_matched(const char *path, struct btrfs_device *device)
41790 ++/*
41791 ++ * Check if the device in the path matches the device in the given struct device.
41792 ++ *
41793 ++ * Returns:
41794 ++ * true If it is the same device.
41795 ++ * false If it is not the same device or on error.
41796 ++ */
41797 ++static bool device_matched(const struct btrfs_device *device, const char *path)
41798 + {
41799 +- int found;
41800 ++ char *device_name;
41801 ++ dev_t dev_old;
41802 ++ dev_t dev_new;
41803 ++ int ret;
41804 ++
41805 ++ /*
41806 ++ * If we are looking for a device with the matching dev_t, then skip
41807 ++ * device without a name (a missing device).
41808 ++ */
41809 ++ if (!device->name)
41810 ++ return false;
41811 ++
41812 ++ device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
41813 ++ if (!device_name)
41814 ++ return false;
41815 +
41816 + rcu_read_lock();
41817 +- found = strcmp(rcu_str_deref(device->name), path);
41818 ++ scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
41819 + rcu_read_unlock();
41820 +
41821 +- return found == 0;
41822 ++ ret = lookup_bdev(device_name, &dev_old);
41823 ++ kfree(device_name);
41824 ++ if (ret)
41825 ++ return false;
41826 ++
41827 ++ ret = lookup_bdev(path, &dev_new);
41828 ++ if (ret)
41829 ++ return false;
41830 ++
41831 ++ if (dev_old == dev_new)
41832 ++ return true;
41833 ++
41834 ++ return false;
41835 + }
41836 +
41837 + /*
41838 +@@ -575,9 +608,7 @@ static int btrfs_free_stale_devices(const char *path,
41839 + &fs_devices->devices, dev_list) {
41840 + if (skip_device && skip_device == device)
41841 + continue;
41842 +- if (path && !device->name)
41843 +- continue;
41844 +- if (path && !device_path_matched(path, device))
41845 ++ if (path && !device_matched(device, path))
41846 + continue;
41847 + if (fs_devices->opened) {
41848 + /* for an already deleted device return 0 */
41849 +@@ -8299,10 +8330,12 @@ static int relocating_repair_kthread(void *data)
41850 + target = cache->start;
41851 + btrfs_put_block_group(cache);
41852 +
41853 ++ sb_start_write(fs_info->sb);
41854 + if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
41855 + btrfs_info(fs_info,
41856 + "zoned: skip relocating block group %llu to repair: EBUSY",
41857 + target);
41858 ++ sb_end_write(fs_info->sb);
41859 + return -EBUSY;
41860 + }
41861 +
41862 +@@ -8330,6 +8363,7 @@ out:
41863 + btrfs_put_block_group(cache);
41864 + mutex_unlock(&fs_info->reclaim_bgs_lock);
41865 + btrfs_exclop_finish(fs_info);
41866 ++ sb_end_write(fs_info->sb);
41867 +
41868 + return ret;
41869 + }
41870 +diff --git a/fs/buffer.c b/fs/buffer.c
41871 +index 8e112b6bd3719..c76a8ef60a758 100644
41872 +--- a/fs/buffer.c
41873 ++++ b/fs/buffer.c
41874 +@@ -1235,16 +1235,18 @@ static void bh_lru_install(struct buffer_head *bh)
41875 + int i;
41876 +
41877 + check_irqs_on();
41878 ++ bh_lru_lock();
41879 ++
41880 + /*
41881 + * the refcount of buffer_head in bh_lru prevents dropping the
41882 + * attached page(i.e., try_to_free_buffers) so it could cause
41883 + * failing page migration.
41884 + * Skip putting upcoming bh into bh_lru until migration is done.
41885 + */
41886 +- if (lru_cache_disabled())
41887 ++ if (lru_cache_disabled()) {
41888 ++ bh_lru_unlock();
41889 + return;
41890 +-
41891 +- bh_lru_lock();
41892 ++ }
41893 +
41894 + b = this_cpu_ptr(&bh_lrus);
41895 + for (i = 0; i < BH_LRU_SIZE; i++) {
41896 +diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
41897 +index cdce1609c5c26..180c234c2f46c 100644
41898 +--- a/fs/cifs/cifs_swn.c
41899 ++++ b/fs/cifs/cifs_swn.c
41900 +@@ -396,11 +396,11 @@ static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const ch
41901 + switch (state) {
41902 + case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
41903 + cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
41904 +- cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
41905 ++ cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
41906 + break;
41907 + case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
41908 + cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
41909 +- cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
41910 ++ cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
41911 + break;
41912 + case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
41913 + cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
41914 +@@ -498,7 +498,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
41915 + goto unlock;
41916 + }
41917 +
41918 +- cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, false);
41919 ++ cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
41920 +
41921 + unlock:
41922 + mutex_unlock(&tcon->ses->server->srv_mutex);
41923 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41924 +index 082c214786867..6e5246122ee2c 100644
41925 +--- a/fs/cifs/cifsfs.c
41926 ++++ b/fs/cifs/cifsfs.c
41927 +@@ -210,6 +210,9 @@ cifs_read_super(struct super_block *sb)
41928 + if (rc)
41929 + goto out_no_root;
41930 + /* tune readahead according to rsize if readahead size not set on mount */
41931 ++ if (cifs_sb->ctx->rsize == 0)
41932 ++ cifs_sb->ctx->rsize =
41933 ++ tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
41934 + if (cifs_sb->ctx->rasize)
41935 + sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
41936 + else
41937 +@@ -254,6 +257,9 @@ static void cifs_kill_sb(struct super_block *sb)
41938 + struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
41939 + struct cifs_tcon *tcon;
41940 + struct cached_fid *cfid;
41941 ++ struct rb_root *root = &cifs_sb->tlink_tree;
41942 ++ struct rb_node *node;
41943 ++ struct tcon_link *tlink;
41944 +
41945 + /*
41946 + * We ned to release all dentries for the cached directories
41947 +@@ -263,16 +269,18 @@ static void cifs_kill_sb(struct super_block *sb)
41948 + dput(cifs_sb->root);
41949 + cifs_sb->root = NULL;
41950 + }
41951 +- tcon = cifs_sb_master_tcon(cifs_sb);
41952 +- if (tcon) {
41953 ++ node = rb_first(root);
41954 ++ while (node != NULL) {
41955 ++ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
41956 ++ tcon = tlink_tcon(tlink);
41957 + cfid = &tcon->crfid;
41958 + mutex_lock(&cfid->fid_mutex);
41959 + if (cfid->dentry) {
41960 +-
41961 + dput(cfid->dentry);
41962 + cfid->dentry = NULL;
41963 + }
41964 + mutex_unlock(&cfid->fid_mutex);
41965 ++ node = rb_next(node);
41966 + }
41967 +
41968 + kill_anon_super(sb);
41969 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
41970 +index d3701295402d2..0df3b24a0bf4c 100644
41971 +--- a/fs/cifs/cifsproto.h
41972 ++++ b/fs/cifs/cifsproto.h
41973 +@@ -132,6 +132,9 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
41974 + struct smb_hdr *out_buf,
41975 + int *bytes_returned);
41976 + void
41977 ++cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
41978 ++ bool all_channels);
41979 ++void
41980 + cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
41981 + bool mark_smb_session);
41982 + extern int cifs_reconnect(struct TCP_Server_Info *server,
41983 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
41984 +index d3020abfe404a..d6f8ccc7bfe2f 100644
41985 +--- a/fs/cifs/connect.c
41986 ++++ b/fs/cifs/connect.c
41987 +@@ -162,11 +162,51 @@ static void cifs_resolve_server(struct work_struct *work)
41988 + mutex_unlock(&server->srv_mutex);
41989 + }
41990 +
41991 ++/*
41992 ++ * Update the tcpStatus for the server.
41993 ++ * This is used to signal the cifsd thread to call cifs_reconnect
41994 ++ * ONLY cifsd thread should call cifs_reconnect. For any other
41995 ++ * thread, use this function
41996 ++ *
41997 ++ * @server: the tcp ses for which reconnect is needed
41998 ++ * @all_channels: if this needs to be done for all channels
41999 ++ */
42000 ++void
42001 ++cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
42002 ++ bool all_channels)
42003 ++{
42004 ++ struct TCP_Server_Info *pserver;
42005 ++ struct cifs_ses *ses;
42006 ++ int i;
42007 ++
42008 ++ /* If server is a channel, select the primary channel */
42009 ++ pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
42010 ++
42011 ++ spin_lock(&cifs_tcp_ses_lock);
42012 ++ if (!all_channels) {
42013 ++ pserver->tcpStatus = CifsNeedReconnect;
42014 ++ spin_unlock(&cifs_tcp_ses_lock);
42015 ++ return;
42016 ++ }
42017 ++
42018 ++ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
42019 ++ spin_lock(&ses->chan_lock);
42020 ++ for (i = 0; i < ses->chan_count; i++)
42021 ++ ses->chans[i].server->tcpStatus = CifsNeedReconnect;
42022 ++ spin_unlock(&ses->chan_lock);
42023 ++ }
42024 ++ spin_unlock(&cifs_tcp_ses_lock);
42025 ++}
42026 ++
42027 + /*
42028 + * Mark all sessions and tcons for reconnect.
42029 ++ * IMPORTANT: make sure that this gets called only from
42030 ++ * cifsd thread. For any other thread, use
42031 ++ * cifs_signal_cifsd_for_reconnect
42032 + *
42033 ++ * @server: the tcp ses for which reconnect is needed
42034 + * @server needs to be previously set to CifsNeedReconnect.
42035 +- *
42036 ++ * @mark_smb_session: whether even sessions need to be marked
42037 + */
42038 + void
42039 + cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
42040 +@@ -3473,6 +3513,9 @@ static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
42041 + struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
42042 + char *oldmnt = cifs_sb->ctx->mount_options;
42043 +
42044 ++ cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
42045 ++ dfs_cache_get_tgt_name(tit));
42046 ++
42047 + rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
42048 + if (rc)
42049 + goto out;
42050 +@@ -3571,13 +3614,18 @@ static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
42051 + if (rc)
42052 + goto out;
42053 +
42054 +- /* Try all dfs link targets */
42055 ++ /* Try all dfs link targets. If an I/O fails from currently connected DFS target with an
42056 ++ * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
42057 ++ * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
42058 ++ * STATUS_PATH_NOT_COVERED."
42059 ++ */
42060 + for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
42061 + tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
42062 + rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
42063 + if (!rc) {
42064 + rc = is_path_remote(mnt_ctx);
42065 +- break;
42066 ++ if (!rc || rc == -EREMOTE)
42067 ++ break;
42068 + }
42069 + }
42070 +
42071 +@@ -3651,7 +3699,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
42072 + goto error;
42073 +
42074 + rc = is_path_remote(&mnt_ctx);
42075 +- if (rc == -EREMOTE)
42076 ++ if (rc)
42077 + rc = follow_dfs_link(&mnt_ctx);
42078 + if (rc)
42079 + goto error;
42080 +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
42081 +index 831f42458bf6d..30e040da4f096 100644
42082 +--- a/fs/cifs/dfs_cache.c
42083 ++++ b/fs/cifs/dfs_cache.c
42084 +@@ -1355,7 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
42085 + }
42086 +
42087 + cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
42088 +- cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, true);
42089 ++ cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
42090 + }
42091 +
42092 + /* Refresh dfs referral of tcon and mark it for reconnect if needed */
42093 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
42094 +index e7af802dcfa60..a2723f7cb5e9d 100644
42095 +--- a/fs/cifs/file.c
42096 ++++ b/fs/cifs/file.c
42097 +@@ -3740,6 +3740,11 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
42098 + break;
42099 + }
42100 +
42101 ++ if (cifs_sb->ctx->rsize == 0)
42102 ++ cifs_sb->ctx->rsize =
42103 ++ server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
42104 ++ cifs_sb->ctx);
42105 ++
42106 + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
42107 + &rsize, credits);
42108 + if (rc)
42109 +@@ -4474,6 +4479,11 @@ static void cifs_readahead(struct readahead_control *ractl)
42110 + }
42111 + }
42112 +
42113 ++ if (cifs_sb->ctx->rsize == 0)
42114 ++ cifs_sb->ctx->rsize =
42115 ++ server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
42116 ++ cifs_sb->ctx);
42117 ++
42118 + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
42119 + &rsize, credits);
42120 + if (rc)
42121 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
42122 +index b2fb7bd119366..c71c9a44bef4b 100644
42123 +--- a/fs/cifs/smb1ops.c
42124 ++++ b/fs/cifs/smb1ops.c
42125 +@@ -228,7 +228,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
42126 + spin_unlock(&GlobalMid_Lock);
42127 +
42128 + if (reconnect) {
42129 +- cifs_mark_tcp_ses_conns_for_reconnect(server, false);
42130 ++ cifs_signal_cifsd_for_reconnect(server, false);
42131 + }
42132 +
42133 + return mid;
42134 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
42135 +index af5d0830bc8a8..5d120cd8bc78f 100644
42136 +--- a/fs/cifs/smb2ops.c
42137 ++++ b/fs/cifs/smb2ops.c
42138 +@@ -25,6 +25,7 @@
42139 + #include "smb2glob.h"
42140 + #include "cifs_ioctl.h"
42141 + #include "smbdirect.h"
42142 ++#include "fscache.h"
42143 + #include "fs_context.h"
42144 +
42145 + /* Change credits for different ops and return the total number of credits */
42146 +@@ -1642,6 +1643,7 @@ smb2_ioctl_query_info(const unsigned int xid,
42147 + unsigned int size[2];
42148 + void *data[2];
42149 + int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
42150 ++ void (*free_req1_func)(struct smb_rqst *r);
42151 +
42152 + vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
42153 + if (vars == NULL)
42154 +@@ -1651,27 +1653,29 @@ smb2_ioctl_query_info(const unsigned int xid,
42155 +
42156 + resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
42157 +
42158 +- if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
42159 +- goto e_fault;
42160 +-
42161 ++ if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
42162 ++ rc = -EFAULT;
42163 ++ goto free_vars;
42164 ++ }
42165 + if (qi.output_buffer_length > 1024) {
42166 +- kfree(vars);
42167 +- return -EINVAL;
42168 ++ rc = -EINVAL;
42169 ++ goto free_vars;
42170 + }
42171 +
42172 + if (!ses || !server) {
42173 +- kfree(vars);
42174 +- return -EIO;
42175 ++ rc = -EIO;
42176 ++ goto free_vars;
42177 + }
42178 +
42179 + if (smb3_encryption_required(tcon))
42180 + flags |= CIFS_TRANSFORM_REQ;
42181 +
42182 +- buffer = memdup_user(arg + sizeof(struct smb_query_info),
42183 +- qi.output_buffer_length);
42184 +- if (IS_ERR(buffer)) {
42185 +- kfree(vars);
42186 +- return PTR_ERR(buffer);
42187 ++ if (qi.output_buffer_length) {
42188 ++ buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
42189 ++ if (IS_ERR(buffer)) {
42190 ++ rc = PTR_ERR(buffer);
42191 ++ goto free_vars;
42192 ++ }
42193 + }
42194 +
42195 + /* Open */
42196 +@@ -1709,45 +1713,45 @@ smb2_ioctl_query_info(const unsigned int xid,
42197 + rc = SMB2_open_init(tcon, server,
42198 + &rqst[0], &oplock, &oparms, path);
42199 + if (rc)
42200 +- goto iqinf_exit;
42201 ++ goto free_output_buffer;
42202 + smb2_set_next_command(tcon, &rqst[0]);
42203 +
42204 + /* Query */
42205 + if (qi.flags & PASSTHRU_FSCTL) {
42206 + /* Can eventually relax perm check since server enforces too */
42207 +- if (!capable(CAP_SYS_ADMIN))
42208 ++ if (!capable(CAP_SYS_ADMIN)) {
42209 + rc = -EPERM;
42210 +- else {
42211 +- rqst[1].rq_iov = &vars->io_iov[0];
42212 +- rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
42213 +-
42214 +- rc = SMB2_ioctl_init(tcon, server,
42215 +- &rqst[1],
42216 +- COMPOUND_FID, COMPOUND_FID,
42217 +- qi.info_type, true, buffer,
42218 +- qi.output_buffer_length,
42219 +- CIFSMaxBufSize -
42220 +- MAX_SMB2_CREATE_RESPONSE_SIZE -
42221 +- MAX_SMB2_CLOSE_RESPONSE_SIZE);
42222 ++ goto free_open_req;
42223 + }
42224 ++ rqst[1].rq_iov = &vars->io_iov[0];
42225 ++ rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
42226 ++
42227 ++ rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
42228 ++ qi.info_type, true, buffer, qi.output_buffer_length,
42229 ++ CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
42230 ++ MAX_SMB2_CLOSE_RESPONSE_SIZE);
42231 ++ free_req1_func = SMB2_ioctl_free;
42232 + } else if (qi.flags == PASSTHRU_SET_INFO) {
42233 + /* Can eventually relax perm check since server enforces too */
42234 +- if (!capable(CAP_SYS_ADMIN))
42235 ++ if (!capable(CAP_SYS_ADMIN)) {
42236 + rc = -EPERM;
42237 +- else {
42238 +- rqst[1].rq_iov = &vars->si_iov[0];
42239 +- rqst[1].rq_nvec = 1;
42240 +-
42241 +- size[0] = 8;
42242 +- data[0] = buffer;
42243 +-
42244 +- rc = SMB2_set_info_init(tcon, server,
42245 +- &rqst[1],
42246 +- COMPOUND_FID, COMPOUND_FID,
42247 +- current->tgid,
42248 +- FILE_END_OF_FILE_INFORMATION,
42249 +- SMB2_O_INFO_FILE, 0, data, size);
42250 ++ goto free_open_req;
42251 ++ }
42252 ++ if (qi.output_buffer_length < 8) {
42253 ++ rc = -EINVAL;
42254 ++ goto free_open_req;
42255 + }
42256 ++ rqst[1].rq_iov = &vars->si_iov[0];
42257 ++ rqst[1].rq_nvec = 1;
42258 ++
42259 ++ /* MS-FSCC 2.4.13 FileEndOfFileInformation */
42260 ++ size[0] = 8;
42261 ++ data[0] = buffer;
42262 ++
42263 ++ rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
42264 ++ current->tgid, FILE_END_OF_FILE_INFORMATION,
42265 ++ SMB2_O_INFO_FILE, 0, data, size);
42266 ++ free_req1_func = SMB2_set_info_free;
42267 + } else if (qi.flags == PASSTHRU_QUERY_INFO) {
42268 + rqst[1].rq_iov = &vars->qi_iov[0];
42269 + rqst[1].rq_nvec = 1;
42270 +@@ -1758,6 +1762,7 @@ smb2_ioctl_query_info(const unsigned int xid,
42271 + qi.info_type, qi.additional_information,
42272 + qi.input_buffer_length,
42273 + qi.output_buffer_length, buffer);
42274 ++ free_req1_func = SMB2_query_info_free;
42275 + } else { /* unknown flags */
42276 + cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
42277 + qi.flags);
42278 +@@ -1765,7 +1770,7 @@ smb2_ioctl_query_info(const unsigned int xid,
42279 + }
42280 +
42281 + if (rc)
42282 +- goto iqinf_exit;
42283 ++ goto free_open_req;
42284 + smb2_set_next_command(tcon, &rqst[1]);
42285 + smb2_set_related(&rqst[1]);
42286 +
42287 +@@ -1776,14 +1781,14 @@ smb2_ioctl_query_info(const unsigned int xid,
42288 + rc = SMB2_close_init(tcon, server,
42289 + &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
42290 + if (rc)
42291 +- goto iqinf_exit;
42292 ++ goto free_req_1;
42293 + smb2_set_related(&rqst[2]);
42294 +
42295 + rc = compound_send_recv(xid, ses, server,
42296 + flags, 3, rqst,
42297 + resp_buftype, rsp_iov);
42298 + if (rc)
42299 +- goto iqinf_exit;
42300 ++ goto out;
42301 +
42302 + /* No need to bump num_remote_opens since handle immediately closed */
42303 + if (qi.flags & PASSTHRU_FSCTL) {
42304 +@@ -1793,18 +1798,22 @@ smb2_ioctl_query_info(const unsigned int xid,
42305 + qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
42306 + if (qi.input_buffer_length > 0 &&
42307 + le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
42308 +- > rsp_iov[1].iov_len)
42309 +- goto e_fault;
42310 ++ > rsp_iov[1].iov_len) {
42311 ++ rc = -EFAULT;
42312 ++ goto out;
42313 ++ }
42314 +
42315 + if (copy_to_user(&pqi->input_buffer_length,
42316 + &qi.input_buffer_length,
42317 +- sizeof(qi.input_buffer_length)))
42318 +- goto e_fault;
42319 ++ sizeof(qi.input_buffer_length))) {
42320 ++ rc = -EFAULT;
42321 ++ goto out;
42322 ++ }
42323 +
42324 + if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
42325 + (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
42326 + qi.input_buffer_length))
42327 +- goto e_fault;
42328 ++ rc = -EFAULT;
42329 + } else {
42330 + pqi = (struct smb_query_info __user *)arg;
42331 + qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
42332 +@@ -1812,28 +1821,30 @@ smb2_ioctl_query_info(const unsigned int xid,
42333 + qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
42334 + if (copy_to_user(&pqi->input_buffer_length,
42335 + &qi.input_buffer_length,
42336 +- sizeof(qi.input_buffer_length)))
42337 +- goto e_fault;
42338 ++ sizeof(qi.input_buffer_length))) {
42339 ++ rc = -EFAULT;
42340 ++ goto out;
42341 ++ }
42342 +
42343 + if (copy_to_user(pqi + 1, qi_rsp->Buffer,
42344 + qi.input_buffer_length))
42345 +- goto e_fault;
42346 ++ rc = -EFAULT;
42347 + }
42348 +
42349 +- iqinf_exit:
42350 +- cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
42351 +- cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
42352 +- cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
42353 ++out:
42354 + free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
42355 + free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
42356 + free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
42357 +- kfree(vars);
42358 ++ SMB2_close_free(&rqst[2]);
42359 ++free_req_1:
42360 ++ free_req1_func(&rqst[1]);
42361 ++free_open_req:
42362 ++ SMB2_open_free(&rqst[0]);
42363 ++free_output_buffer:
42364 + kfree(buffer);
42365 ++free_vars:
42366 ++ kfree(vars);
42367 + return rc;
42368 +-
42369 +-e_fault:
42370 +- rc = -EFAULT;
42371 +- goto iqinf_exit;
42372 + }
42373 +
42374 + static ssize_t
42375 +@@ -3887,29 +3898,38 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
42376 + {
42377 + int rc;
42378 + unsigned int xid;
42379 ++ struct inode *inode;
42380 + struct cifsFileInfo *cfile = file->private_data;
42381 ++ struct cifsInodeInfo *cifsi;
42382 + __le64 eof;
42383 +
42384 + xid = get_xid();
42385 +
42386 +- if (off >= i_size_read(file->f_inode) ||
42387 +- off + len >= i_size_read(file->f_inode)) {
42388 ++ inode = d_inode(cfile->dentry);
42389 ++ cifsi = CIFS_I(inode);
42390 ++
42391 ++ if (off >= i_size_read(inode) ||
42392 ++ off + len >= i_size_read(inode)) {
42393 + rc = -EINVAL;
42394 + goto out;
42395 + }
42396 +
42397 + rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
42398 +- i_size_read(file->f_inode) - off - len, off);
42399 ++ i_size_read(inode) - off - len, off);
42400 + if (rc < 0)
42401 + goto out;
42402 +
42403 +- eof = cpu_to_le64(i_size_read(file->f_inode) - len);
42404 ++ eof = cpu_to_le64(i_size_read(inode) - len);
42405 + rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
42406 + cfile->fid.volatile_fid, cfile->pid, &eof);
42407 + if (rc < 0)
42408 + goto out;
42409 +
42410 + rc = 0;
42411 ++
42412 ++ cifsi->server_eof = i_size_read(inode) - len;
42413 ++ truncate_setsize(inode, cifsi->server_eof);
42414 ++ fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
42415 + out:
42416 + free_xid(xid);
42417 + return rc;
42418 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
42419 +index 7e7909b1ae118..f82d6fcb5c646 100644
42420 +--- a/fs/cifs/smb2pdu.c
42421 ++++ b/fs/cifs/smb2pdu.c
42422 +@@ -3858,8 +3858,10 @@ void smb2_reconnect_server(struct work_struct *work)
42423 + tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
42424 + if (!tcon) {
42425 + resched = true;
42426 +- list_del_init(&ses->rlist);
42427 +- cifs_put_smb_ses(ses);
42428 ++ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
42429 ++ list_del_init(&ses->rlist);
42430 ++ cifs_put_smb_ses(ses);
42431 ++ }
42432 + goto done;
42433 + }
42434 +
42435 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
42436 +index a4c3e027cca25..eeb1a699bd6f2 100644
42437 +--- a/fs/cifs/transport.c
42438 ++++ b/fs/cifs/transport.c
42439 +@@ -430,7 +430,7 @@ unmask:
42440 + * be taken as the remainder of this one. We need to kill the
42441 + * socket so the server throws away the partial SMB
42442 + */
42443 +- cifs_mark_tcp_ses_conns_for_reconnect(server, false);
42444 ++ cifs_signal_cifsd_for_reconnect(server, false);
42445 + trace_smb3_partial_send_reconnect(server->CurrentMid,
42446 + server->conn_id, server->hostname);
42447 + }
42448 +diff --git a/fs/coredump.c b/fs/coredump.c
42449 +index 1c060c0a2d72f..7ed7d601e5e00 100644
42450 +--- a/fs/coredump.c
42451 ++++ b/fs/coredump.c
42452 +@@ -42,6 +42,7 @@
42453 + #include <linux/path.h>
42454 + #include <linux/timekeeping.h>
42455 + #include <linux/sysctl.h>
42456 ++#include <linux/elf.h>
42457 +
42458 + #include <linux/uaccess.h>
42459 + #include <asm/mmu_context.h>
42460 +@@ -53,6 +54,9 @@
42461 +
42462 + #include <trace/events/sched.h>
42463 +
42464 ++static bool dump_vma_snapshot(struct coredump_params *cprm);
42465 ++static void free_vma_snapshot(struct coredump_params *cprm);
42466 ++
42467 + static int core_uses_pid;
42468 + static unsigned int core_pipe_limit;
42469 + static char core_pattern[CORENAME_MAX_SIZE] = "core";
42470 +@@ -531,6 +535,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
42471 + * by any locks.
42472 + */
42473 + .mm_flags = mm->flags,
42474 ++ .vma_meta = NULL,
42475 + };
42476 +
42477 + audit_core_dumps(siginfo->si_signo);
42478 +@@ -745,6 +750,9 @@ void do_coredump(const kernel_siginfo_t *siginfo)
42479 + pr_info("Core dump to |%s disabled\n", cn.corename);
42480 + goto close_fail;
42481 + }
42482 ++ if (!dump_vma_snapshot(&cprm))
42483 ++ goto close_fail;
42484 ++
42485 + file_start_write(cprm.file);
42486 + core_dumped = binfmt->core_dump(&cprm);
42487 + /*
42488 +@@ -758,6 +766,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
42489 + dump_emit(&cprm, "", 1);
42490 + }
42491 + file_end_write(cprm.file);
42492 ++ free_vma_snapshot(&cprm);
42493 + }
42494 + if (ispipe && core_pipe_limit)
42495 + wait_for_dump_helpers(cprm.file);
42496 +@@ -980,6 +989,8 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42497 + return false;
42498 + }
42499 +
42500 ++#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1
42501 ++
42502 + /*
42503 + * Decide how much of @vma's contents should be included in a core dump.
42504 + */
42505 +@@ -1039,9 +1050,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42506 + * dump the first page to aid in determining what was mapped here.
42507 + */
42508 + if (FILTER(ELF_HEADERS) &&
42509 +- vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) &&
42510 +- (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
42511 +- return PAGE_SIZE;
42512 ++ vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
42513 ++ if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
42514 ++ return PAGE_SIZE;
42515 ++
42516 ++ /*
42517 ++ * ELF libraries aren't always executable.
42518 ++ * We'll want to check whether the mapping starts with the ELF
42519 ++ * magic, but not now - we're holding the mmap lock,
42520 ++ * so copy_from_user() doesn't work here.
42521 ++ * Use a placeholder instead, and fix it up later in
42522 ++ * dump_vma_snapshot().
42523 ++ */
42524 ++ return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
42525 ++ }
42526 +
42527 + #undef FILTER
42528 +
42529 +@@ -1078,18 +1100,29 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
42530 + return gate_vma;
42531 + }
42532 +
42533 ++static void free_vma_snapshot(struct coredump_params *cprm)
42534 ++{
42535 ++ if (cprm->vma_meta) {
42536 ++ int i;
42537 ++ for (i = 0; i < cprm->vma_count; i++) {
42538 ++ struct file *file = cprm->vma_meta[i].file;
42539 ++ if (file)
42540 ++ fput(file);
42541 ++ }
42542 ++ kvfree(cprm->vma_meta);
42543 ++ cprm->vma_meta = NULL;
42544 ++ }
42545 ++}
42546 ++
42547 + /*
42548 + * Under the mmap_lock, take a snapshot of relevant information about the task's
42549 + * VMAs.
42550 + */
42551 +-int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
42552 +- struct core_vma_metadata **vma_meta,
42553 +- size_t *vma_data_size_ptr)
42554 ++static bool dump_vma_snapshot(struct coredump_params *cprm)
42555 + {
42556 + struct vm_area_struct *vma, *gate_vma;
42557 + struct mm_struct *mm = current->mm;
42558 + int i;
42559 +- size_t vma_data_size = 0;
42560 +
42561 + /*
42562 + * Once the stack expansion code is fixed to not change VMA bounds
42563 +@@ -1097,36 +1130,51 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
42564 + * mmap_lock in read mode.
42565 + */
42566 + if (mmap_write_lock_killable(mm))
42567 +- return -EINTR;
42568 ++ return false;
42569 +
42570 ++ cprm->vma_data_size = 0;
42571 + gate_vma = get_gate_vma(mm);
42572 +- *vma_count = mm->map_count + (gate_vma ? 1 : 0);
42573 ++ cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);
42574 +
42575 +- *vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
42576 +- if (!*vma_meta) {
42577 ++ cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
42578 ++ if (!cprm->vma_meta) {
42579 + mmap_write_unlock(mm);
42580 +- return -ENOMEM;
42581 ++ return false;
42582 + }
42583 +
42584 + for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
42585 + vma = next_vma(vma, gate_vma), i++) {
42586 +- struct core_vma_metadata *m = (*vma_meta) + i;
42587 ++ struct core_vma_metadata *m = cprm->vma_meta + i;
42588 +
42589 + m->start = vma->vm_start;
42590 + m->end = vma->vm_end;
42591 + m->flags = vma->vm_flags;
42592 + m->dump_size = vma_dump_size(vma, cprm->mm_flags);
42593 ++ m->pgoff = vma->vm_pgoff;
42594 +
42595 +- vma_data_size += m->dump_size;
42596 ++ m->file = vma->vm_file;
42597 ++ if (m->file)
42598 ++ get_file(m->file);
42599 + }
42600 +
42601 + mmap_write_unlock(mm);
42602 +
42603 +- if (WARN_ON(i != *vma_count)) {
42604 +- kvfree(*vma_meta);
42605 +- return -EFAULT;
42606 ++ for (i = 0; i < cprm->vma_count; i++) {
42607 ++ struct core_vma_metadata *m = cprm->vma_meta + i;
42608 ++
42609 ++ if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
42610 ++ char elfmag[SELFMAG];
42611 ++
42612 ++ if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
42613 ++ memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
42614 ++ m->dump_size = 0;
42615 ++ } else {
42616 ++ m->dump_size = PAGE_SIZE;
42617 ++ }
42618 ++ }
42619 ++
42620 ++ cprm->vma_data_size += m->dump_size;
42621 + }
42622 +
42623 +- *vma_data_size_ptr = vma_data_size;
42624 +- return 0;
42625 ++ return true;
42626 + }
42627 +diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c
42628 +index dac252bc92281..f3babf1e66083 100644
42629 +--- a/fs/erofs/sysfs.c
42630 ++++ b/fs/erofs/sysfs.c
42631 +@@ -221,9 +221,11 @@ void erofs_unregister_sysfs(struct super_block *sb)
42632 + {
42633 + struct erofs_sb_info *sbi = EROFS_SB(sb);
42634 +
42635 +- kobject_del(&sbi->s_kobj);
42636 +- kobject_put(&sbi->s_kobj);
42637 +- wait_for_completion(&sbi->s_kobj_unregister);
42638 ++ if (sbi->s_kobj.state_in_sysfs) {
42639 ++ kobject_del(&sbi->s_kobj);
42640 ++ kobject_put(&sbi->s_kobj);
42641 ++ wait_for_completion(&sbi->s_kobj_unregister);
42642 ++ }
42643 + }
42644 +
42645 + int __init erofs_init_sysfs(void)
42646 +diff --git a/fs/exec.c b/fs/exec.c
42647 +index 79f2c9483302d..40b1008fb0f79 100644
42648 +--- a/fs/exec.c
42649 ++++ b/fs/exec.c
42650 +@@ -495,8 +495,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
42651 + * the stack. They aren't stored until much later when we can't
42652 + * signal to the parent that the child has run out of stack space.
42653 + * Instead, calculate it here so it's possible to fail gracefully.
42654 ++ *
42655 ++ * In the case of argc = 0, make sure there is space for adding a
42656 ++ * empty string (which will bump argc to 1), to ensure confused
42657 ++ * userspace programs don't start processing from argv[1], thinking
42658 ++ * argc can never be 0, to keep them from walking envp by accident.
42659 ++ * See do_execveat_common().
42660 + */
42661 +- ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
42662 ++ ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
42663 + if (limit <= ptr_size)
42664 + return -E2BIG;
42665 + limit -= ptr_size;
42666 +@@ -1897,6 +1903,9 @@ static int do_execveat_common(int fd, struct filename *filename,
42667 + }
42668 +
42669 + retval = count(argv, MAX_ARG_STRINGS);
42670 ++ if (retval == 0)
42671 ++ pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
42672 ++ current->comm, bprm->filename);
42673 + if (retval < 0)
42674 + goto out_free;
42675 + bprm->argc = retval;
42676 +@@ -1923,6 +1932,19 @@ static int do_execveat_common(int fd, struct filename *filename,
42677 + if (retval < 0)
42678 + goto out_free;
42679 +
42680 ++ /*
42681 ++ * When argv is empty, add an empty string ("") as argv[0] to
42682 ++ * ensure confused userspace programs that start processing
42683 ++ * from argv[1] won't end up walking envp. See also
42684 ++ * bprm_stack_limits().
42685 ++ */
42686 ++ if (bprm->argc == 0) {
42687 ++ retval = copy_string_kernel("", bprm);
42688 ++ if (retval < 0)
42689 ++ goto out_free;
42690 ++ bprm->argc = 1;
42691 ++ }
42692 ++
42693 + retval = bprm_execve(bprm, fd, filename, flags);
42694 + out_free:
42695 + free_bprm(bprm);
42696 +@@ -1951,6 +1973,8 @@ int kernel_execve(const char *kernel_filename,
42697 + }
42698 +
42699 + retval = count_strings_kernel(argv);
42700 ++ if (WARN_ON_ONCE(retval == 0))
42701 ++ retval = -EINVAL;
42702 + if (retval < 0)
42703 + goto out_free;
42704 + bprm->argc = retval;
42705 +diff --git a/fs/ext2/super.c b/fs/ext2/super.c
42706 +index 94f1fbd7d3ac2..6d4f5ef747660 100644
42707 +--- a/fs/ext2/super.c
42708 ++++ b/fs/ext2/super.c
42709 +@@ -753,8 +753,12 @@ static loff_t ext2_max_size(int bits)
42710 + res += 1LL << (bits-2);
42711 + res += 1LL << (2*(bits-2));
42712 + res += 1LL << (3*(bits-2));
42713 ++ /* Compute how many metadata blocks are needed */
42714 ++ meta_blocks = 1;
42715 ++ meta_blocks += 1 + ppb;
42716 ++ meta_blocks += 1 + ppb + ppb * ppb;
42717 + /* Does block tree limit file size? */
42718 +- if (res < upper_limit)
42719 ++ if (res + meta_blocks <= upper_limit)
42720 + goto check_lfs;
42721 +
42722 + res = upper_limit;
42723 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
42724 +index e429418036050..9c076262770d9 100644
42725 +--- a/fs/ext4/inline.c
42726 ++++ b/fs/ext4/inline.c
42727 +@@ -1783,19 +1783,20 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
42728 + void *inline_pos;
42729 + unsigned int offset;
42730 + struct ext4_dir_entry_2 *de;
42731 +- bool ret = true;
42732 ++ bool ret = false;
42733 +
42734 + err = ext4_get_inode_loc(dir, &iloc);
42735 + if (err) {
42736 + EXT4_ERROR_INODE_ERR(dir, -err,
42737 + "error %d getting inode %lu block",
42738 + err, dir->i_ino);
42739 +- return true;
42740 ++ return false;
42741 + }
42742 +
42743 + down_read(&EXT4_I(dir)->xattr_sem);
42744 + if (!ext4_has_inline_data(dir)) {
42745 + *has_inline_data = 0;
42746 ++ ret = true;
42747 + goto out;
42748 + }
42749 +
42750 +@@ -1804,7 +1805,6 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
42751 + ext4_warning(dir->i_sb,
42752 + "bad inline directory (dir #%lu) - no `..'",
42753 + dir->i_ino);
42754 +- ret = true;
42755 + goto out;
42756 + }
42757 +
42758 +@@ -1823,16 +1823,15 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
42759 + dir->i_ino, le32_to_cpu(de->inode),
42760 + le16_to_cpu(de->rec_len), de->name_len,
42761 + inline_size);
42762 +- ret = true;
42763 + goto out;
42764 + }
42765 + if (le32_to_cpu(de->inode)) {
42766 +- ret = false;
42767 + goto out;
42768 + }
42769 + offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
42770 + }
42771 +
42772 ++ ret = true;
42773 + out:
42774 + up_read(&EXT4_I(dir)->xattr_sem);
42775 + brelse(iloc.bh);
42776 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
42777 +index 01c9e4f743ba9..531a94f48637c 100644
42778 +--- a/fs/ext4/inode.c
42779 ++++ b/fs/ext4/inode.c
42780 +@@ -1993,6 +1993,15 @@ static int ext4_writepage(struct page *page,
42781 + else
42782 + len = PAGE_SIZE;
42783 +
42784 ++ /* Should never happen but for bugs in other kernel subsystems */
42785 ++ if (!page_has_buffers(page)) {
42786 ++ ext4_warning_inode(inode,
42787 ++ "page %lu does not have buffers attached", page->index);
42788 ++ ClearPageDirty(page);
42789 ++ unlock_page(page);
42790 ++ return 0;
42791 ++ }
42792 ++
42793 + page_bufs = page_buffers(page);
42794 + /*
42795 + * We cannot do block allocation or other extent handling in this
42796 +@@ -2594,6 +2603,22 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
42797 + wait_on_page_writeback(page);
42798 + BUG_ON(PageWriteback(page));
42799 +
42800 ++ /*
42801 ++ * Should never happen but for buggy code in
42802 ++ * other subsystems that call
42803 ++ * set_page_dirty() without properly warning
42804 ++ * the file system first. See [1] for more
42805 ++ * information.
42806 ++ *
42807 ++	 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
42808 ++ */
42809 ++ if (!page_has_buffers(page)) {
42810 ++ ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
42811 ++ ClearPageDirty(page);
42812 ++ unlock_page(page);
42813 ++ continue;
42814 ++ }
42815 ++
42816 + if (mpd->map.m_len == 0)
42817 + mpd->first_page = page->index;
42818 + mpd->next_page = page->index + 1;
42819 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42820 +index 67ac95c4cd9b8..1f37eb0176ccc 100644
42821 +--- a/fs/ext4/mballoc.c
42822 ++++ b/fs/ext4/mballoc.c
42823 +@@ -1000,7 +1000,7 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
42824 + return 0;
42825 + if (ac->ac_criteria >= 2)
42826 + return 0;
42827 +- if (ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
42828 ++ if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
42829 + return 0;
42830 + return 1;
42831 + }
42832 +@@ -3899,69 +3899,95 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
42833 + struct ext4_sb_info *sbi = EXT4_SB(sb);
42834 + ext4_group_t group;
42835 + ext4_grpblk_t blkoff;
42836 +- int i, clen, err;
42837 ++ int i, err;
42838 + int already;
42839 ++ unsigned int clen, clen_changed, thisgrp_len;
42840 +
42841 +- clen = EXT4_B2C(sbi, len);
42842 ++ while (len > 0) {
42843 ++ ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
42844 +
42845 +- ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
42846 +- bitmap_bh = ext4_read_block_bitmap(sb, group);
42847 +- if (IS_ERR(bitmap_bh)) {
42848 +- err = PTR_ERR(bitmap_bh);
42849 +- bitmap_bh = NULL;
42850 +- goto out_err;
42851 +- }
42852 ++ /*
42853 ++ * Check to see if we are freeing blocks across a group
42854 ++ * boundary.
42855 ++ * In case of flex_bg, this can happen that (block, len) may
42856 ++ * span across more than one group. In that case we need to
42857 ++ * get the corresponding group metadata to work with.
42858 ++ * For this we have goto again loop.
42859 ++ */
42860 ++ thisgrp_len = min_t(unsigned int, (unsigned int)len,
42861 ++ EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
42862 ++ clen = EXT4_NUM_B2C(sbi, thisgrp_len);
42863 +
42864 +- err = -EIO;
42865 +- gdp = ext4_get_group_desc(sb, group, &gdp_bh);
42866 +- if (!gdp)
42867 +- goto out_err;
42868 ++ bitmap_bh = ext4_read_block_bitmap(sb, group);
42869 ++ if (IS_ERR(bitmap_bh)) {
42870 ++ err = PTR_ERR(bitmap_bh);
42871 ++ bitmap_bh = NULL;
42872 ++ break;
42873 ++ }
42874 +
42875 +- ext4_lock_group(sb, group);
42876 +- already = 0;
42877 +- for (i = 0; i < clen; i++)
42878 +- if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state)
42879 +- already++;
42880 ++ err = -EIO;
42881 ++ gdp = ext4_get_group_desc(sb, group, &gdp_bh);
42882 ++ if (!gdp)
42883 ++ break;
42884 +
42885 +- if (state)
42886 +- ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
42887 +- else
42888 +- mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
42889 +- if (ext4_has_group_desc_csum(sb) &&
42890 +- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
42891 +- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
42892 +- ext4_free_group_clusters_set(sb, gdp,
42893 +- ext4_free_clusters_after_init(sb,
42894 +- group, gdp));
42895 +- }
42896 +- if (state)
42897 +- clen = ext4_free_group_clusters(sb, gdp) - clen + already;
42898 +- else
42899 +- clen = ext4_free_group_clusters(sb, gdp) + clen - already;
42900 ++ ext4_lock_group(sb, group);
42901 ++ already = 0;
42902 ++ for (i = 0; i < clen; i++)
42903 ++ if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
42904 ++ !state)
42905 ++ already++;
42906 ++
42907 ++ clen_changed = clen - already;
42908 ++ if (state)
42909 ++ ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
42910 ++ else
42911 ++ mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
42912 ++ if (ext4_has_group_desc_csum(sb) &&
42913 ++ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
42914 ++ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
42915 ++ ext4_free_group_clusters_set(sb, gdp,
42916 ++ ext4_free_clusters_after_init(sb, group, gdp));
42917 ++ }
42918 ++ if (state)
42919 ++ clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
42920 ++ else
42921 ++ clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
42922 +
42923 +- ext4_free_group_clusters_set(sb, gdp, clen);
42924 +- ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
42925 +- ext4_group_desc_csum_set(sb, group, gdp);
42926 ++ ext4_free_group_clusters_set(sb, gdp, clen);
42927 ++ ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
42928 ++ ext4_group_desc_csum_set(sb, group, gdp);
42929 +
42930 +- ext4_unlock_group(sb, group);
42931 ++ ext4_unlock_group(sb, group);
42932 +
42933 +- if (sbi->s_log_groups_per_flex) {
42934 +- ext4_group_t flex_group = ext4_flex_group(sbi, group);
42935 ++ if (sbi->s_log_groups_per_flex) {
42936 ++ ext4_group_t flex_group = ext4_flex_group(sbi, group);
42937 ++ struct flex_groups *fg = sbi_array_rcu_deref(sbi,
42938 ++ s_flex_groups, flex_group);
42939 +
42940 +- atomic64_sub(len,
42941 +- &sbi_array_rcu_deref(sbi, s_flex_groups,
42942 +- flex_group)->free_clusters);
42943 ++ if (state)
42944 ++ atomic64_sub(clen_changed, &fg->free_clusters);
42945 ++ else
42946 ++ atomic64_add(clen_changed, &fg->free_clusters);
42947 ++
42948 ++ }
42949 ++
42950 ++ err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
42951 ++ if (err)
42952 ++ break;
42953 ++ sync_dirty_buffer(bitmap_bh);
42954 ++ err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
42955 ++ sync_dirty_buffer(gdp_bh);
42956 ++ if (err)
42957 ++ break;
42958 ++
42959 ++ block += thisgrp_len;
42960 ++ len -= thisgrp_len;
42961 ++ brelse(bitmap_bh);
42962 ++ BUG_ON(len < 0);
42963 + }
42964 +
42965 +- err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
42966 + if (err)
42967 +- goto out_err;
42968 +- sync_dirty_buffer(bitmap_bh);
42969 +- err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
42970 +- sync_dirty_buffer(gdp_bh);
42971 +-
42972 +-out_err:
42973 +- brelse(bitmap_bh);
42974 ++ brelse(bitmap_bh);
42975 + }
42976 +
42977 + /*
42978 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
42979 +index 8cf0a924a49bf..39e223f7bf64d 100644
42980 +--- a/fs/ext4/namei.c
42981 ++++ b/fs/ext4/namei.c
42982 +@@ -2997,14 +2997,14 @@ bool ext4_empty_dir(struct inode *inode)
42983 + if (inode->i_size < ext4_dir_rec_len(1, NULL) +
42984 + ext4_dir_rec_len(2, NULL)) {
42985 + EXT4_ERROR_INODE(inode, "invalid size");
42986 +- return true;
42987 ++ return false;
42988 + }
42989 + /* The first directory block must not be a hole,
42990 + * so treat it as DIRENT_HTREE
42991 + */
42992 + bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
42993 + if (IS_ERR(bh))
42994 +- return true;
42995 ++ return false;
42996 +
42997 + de = (struct ext4_dir_entry_2 *) bh->b_data;
42998 + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
42999 +@@ -3012,7 +3012,7 @@ bool ext4_empty_dir(struct inode *inode)
43000 + le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
43001 + ext4_warning_inode(inode, "directory missing '.'");
43002 + brelse(bh);
43003 +- return true;
43004 ++ return false;
43005 + }
43006 + offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
43007 + de = ext4_next_entry(de, sb->s_blocksize);
43008 +@@ -3021,7 +3021,7 @@ bool ext4_empty_dir(struct inode *inode)
43009 + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
43010 + ext4_warning_inode(inode, "directory missing '..'");
43011 + brelse(bh);
43012 +- return true;
43013 ++ return false;
43014 + }
43015 + offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
43016 + while (offset < inode->i_size) {
43017 +@@ -3035,7 +3035,7 @@ bool ext4_empty_dir(struct inode *inode)
43018 + continue;
43019 + }
43020 + if (IS_ERR(bh))
43021 +- return true;
43022 ++ return false;
43023 + }
43024 + de = (struct ext4_dir_entry_2 *) (bh->b_data +
43025 + (offset & (sb->s_blocksize - 1)));
43026 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
43027 +index c5021ca0a28ad..bed29f96ccc7e 100644
43028 +--- a/fs/ext4/super.c
43029 ++++ b/fs/ext4/super.c
43030 +@@ -2021,12 +2021,12 @@ static int ext4_set_test_dummy_encryption(struct super_block *sb, char *arg)
43031 + #define EXT4_SPEC_s_commit_interval (1 << 16)
43032 + #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17)
43033 + #define EXT4_SPEC_s_sb_block (1 << 18)
43034 ++#define EXT4_SPEC_mb_optimize_scan (1 << 19)
43035 +
43036 + struct ext4_fs_context {
43037 + char *s_qf_names[EXT4_MAXQUOTAS];
43038 + char *test_dummy_enc_arg;
43039 + int s_jquota_fmt; /* Format of quota to use */
43040 +- int mb_optimize_scan;
43041 + #ifdef CONFIG_EXT4_DEBUG
43042 + int s_fc_debug_max_replay;
43043 + #endif
43044 +@@ -2045,8 +2045,8 @@ struct ext4_fs_context {
43045 + unsigned int mask_s_mount_opt;
43046 + unsigned int vals_s_mount_opt2;
43047 + unsigned int mask_s_mount_opt2;
43048 +- unsigned int vals_s_mount_flags;
43049 +- unsigned int mask_s_mount_flags;
43050 ++ unsigned long vals_s_mount_flags;
43051 ++ unsigned long mask_s_mount_flags;
43052 + unsigned int opt_flags; /* MOPT flags */
43053 + unsigned int spec;
43054 + u32 s_max_batch_time;
43055 +@@ -2149,23 +2149,36 @@ static inline void ctx_set_##name(struct ext4_fs_context *ctx, \
43056 + { \
43057 + ctx->mask_s_##name |= flag; \
43058 + ctx->vals_s_##name |= flag; \
43059 +-} \
43060 ++}
43061 ++
43062 ++#define EXT4_CLEAR_CTX(name) \
43063 + static inline void ctx_clear_##name(struct ext4_fs_context *ctx, \
43064 + unsigned long flag) \
43065 + { \
43066 + ctx->mask_s_##name |= flag; \
43067 + ctx->vals_s_##name &= ~flag; \
43068 +-} \
43069 ++}
43070 ++
43071 ++#define EXT4_TEST_CTX(name) \
43072 + static inline unsigned long \
43073 + ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \
43074 + { \
43075 + return (ctx->vals_s_##name & flag); \
43076 +-} \
43077 ++}
43078 +
43079 +-EXT4_SET_CTX(flags);
43080 ++EXT4_SET_CTX(flags); /* set only */
43081 + EXT4_SET_CTX(mount_opt);
43082 ++EXT4_CLEAR_CTX(mount_opt);
43083 ++EXT4_TEST_CTX(mount_opt);
43084 + EXT4_SET_CTX(mount_opt2);
43085 +-EXT4_SET_CTX(mount_flags);
43086 ++EXT4_CLEAR_CTX(mount_opt2);
43087 ++EXT4_TEST_CTX(mount_opt2);
43088 ++
43089 ++static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit)
43090 ++{
43091 ++ set_bit(bit, &ctx->mask_s_mount_flags);
43092 ++ set_bit(bit, &ctx->vals_s_mount_flags);
43093 ++}
43094 +
43095 + static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
43096 + {
43097 +@@ -2235,7 +2248,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
43098 + param->key);
43099 + return 0;
43100 + case Opt_abort:
43101 +- ctx_set_mount_flags(ctx, EXT4_MF_FS_ABORTED);
43102 ++ ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED);
43103 + return 0;
43104 + case Opt_i_version:
43105 + ext4_msg(NULL, KERN_WARNING, deprecated_msg, param->key, "5.20");
43106 +@@ -2451,12 +2464,17 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
43107 + ctx_clear_mount_opt(ctx, m->mount_opt);
43108 + return 0;
43109 + case Opt_mb_optimize_scan:
43110 +- if (result.int_32 != 0 && result.int_32 != 1) {
43111 ++ if (result.int_32 == 1) {
43112 ++ ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
43113 ++ ctx->spec |= EXT4_SPEC_mb_optimize_scan;
43114 ++ } else if (result.int_32 == 0) {
43115 ++ ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
43116 ++ ctx->spec |= EXT4_SPEC_mb_optimize_scan;
43117 ++ } else {
43118 + ext4_msg(NULL, KERN_WARNING,
43119 + "mb_optimize_scan should be set to 0 or 1.");
43120 + return -EINVAL;
43121 + }
43122 +- ctx->mb_optimize_scan = result.int_32;
43123 + return 0;
43124 + }
43125 +
43126 +@@ -4369,7 +4387,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
43127 +
43128 + /* Set defaults for the variables that will be set during parsing */
43129 + ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
43130 +- ctx->mb_optimize_scan = DEFAULT_MB_OPTIMIZE_SCAN;
43131 +
43132 + sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
43133 + sbi->s_sectors_written_start =
43134 +@@ -5320,12 +5337,12 @@ no_journal:
43135 + * turned off by passing "mb_optimize_scan=0". This can also be
43136 + * turned on forcefully by passing "mb_optimize_scan=1".
43137 + */
43138 +- if (ctx->mb_optimize_scan == 1)
43139 +- set_opt2(sb, MB_OPTIMIZE_SCAN);
43140 +- else if (ctx->mb_optimize_scan == 0)
43141 +- clear_opt2(sb, MB_OPTIMIZE_SCAN);
43142 +- else if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
43143 +- set_opt2(sb, MB_OPTIMIZE_SCAN);
43144 ++ if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
43145 ++ if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
43146 ++ set_opt2(sb, MB_OPTIMIZE_SCAN);
43147 ++ else
43148 ++ clear_opt2(sb, MB_OPTIMIZE_SCAN);
43149 ++ }
43150 +
43151 + err = ext4_mb_init(sb);
43152 + if (err) {
43153 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
43154 +index 982f0170639fc..bf3ba85cf325b 100644
43155 +--- a/fs/f2fs/checkpoint.c
43156 ++++ b/fs/f2fs/checkpoint.c
43157 +@@ -864,6 +864,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
43158 + struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
43159 + struct f2fs_checkpoint *cp_block = NULL;
43160 + unsigned long long cur_version = 0, pre_version = 0;
43161 ++ unsigned int cp_blocks;
43162 + int err;
43163 +
43164 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
43165 +@@ -871,15 +872,16 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
43166 + if (err)
43167 + return NULL;
43168 +
43169 +- if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
43170 +- sbi->blocks_per_seg) {
43171 ++ cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);
43172 ++
43173 ++ if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) {
43174 + f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
43175 + le32_to_cpu(cp_block->cp_pack_total_block_count));
43176 + goto invalid_cp;
43177 + }
43178 + pre_version = *version;
43179 +
43180 +- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
43181 ++ cp_addr += cp_blocks - 1;
43182 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
43183 + &cp_page_2, version);
43184 + if (err)
43185 +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
43186 +index d0c3aeba59454..3b162506b269a 100644
43187 +--- a/fs/f2fs/compress.c
43188 ++++ b/fs/f2fs/compress.c
43189 +@@ -314,10 +314,9 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
43190 + }
43191 +
43192 + if (ret != PAGE_SIZE << dic->log_cluster_size) {
43193 +- printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
43194 ++ printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
43195 + "expected:%lu\n", KERN_ERR,
43196 +- F2FS_I_SB(dic->inode)->sb->s_id,
43197 +- dic->rlen,
43198 ++ F2FS_I_SB(dic->inode)->sb->s_id, ret,
43199 + PAGE_SIZE << dic->log_cluster_size);
43200 + return -EIO;
43201 + }
43202 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
43203 +index 8c417864c66ae..bdfa8bed10b2c 100644
43204 +--- a/fs/f2fs/data.c
43205 ++++ b/fs/f2fs/data.c
43206 +@@ -3163,8 +3163,12 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
43207 + /* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
43208 + if (wbc->sync_mode == WB_SYNC_ALL)
43209 + atomic_inc(&sbi->wb_sync_req[DATA]);
43210 +- else if (atomic_read(&sbi->wb_sync_req[DATA]))
43211 ++ else if (atomic_read(&sbi->wb_sync_req[DATA])) {
43212 ++ /* to avoid potential deadlock */
43213 ++ if (current->plug)
43214 ++ blk_finish_plug(current->plug);
43215 + goto skip_write;
43216 ++ }
43217 +
43218 + if (__should_serialize_io(inode, wbc)) {
43219 + mutex_lock(&sbi->writepages);
43220 +@@ -3353,7 +3357,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
43221 +
43222 + *fsdata = NULL;
43223 +
43224 +- if (len == PAGE_SIZE)
43225 ++ if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
43226 + goto repeat;
43227 +
43228 + ret = f2fs_prepare_compress_overwrite(inode, pagep,
43229 +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
43230 +index 8c50518475a99..b449c7a372a4b 100644
43231 +--- a/fs/f2fs/debug.c
43232 ++++ b/fs/f2fs/debug.c
43233 +@@ -21,7 +21,7 @@
43234 + #include "gc.h"
43235 +
43236 + static LIST_HEAD(f2fs_stat_list);
43237 +-static DEFINE_MUTEX(f2fs_stat_mutex);
43238 ++static DEFINE_RAW_SPINLOCK(f2fs_stat_lock);
43239 + #ifdef CONFIG_DEBUG_FS
43240 + static struct dentry *f2fs_debugfs_root;
43241 + #endif
43242 +@@ -338,14 +338,16 @@ static char *s_flag[] = {
43243 + [SBI_QUOTA_SKIP_FLUSH] = " quota_skip_flush",
43244 + [SBI_QUOTA_NEED_REPAIR] = " quota_need_repair",
43245 + [SBI_IS_RESIZEFS] = " resizefs",
43246 ++ [SBI_IS_FREEZING] = " freezefs",
43247 + };
43248 +
43249 + static int stat_show(struct seq_file *s, void *v)
43250 + {
43251 + struct f2fs_stat_info *si;
43252 + int i = 0, j = 0;
43253 ++ unsigned long flags;
43254 +
43255 +- mutex_lock(&f2fs_stat_mutex);
43256 ++ raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
43257 + list_for_each_entry(si, &f2fs_stat_list, stat_list) {
43258 + update_general_status(si->sbi);
43259 +
43260 +@@ -573,7 +575,7 @@ static int stat_show(struct seq_file *s, void *v)
43261 + seq_printf(s, " - paged : %llu KB\n",
43262 + si->page_mem >> 10);
43263 + }
43264 +- mutex_unlock(&f2fs_stat_mutex);
43265 ++ raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
43266 + return 0;
43267 + }
43268 +
43269 +@@ -584,6 +586,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
43270 + {
43271 + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
43272 + struct f2fs_stat_info *si;
43273 ++ unsigned long flags;
43274 + int i;
43275 +
43276 + si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
43277 +@@ -619,9 +622,9 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
43278 + atomic_set(&sbi->max_aw_cnt, 0);
43279 + atomic_set(&sbi->max_vw_cnt, 0);
43280 +
43281 +- mutex_lock(&f2fs_stat_mutex);
43282 ++ raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
43283 + list_add_tail(&si->stat_list, &f2fs_stat_list);
43284 +- mutex_unlock(&f2fs_stat_mutex);
43285 ++ raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
43286 +
43287 + return 0;
43288 + }
43289 +@@ -629,10 +632,11 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
43290 + void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
43291 + {
43292 + struct f2fs_stat_info *si = F2FS_STAT(sbi);
43293 ++ unsigned long flags;
43294 +
43295 +- mutex_lock(&f2fs_stat_mutex);
43296 ++ raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
43297 + list_del(&si->stat_list);
43298 +- mutex_unlock(&f2fs_stat_mutex);
43299 ++ raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
43300 +
43301 + kfree(si);
43302 + }
43303 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
43304 +index 68b44015514f5..2514597f5b26b 100644
43305 +--- a/fs/f2fs/f2fs.h
43306 ++++ b/fs/f2fs/f2fs.h
43307 +@@ -1267,6 +1267,7 @@ enum {
43308 + SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
43309 + SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
43310 + SBI_IS_RESIZEFS, /* resizefs is in process */
43311 ++ SBI_IS_FREEZING, /* freezefs is in process */
43312 + };
43313 +
43314 + enum {
43315 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
43316 +index 3c98ef6af97d1..b110c3a7db6ae 100644
43317 +--- a/fs/f2fs/file.c
43318 ++++ b/fs/f2fs/file.c
43319 +@@ -2008,7 +2008,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
43320 +
43321 + inode_lock(inode);
43322 +
43323 +- f2fs_disable_compressed_file(inode);
43324 ++ if (!f2fs_disable_compressed_file(inode)) {
43325 ++ ret = -EINVAL;
43326 ++ goto out;
43327 ++ }
43328 +
43329 + if (f2fs_is_atomic_file(inode)) {
43330 + if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
43331 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
43332 +index ee308a8de4327..e020804f7b075 100644
43333 +--- a/fs/f2fs/gc.c
43334 ++++ b/fs/f2fs/gc.c
43335 +@@ -1038,8 +1038,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
43336 + set_sbi_flag(sbi, SBI_NEED_FSCK);
43337 + }
43338 +
43339 +- if (f2fs_check_nid_range(sbi, dni->ino))
43340 ++ if (f2fs_check_nid_range(sbi, dni->ino)) {
43341 ++ f2fs_put_page(node_page, 1);
43342 + return false;
43343 ++ }
43344 +
43345 + *nofs = ofs_of_node(node_page);
43346 + source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
43347 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
43348 +index 0ec8e32a00b47..71f232dcf3c20 100644
43349 +--- a/fs/f2fs/inode.c
43350 ++++ b/fs/f2fs/inode.c
43351 +@@ -778,7 +778,8 @@ void f2fs_evict_inode(struct inode *inode)
43352 + f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
43353 + f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
43354 +
43355 +- sb_start_intwrite(inode->i_sb);
43356 ++ if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
43357 ++ sb_start_intwrite(inode->i_sb);
43358 + set_inode_flag(inode, FI_NO_ALLOC);
43359 + i_size_write(inode, 0);
43360 + retry:
43361 +@@ -809,7 +810,8 @@ retry:
43362 + if (dquot_initialize_needed(inode))
43363 + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
43364 + }
43365 +- sb_end_intwrite(inode->i_sb);
43366 ++ if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
43367 ++ sb_end_intwrite(inode->i_sb);
43368 + no_delete:
43369 + dquot_drop(inode);
43370 +
43371 +@@ -885,6 +887,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
43372 + err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
43373 + if (err) {
43374 + set_sbi_flag(sbi, SBI_NEED_FSCK);
43375 ++ set_inode_flag(inode, FI_FREE_NID);
43376 + f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
43377 + goto out;
43378 + }
43379 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
43380 +index 50b2874e758c9..4ff7dfb542502 100644
43381 +--- a/fs/f2fs/node.c
43382 ++++ b/fs/f2fs/node.c
43383 +@@ -2111,8 +2111,12 @@ static int f2fs_write_node_pages(struct address_space *mapping,
43384 +
43385 + if (wbc->sync_mode == WB_SYNC_ALL)
43386 + atomic_inc(&sbi->wb_sync_req[NODE]);
43387 +- else if (atomic_read(&sbi->wb_sync_req[NODE]))
43388 ++ else if (atomic_read(&sbi->wb_sync_req[NODE])) {
43389 ++ /* to avoid potential deadlock */
43390 ++ if (current->plug)
43391 ++ blk_finish_plug(current->plug);
43392 + goto skip_write;
43393 ++ }
43394 +
43395 + trace_f2fs_writepages(mapping->host, wbc, NODE);
43396 +
43397 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
43398 +index 1dabc8244083d..416d802ebbea6 100644
43399 +--- a/fs/f2fs/segment.c
43400 ++++ b/fs/f2fs/segment.c
43401 +@@ -4789,6 +4789,13 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
43402 +
43403 + sanity_check_seg_type(sbi, curseg->seg_type);
43404 +
43405 ++ if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
43406 ++ f2fs_err(sbi,
43407 ++ "Current segment has invalid alloc_type:%d",
43408 ++ curseg->alloc_type);
43409 ++ return -EFSCORRUPTED;
43410 ++ }
43411 ++
43412 + if (f2fs_test_bit(blkofs, se->cur_valid_map))
43413 + goto out;
43414 +
43415 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
43416 +index baefd398ec1a3..c4f8510fac930 100644
43417 +--- a/fs/f2fs/super.c
43418 ++++ b/fs/f2fs/super.c
43419 +@@ -1662,11 +1662,15 @@ static int f2fs_freeze(struct super_block *sb)
43420 + /* ensure no checkpoint required */
43421 + if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
43422 + return -EINVAL;
43423 ++
43424 ++ /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
43425 ++ set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
43426 + return 0;
43427 + }
43428 +
43429 + static int f2fs_unfreeze(struct super_block *sb)
43430 + {
43431 ++ clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
43432 + return 0;
43433 + }
43434 +
43435 +@@ -2688,7 +2692,7 @@ int f2fs_quota_sync(struct super_block *sb, int type)
43436 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
43437 + struct quota_info *dqopt = sb_dqopt(sb);
43438 + int cnt;
43439 +- int ret;
43440 ++ int ret = 0;
43441 +
43442 + /*
43443 + * Now when everything is written we can discard the pagecache so
43444 +@@ -2699,8 +2703,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
43445 + if (type != -1 && cnt != type)
43446 + continue;
43447 +
43448 +- if (!sb_has_quota_active(sb, type))
43449 +- return 0;
43450 ++ if (!sb_has_quota_active(sb, cnt))
43451 ++ continue;
43452 +
43453 + inode_lock(dqopt->files[cnt]);
43454 +
43455 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
43456 +index 8ac5066712454..bdb1b5c05be2e 100644
43457 +--- a/fs/f2fs/sysfs.c
43458 ++++ b/fs/f2fs/sysfs.c
43459 +@@ -481,7 +481,7 @@ out:
43460 + } else if (t == GC_IDLE_AT) {
43461 + if (!sbi->am.atgc_enabled)
43462 + return -EINVAL;
43463 +- sbi->gc_mode = GC_AT;
43464 ++ sbi->gc_mode = GC_IDLE_AT;
43465 + } else {
43466 + sbi->gc_mode = GC_NORMAL;
43467 + }
43468 +diff --git a/fs/file.c b/fs/file.c
43469 +index 97d212a9b8144..ee93173467025 100644
43470 +--- a/fs/file.c
43471 ++++ b/fs/file.c
43472 +@@ -87,6 +87,21 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
43473 + copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
43474 + }
43475 +
43476 ++/*
43477 ++ * Note how the fdtable bitmap allocations very much have to be a multiple of
43478 ++ * BITS_PER_LONG. This is not only because we walk those things in chunks of
43479 ++ * 'unsigned long' in some places, but simply because that is how the Linux
43480 ++ * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
43481 ++ * they are very much "bits in an array of unsigned long".
43482 ++ *
43483 ++ * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
43484 ++ * by that "1024/sizeof(ptr)" before, we already know there are sufficient
43485 ++ * clear low bits. Clang seems to realize that, gcc ends up being confused.
43486 ++ *
43487 ++ * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
43488 ++ * let's consider it documentation (and maybe a test-case for gcc to improve
43489 ++ * its code generation ;)
43490 ++ */
43491 + static struct fdtable * alloc_fdtable(unsigned int nr)
43492 + {
43493 + struct fdtable *fdt;
43494 +@@ -102,6 +117,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
43495 + nr /= (1024 / sizeof(struct file *));
43496 + nr = roundup_pow_of_two(nr + 1);
43497 + nr *= (1024 / sizeof(struct file *));
43498 ++ nr = ALIGN(nr, BITS_PER_LONG);
43499 + /*
43500 + * Note that this can drive nr *below* what we had passed if sysctl_nr_open
43501 + * had been set lower between the check in expand_files() and here. Deal
43502 +@@ -269,6 +285,19 @@ static unsigned int count_open_files(struct fdtable *fdt)
43503 + return i;
43504 + }
43505 +
43506 ++/*
43507 ++ * Note that a sane fdtable size always has to be a multiple of
43508 ++ * BITS_PER_LONG, since we have bitmaps that are sized by this.
43509 ++ *
43510 ++ * 'max_fds' will normally already be properly aligned, but it
43511 ++ * turns out that in the close_range() -> __close_range() ->
43512 ++ * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
43513 ++ * up having a 'max_fds' value that isn't already aligned.
43514 ++ *
43515 ++ * Rather than make close_range() have to worry about this,
43516 ++ * just make that BITS_PER_LONG alignment be part of a sane
43517 ++ * fdtable size. Becuase that's really what it is.
43518 ++ */
43519 + static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
43520 + {
43521 + unsigned int count;
43522 +@@ -276,7 +305,7 @@ static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
43523 + count = count_open_files(fdt);
43524 + if (max_fds < NR_OPEN_DEFAULT)
43525 + max_fds = NR_OPEN_DEFAULT;
43526 +- return min(count, max_fds);
43527 ++ return ALIGN(min(count, max_fds), BITS_PER_LONG);
43528 + }
43529 +
43530 + /*
43531 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
43532 +index d67108489148e..fbdb7a30470a3 100644
43533 +--- a/fs/gfs2/bmap.c
43534 ++++ b/fs/gfs2/bmap.c
43535 +@@ -2146,7 +2146,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
43536 +
43537 + ret = do_shrink(inode, newsize);
43538 + out:
43539 +- gfs2_rs_delete(ip, NULL);
43540 ++ gfs2_rs_delete(ip);
43541 + gfs2_qa_put(ip);
43542 + return ret;
43543 + }
43544 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
43545 +index 8c39a8571b1fa..b53ad18e5ccbf 100644
43546 +--- a/fs/gfs2/file.c
43547 ++++ b/fs/gfs2/file.c
43548 +@@ -706,7 +706,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
43549 +
43550 + if (file->f_mode & FMODE_WRITE) {
43551 + if (gfs2_rs_active(&ip->i_res))
43552 +- gfs2_rs_delete(ip, &inode->i_writecount);
43553 ++ gfs2_rs_delete(ip);
43554 + gfs2_qa_put(ip);
43555 + }
43556 + return 0;
43557 +@@ -1083,6 +1083,7 @@ out_uninit:
43558 + gfs2_holder_uninit(gh);
43559 + if (statfs_gh)
43560 + kfree(statfs_gh);
43561 ++ from->count = orig_count - read;
43562 + return read ? read : ret;
43563 + }
43564 +
43565 +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
43566 +index 89905f4f29bb6..66a123306aecb 100644
43567 +--- a/fs/gfs2/inode.c
43568 ++++ b/fs/gfs2/inode.c
43569 +@@ -793,7 +793,7 @@ fail_free_inode:
43570 + if (free_vfs_inode) /* else evict will do the put for us */
43571 + gfs2_glock_put(ip->i_gl);
43572 + }
43573 +- gfs2_rs_delete(ip, NULL);
43574 ++ gfs2_rs_deltree(&ip->i_res);
43575 + gfs2_qa_put(ip);
43576 + fail_free_acls:
43577 + posix_acl_release(default_acl);
43578 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
43579 +index 0fb3c01bc5577..3b34bb24d0af4 100644
43580 +--- a/fs/gfs2/rgrp.c
43581 ++++ b/fs/gfs2/rgrp.c
43582 +@@ -680,13 +680,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
43583 + /**
43584 + * gfs2_rs_delete - delete a multi-block reservation
43585 + * @ip: The inode for this reservation
43586 +- * @wcount: The inode's write count, or NULL
43587 + *
43588 + */
43589 +-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
43590 ++void gfs2_rs_delete(struct gfs2_inode *ip)
43591 + {
43592 ++ struct inode *inode = &ip->i_inode;
43593 ++
43594 + down_write(&ip->i_rw_mutex);
43595 +- if ((wcount == NULL) || (atomic_read(wcount) <= 1))
43596 ++ if (atomic_read(&inode->i_writecount) <= 1)
43597 + gfs2_rs_deltree(&ip->i_res);
43598 + up_write(&ip->i_rw_mutex);
43599 + }
43600 +@@ -1415,7 +1416,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
43601 +
43602 + start = r.start >> bs_shift;
43603 + end = start + (r.len >> bs_shift);
43604 +- minlen = max_t(u64, r.minlen,
43605 ++ minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
43606 ++ minlen = max_t(u64, minlen,
43607 + q->limits.discard_granularity) >> bs_shift;
43608 +
43609 + if (end <= start || minlen > sdp->sd_max_rg_data)
43610 +diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
43611 +index 3e2ca1fb43056..46dd94e9e085c 100644
43612 +--- a/fs/gfs2/rgrp.h
43613 ++++ b/fs/gfs2/rgrp.h
43614 +@@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
43615 + bool dinode, u64 *generation);
43616 +
43617 + extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
43618 +-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
43619 ++extern void gfs2_rs_delete(struct gfs2_inode *ip);
43620 + extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
43621 + u64 bstart, u32 blen, int meta);
43622 + extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
43623 +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
43624 +index 64c67090f5036..143a47359d1b8 100644
43625 +--- a/fs/gfs2/super.c
43626 ++++ b/fs/gfs2/super.c
43627 +@@ -1396,7 +1396,7 @@ out:
43628 + truncate_inode_pages_final(&inode->i_data);
43629 + if (ip->i_qadata)
43630 + gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
43631 +- gfs2_rs_delete(ip, NULL);
43632 ++ gfs2_rs_deltree(&ip->i_res);
43633 + gfs2_ordered_del_inode(ip);
43634 + clear_inode(inode);
43635 + gfs2_dir_hash_inval(ip);
43636 +diff --git a/fs/io_uring.c b/fs/io_uring.c
43637 +index 4715980e90150..5e6788ab188fa 100644
43638 +--- a/fs/io_uring.c
43639 ++++ b/fs/io_uring.c
43640 +@@ -2813,8 +2813,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
43641 +
43642 + static bool __io_complete_rw_common(struct io_kiocb *req, long res)
43643 + {
43644 +- if (req->rw.kiocb.ki_flags & IOCB_WRITE)
43645 ++ if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
43646 + kiocb_end_write(req);
43647 ++ fsnotify_modify(req->file);
43648 ++ } else {
43649 ++ fsnotify_access(req->file);
43650 ++ }
43651 + if (unlikely(res != req->result)) {
43652 + if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
43653 + io_rw_should_reissue(req)) {
43654 +@@ -3436,13 +3440,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
43655 + ret = nr;
43656 + break;
43657 + }
43658 ++ ret += nr;
43659 + if (!iov_iter_is_bvec(iter)) {
43660 + iov_iter_advance(iter, nr);
43661 + } else {
43662 +- req->rw.len -= nr;
43663 + req->rw.addr += nr;
43664 ++ req->rw.len -= nr;
43665 ++ if (!req->rw.len)
43666 ++ break;
43667 + }
43668 +- ret += nr;
43669 + if (nr != iovec.iov_len)
43670 + break;
43671 + }
43672 +@@ -4301,6 +4307,8 @@ static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
43673 + req->sync.len);
43674 + if (ret < 0)
43675 + req_set_fail(req);
43676 ++ else
43677 ++ fsnotify_modify(req->file);
43678 + io_req_complete(req, ret);
43679 + return 0;
43680 + }
43681 +@@ -5258,8 +5266,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
43682 + accept->nofile = rlimit(RLIMIT_NOFILE);
43683 +
43684 + accept->file_slot = READ_ONCE(sqe->file_index);
43685 +- if (accept->file_slot && ((req->open.how.flags & O_CLOEXEC) ||
43686 +- (accept->flags & SOCK_CLOEXEC)))
43687 ++ if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
43688 + return -EINVAL;
43689 + if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
43690 + return -EINVAL;
43691 +@@ -5407,7 +5414,7 @@ struct io_poll_table {
43692 + };
43693 +
43694 + #define IO_POLL_CANCEL_FLAG BIT(31)
43695 +-#define IO_POLL_REF_MASK ((1u << 20)-1)
43696 ++#define IO_POLL_REF_MASK GENMASK(30, 0)
43697 +
43698 + /*
43699 + * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
43700 +@@ -5863,6 +5870,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
43701 + list = &ctx->cancel_hash[i];
43702 + hlist_for_each_entry_safe(req, tmp, list, hash_node) {
43703 + if (io_match_task_safe(req, tsk, cancel_all)) {
43704 ++ hlist_del_init(&req->hash_node);
43705 + io_poll_cancel_req(req);
43706 + found = true;
43707 + }
43708 +@@ -8233,6 +8241,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
43709 + fput(fpl->fp[i]);
43710 + } else {
43711 + kfree_skb(skb);
43712 ++ free_uid(fpl->user);
43713 + kfree(fpl);
43714 + }
43715 +
43716 +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
43717 +index 6c51a75d0be61..d020a2e81a24c 100644
43718 +--- a/fs/iomap/buffered-io.c
43719 ++++ b/fs/iomap/buffered-io.c
43720 +@@ -480,7 +480,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
43721 +
43722 + void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
43723 + {
43724 +- trace_iomap_invalidatepage(folio->mapping->host, offset, len);
43725 ++ trace_iomap_invalidatepage(folio->mapping->host,
43726 ++ folio_pos(folio) + offset, len);
43727 +
43728 + /*
43729 + * If we're invalidating the entire folio, clear the dirty state
43730 +diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
43731 +index b288c8ae1236b..837cd55fd4c5e 100644
43732 +--- a/fs/jffs2/build.c
43733 ++++ b/fs/jffs2/build.c
43734 +@@ -415,13 +415,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
43735 + jffs2_free_ino_caches(c);
43736 + jffs2_free_raw_node_refs(c);
43737 + ret = -EIO;
43738 +- goto out_free;
43739 ++ goto out_sum_exit;
43740 + }
43741 +
43742 + jffs2_calc_trigger_levels(c);
43743 +
43744 + return 0;
43745 +
43746 ++ out_sum_exit:
43747 ++ jffs2_sum_exit(c);
43748 + out_free:
43749 + kvfree(c->blocks);
43750 +
43751 +diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
43752 +index 2ac410477c4f4..71f03a5d36ed2 100644
43753 +--- a/fs/jffs2/fs.c
43754 ++++ b/fs/jffs2/fs.c
43755 +@@ -603,8 +603,8 @@ out_root:
43756 + jffs2_free_ino_caches(c);
43757 + jffs2_free_raw_node_refs(c);
43758 + kvfree(c->blocks);
43759 +- out_inohash:
43760 + jffs2_clear_xattr_subsystem(c);
43761 ++ out_inohash:
43762 + kfree(c->inocache_list);
43763 + out_wbuf:
43764 + jffs2_flash_cleanup(c);
43765 +diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
43766 +index b676056826beb..29671e33a1714 100644
43767 +--- a/fs/jffs2/scan.c
43768 ++++ b/fs/jffs2/scan.c
43769 +@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
43770 + if (!s) {
43771 + JFFS2_WARNING("Can't allocate memory for summary\n");
43772 + ret = -ENOMEM;
43773 +- goto out;
43774 ++ goto out_buf;
43775 + }
43776 + }
43777 +
43778 +@@ -275,13 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
43779 + }
43780 + ret = 0;
43781 + out:
43782 ++ jffs2_sum_reset_collected(s);
43783 ++ kfree(s);
43784 ++ out_buf:
43785 + if (buf_size)
43786 + kfree(flashbuf);
43787 + #ifndef __ECOS
43788 + else
43789 + mtd_unpoint(c->mtd, 0, c->mtd->size);
43790 + #endif
43791 +- kfree(s);
43792 + return ret;
43793 + }
43794 +
43795 +diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
43796 +index 91f4ec93dab1f..d8502f4989d9d 100644
43797 +--- a/fs/jfs/jfs_dmap.c
43798 ++++ b/fs/jfs/jfs_dmap.c
43799 +@@ -148,6 +148,7 @@ static const s8 budtab[256] = {
43800 + * 0 - success
43801 + * -ENOMEM - insufficient memory
43802 + * -EIO - i/o error
43803 ++ * -EINVAL - wrong bmap data
43804 + */
43805 + int dbMount(struct inode *ipbmap)
43806 + {
43807 +@@ -179,6 +180,12 @@ int dbMount(struct inode *ipbmap)
43808 + bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
43809 + bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
43810 + bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
43811 ++ if (!bmp->db_numag) {
43812 ++ release_metapage(mp);
43813 ++ kfree(bmp);
43814 ++ return -EINVAL;
43815 ++ }
43816 ++
43817 + bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
43818 + bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
43819 + bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
43820 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
43821 +index c343666d9a428..6464dde03705c 100644
43822 +--- a/fs/nfs/callback_proc.c
43823 ++++ b/fs/nfs/callback_proc.c
43824 +@@ -358,12 +358,11 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp,
43825 + struct cb_process_state *cps)
43826 + {
43827 + struct cb_devicenotifyargs *args = argp;
43828 ++ const struct pnfs_layoutdriver_type *ld = NULL;
43829 + uint32_t i;
43830 + __be32 res = 0;
43831 +- struct nfs_client *clp = cps->clp;
43832 +- struct nfs_server *server = NULL;
43833 +
43834 +- if (!clp) {
43835 ++ if (!cps->clp) {
43836 + res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
43837 + goto out;
43838 + }
43839 +@@ -371,23 +370,15 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp,
43840 + for (i = 0; i < args->ndevs; i++) {
43841 + struct cb_devicenotifyitem *dev = &args->devs[i];
43842 +
43843 +- if (!server ||
43844 +- server->pnfs_curr_ld->id != dev->cbd_layout_type) {
43845 +- rcu_read_lock();
43846 +- list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
43847 +- if (server->pnfs_curr_ld &&
43848 +- server->pnfs_curr_ld->id == dev->cbd_layout_type) {
43849 +- rcu_read_unlock();
43850 +- goto found;
43851 +- }
43852 +- rcu_read_unlock();
43853 +- continue;
43854 ++ if (!ld || ld->id != dev->cbd_layout_type) {
43855 ++ pnfs_put_layoutdriver(ld);
43856 ++ ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
43857 ++ if (!ld)
43858 ++ continue;
43859 + }
43860 +-
43861 +- found:
43862 +- nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
43863 ++ nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
43864 + }
43865 +-
43866 ++ pnfs_put_layoutdriver(ld);
43867 + out:
43868 + kfree(args->devs);
43869 + return res;
43870 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
43871 +index f90de8043b0f9..8dcb08e1a885d 100644
43872 +--- a/fs/nfs/callback_xdr.c
43873 ++++ b/fs/nfs/callback_xdr.c
43874 +@@ -271,10 +271,6 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
43875 + n = ntohl(*p++);
43876 + if (n == 0)
43877 + goto out;
43878 +- if (n > ULONG_MAX / sizeof(*args->devs)) {
43879 +- status = htonl(NFS4ERR_BADXDR);
43880 +- goto out;
43881 +- }
43882 +
43883 + args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
43884 + if (!args->devs) {
43885 +diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
43886 +index 7fba7711e6b3a..3d5ba43f44bb6 100644
43887 +--- a/fs/nfs/nfs2xdr.c
43888 ++++ b/fs/nfs/nfs2xdr.c
43889 +@@ -949,7 +949,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43890 +
43891 + error = decode_filename_inline(xdr, &entry->name, &entry->len);
43892 + if (unlikely(error))
43893 +- return error;
43894 ++ return -EAGAIN;
43895 +
43896 + /*
43897 + * The type (size and byte order) of nfscookie isn't defined in
43898 +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
43899 +index 9274c9c5efea6..7ab60ad98776f 100644
43900 +--- a/fs/nfs/nfs3xdr.c
43901 ++++ b/fs/nfs/nfs3xdr.c
43902 +@@ -1967,7 +1967,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43903 + bool plus)
43904 + {
43905 + struct user_namespace *userns = rpc_userns(entry->server->client);
43906 +- struct nfs_entry old = *entry;
43907 + __be32 *p;
43908 + int error;
43909 + u64 new_cookie;
43910 +@@ -1987,15 +1986,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43911 +
43912 + error = decode_fileid3(xdr, &entry->ino);
43913 + if (unlikely(error))
43914 +- return error;
43915 ++ return -EAGAIN;
43916 +
43917 + error = decode_inline_filename3(xdr, &entry->name, &entry->len);
43918 + if (unlikely(error))
43919 +- return error;
43920 ++ return -EAGAIN;
43921 +
43922 + error = decode_cookie3(xdr, &new_cookie);
43923 + if (unlikely(error))
43924 +- return error;
43925 ++ return -EAGAIN;
43926 +
43927 + entry->d_type = DT_UNKNOWN;
43928 +
43929 +@@ -2003,7 +2002,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43930 + entry->fattr->valid = 0;
43931 + error = decode_post_op_attr(xdr, entry->fattr, userns);
43932 + if (unlikely(error))
43933 +- return error;
43934 ++ return -EAGAIN;
43935 + if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
43936 + entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
43937 +
43938 +@@ -2018,11 +2017,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43939 + return -EAGAIN;
43940 + if (*p != xdr_zero) {
43941 + error = decode_nfs_fh3(xdr, entry->fh);
43942 +- if (unlikely(error)) {
43943 +- if (error == -E2BIG)
43944 +- goto out_truncated;
43945 +- return error;
43946 +- }
43947 ++ if (unlikely(error))
43948 ++ return -EAGAIN;
43949 + } else
43950 + zero_nfs_fh3(entry->fh);
43951 + }
43952 +@@ -2031,11 +2027,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
43953 + entry->cookie = new_cookie;
43954 +
43955 + return 0;
43956 +-
43957 +-out_truncated:
43958 +- dprintk("NFS: directory entry contains invalid file handle\n");
43959 +- *entry = old;
43960 +- return -EAGAIN;
43961 + }
43962 +
43963 + /*
43964 +@@ -2228,6 +2219,7 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr,
43965 + /* ignore properties */
43966 + result->lease_time = 0;
43967 + result->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED;
43968 ++ result->xattr_support = 0;
43969 + return 0;
43970 + }
43971 +
43972 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
43973 +index 0e0db6c276196..c36fa0d0d438b 100644
43974 +--- a/fs/nfs/nfs4proc.c
43975 ++++ b/fs/nfs/nfs4proc.c
43976 +@@ -8333,6 +8333,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
43977 + case -NFS4ERR_DEADSESSION:
43978 + nfs4_schedule_session_recovery(clp->cl_session,
43979 + task->tk_status);
43980 ++ return;
43981 + }
43982 + if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
43983 + res->dir != NFS4_CDFS4_BOTH) {
43984 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
43985 +index ad7f83dc9a2df..815d630802451 100644
43986 +--- a/fs/nfs/pagelist.c
43987 ++++ b/fs/nfs/pagelist.c
43988 +@@ -1218,6 +1218,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
43989 +
43990 + do {
43991 + list_splice_init(&mirror->pg_list, &head);
43992 ++ mirror->pg_recoalesce = 0;
43993 +
43994 + while (!list_empty(&head)) {
43995 + struct nfs_page *req;
43996 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
43997 +index 7c9090a28e5c3..7ddd003ab8b1a 100644
43998 +--- a/fs/nfs/pnfs.c
43999 ++++ b/fs/nfs/pnfs.c
44000 +@@ -92,6 +92,17 @@ find_pnfs_driver(u32 id)
44001 + return local;
44002 + }
44003 +
44004 ++const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id)
44005 ++{
44006 ++ return find_pnfs_driver(id);
44007 ++}
44008 ++
44009 ++void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld)
44010 ++{
44011 ++ if (ld)
44012 ++ module_put(ld->owner);
44013 ++}
44014 ++
44015 + void
44016 + unset_pnfs_layoutdriver(struct nfs_server *nfss)
44017 + {
44018 +diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
44019 +index f4d7548d67b24..07f11489e4e9f 100644
44020 +--- a/fs/nfs/pnfs.h
44021 ++++ b/fs/nfs/pnfs.h
44022 +@@ -234,6 +234,8 @@ struct pnfs_devicelist {
44023 +
44024 + extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
44025 + extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
44026 ++extern const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id);
44027 ++extern void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld);
44028 +
44029 + /* nfs4proc.c */
44030 + extern size_t max_response_pages(struct nfs_server *server);
44031 +diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
44032 +index 73dcaa99fa9ba..e3570c656b0f9 100644
44033 +--- a/fs/nfs/proc.c
44034 ++++ b/fs/nfs/proc.c
44035 +@@ -92,6 +92,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
44036 + info->maxfilesize = 0x7FFFFFFF;
44037 + info->lease_time = 0;
44038 + info->change_attr_type = NFS4_CHANGE_TYPE_IS_UNDEFINED;
44039 ++ info->xattr_support = 0;
44040 + return 0;
44041 + }
44042 +
44043 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
44044 +index 987a187bd39aa..60693ab6a0325 100644
44045 +--- a/fs/nfs/write.c
44046 ++++ b/fs/nfs/write.c
44047 +@@ -316,7 +316,10 @@ static void nfs_mapping_set_error(struct page *page, int error)
44048 + struct address_space *mapping = page_file_mapping(page);
44049 +
44050 + SetPageError(page);
44051 +- mapping_set_error(mapping, error);
44052 ++ filemap_set_wb_err(mapping, error);
44053 ++ if (mapping->host)
44054 ++ errseq_set(&mapping->host->i_sb->s_wb_err,
44055 ++ error == -ENOSPC ? -ENOSPC : -EIO);
44056 + nfs_set_pageerror(mapping);
44057 + }
44058 +
44059 +@@ -1409,6 +1412,8 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
44060 + {
44061 + int priority = flush_task_priority(how);
44062 +
44063 ++ if (IS_SWAPFILE(hdr->inode))
44064 ++ task_setup_data->flags |= RPC_TASK_SWAPPER;
44065 + task_setup_data->priority = priority;
44066 + rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
44067 + trace_nfs_initiate_write(hdr);
44068 +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
44069 +index 8bc807c5fea4c..cc2831cec6695 100644
44070 +--- a/fs/nfsd/filecache.c
44071 ++++ b/fs/nfsd/filecache.c
44072 +@@ -632,7 +632,7 @@ nfsd_file_cache_init(void)
44073 + if (!nfsd_filecache_wq)
44074 + goto out;
44075 +
44076 +- nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
44077 ++ nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
44078 + sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
44079 + if (!nfsd_file_hashtbl) {
44080 + pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
44081 +@@ -700,7 +700,7 @@ out_err:
44082 + nfsd_file_slab = NULL;
44083 + kmem_cache_destroy(nfsd_file_mark_slab);
44084 + nfsd_file_mark_slab = NULL;
44085 +- kfree(nfsd_file_hashtbl);
44086 ++ kvfree(nfsd_file_hashtbl);
44087 + nfsd_file_hashtbl = NULL;
44088 + destroy_workqueue(nfsd_filecache_wq);
44089 + nfsd_filecache_wq = NULL;
44090 +@@ -811,7 +811,7 @@ nfsd_file_cache_shutdown(void)
44091 + fsnotify_wait_marks_destroyed();
44092 + kmem_cache_destroy(nfsd_file_mark_slab);
44093 + nfsd_file_mark_slab = NULL;
44094 +- kfree(nfsd_file_hashtbl);
44095 ++ kvfree(nfsd_file_hashtbl);
44096 + nfsd_file_hashtbl = NULL;
44097 + destroy_workqueue(nfsd_filecache_wq);
44098 + nfsd_filecache_wq = NULL;
44099 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
44100 +index 32063733443d4..f3b71fd1d1341 100644
44101 +--- a/fs/nfsd/nfs4state.c
44102 ++++ b/fs/nfsd/nfs4state.c
44103 +@@ -4711,6 +4711,14 @@ nfsd_break_deleg_cb(struct file_lock *fl)
44104 + return ret;
44105 + }
44106 +
44107 ++/**
44108 ++ * nfsd_breaker_owns_lease - Check if lease conflict was resolved
44109 ++ * @fl: Lock state to check
44110 ++ *
44111 ++ * Return values:
44112 ++ * %true: Lease conflict was resolved
44113 ++ * %false: Lease conflict was not resolved.
44114 ++ */
44115 + static bool nfsd_breaker_owns_lease(struct file_lock *fl)
44116 + {
44117 + struct nfs4_delegation *dl = fl->fl_owner;
44118 +@@ -4718,11 +4726,11 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl)
44119 + struct nfs4_client *clp;
44120 +
44121 + if (!i_am_nfsd())
44122 +- return NULL;
44123 ++ return false;
44124 + rqst = kthread_data(current);
44125 + /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
44126 + if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
44127 +- return NULL;
44128 ++ return false;
44129 + clp = *(rqst->rq_lease_breaker);
44130 + return dl->dl_stid.sc_client == clp;
44131 + }
44132 +diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
44133 +index 18b8eb43a19bc..fcdab8a8a41f4 100644
44134 +--- a/fs/nfsd/nfsproc.c
44135 ++++ b/fs/nfsd/nfsproc.c
44136 +@@ -230,7 +230,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
44137 + unsigned long cnt = argp->len;
44138 + unsigned int nvecs;
44139 +
44140 +- dprintk("nfsd: WRITE %s %d bytes at %d\n",
44141 ++ dprintk("nfsd: WRITE %s %u bytes at %d\n",
44142 + SVCFH_fmt(&argp->fh),
44143 + argp->len, argp->offset);
44144 +
44145 +diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h
44146 +index 528fb299430e6..852f71580bd06 100644
44147 +--- a/fs/nfsd/xdr.h
44148 ++++ b/fs/nfsd/xdr.h
44149 +@@ -32,7 +32,7 @@ struct nfsd_readargs {
44150 + struct nfsd_writeargs {
44151 + svc_fh fh;
44152 + __u32 offset;
44153 +- int len;
44154 ++ __u32 len;
44155 + struct xdr_buf payload;
44156 + };
44157 +
44158 +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
44159 +index 4474adb393ca8..517b71c73aa96 100644
44160 +--- a/fs/ntfs/inode.c
44161 ++++ b/fs/ntfs/inode.c
44162 +@@ -1881,6 +1881,10 @@ int ntfs_read_inode_mount(struct inode *vi)
44163 + }
44164 + /* Now allocate memory for the attribute list. */
44165 + ni->attr_list_size = (u32)ntfs_attr_size(a);
44166 ++ if (!ni->attr_list_size) {
44167 ++ ntfs_error(sb, "Attr_list_size is zero");
44168 ++ goto put_err_out;
44169 ++ }
44170 + ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
44171 + if (!ni->attr_list) {
44172 + ntfs_error(sb, "Not enough memory to allocate buffer "
44173 +diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
44174 +index f033de733adb3..effe92c7d6937 100644
44175 +--- a/fs/ocfs2/quota_global.c
44176 ++++ b/fs/ocfs2/quota_global.c
44177 +@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
44178 + /* Read information header from global quota file */
44179 + int ocfs2_global_read_info(struct super_block *sb, int type)
44180 + {
44181 +- struct inode *gqinode = NULL;
44182 + unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
44183 + GROUP_QUOTA_SYSTEM_INODE };
44184 + struct ocfs2_global_disk_dqinfo dinfo;
44185 +@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
44186 + u64 pcount;
44187 + int status;
44188 +
44189 ++ oinfo->dqi_gi.dqi_sb = sb;
44190 ++ oinfo->dqi_gi.dqi_type = type;
44191 ++ ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
44192 ++ oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
44193 ++ oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
44194 ++ oinfo->dqi_gqi_bh = NULL;
44195 ++ oinfo->dqi_gqi_count = 0;
44196 ++
44197 + /* Read global header */
44198 +- gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
44199 ++ oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
44200 + OCFS2_INVALID_SLOT);
44201 +- if (!gqinode) {
44202 ++ if (!oinfo->dqi_gqinode) {
44203 + mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
44204 + type);
44205 + status = -EINVAL;
44206 + goto out_err;
44207 + }
44208 +- oinfo->dqi_gi.dqi_sb = sb;
44209 +- oinfo->dqi_gi.dqi_type = type;
44210 +- oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
44211 +- oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
44212 +- oinfo->dqi_gqi_bh = NULL;
44213 +- oinfo->dqi_gqi_count = 0;
44214 +- oinfo->dqi_gqinode = gqinode;
44215 ++
44216 + status = ocfs2_lock_global_qf(oinfo, 0);
44217 + if (status < 0) {
44218 + mlog_errno(status);
44219 + goto out_err;
44220 + }
44221 +
44222 +- status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
44223 ++ status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
44224 + &pcount, NULL);
44225 + if (status < 0)
44226 + goto out_unlock;
44227 +diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
44228 +index 0e4b16d4c037f..b1a8b046f4c22 100644
44229 +--- a/fs/ocfs2/quota_local.c
44230 ++++ b/fs/ocfs2/quota_local.c
44231 +@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
44232 + info->dqi_priv = oinfo;
44233 + oinfo->dqi_type = type;
44234 + INIT_LIST_HEAD(&oinfo->dqi_chunk);
44235 +- oinfo->dqi_gqinode = NULL;
44236 +- ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
44237 + oinfo->dqi_rec = NULL;
44238 + oinfo->dqi_lqi_bh = NULL;
44239 + oinfo->dqi_libh = NULL;
44240 +diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
44241 +index 6d8d4bf208377..2e244ada1f970 100644
44242 +--- a/fs/proc/bootconfig.c
44243 ++++ b/fs/proc/bootconfig.c
44244 +@@ -32,6 +32,8 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
44245 + int ret = 0;
44246 +
44247 + key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
44248 ++ if (!key)
44249 ++ return -ENOMEM;
44250 +
44251 + xbc_for_each_key_value(leaf, val) {
44252 + ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
44253 +diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
44254 +index f243cb5e6a4fb..e26162f102ffe 100644
44255 +--- a/fs/pstore/platform.c
44256 ++++ b/fs/pstore/platform.c
44257 +@@ -143,21 +143,22 @@ static void pstore_timer_kick(void)
44258 + mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
44259 + }
44260 +
44261 +-/*
44262 +- * Should pstore_dump() wait for a concurrent pstore_dump()? If
44263 +- * not, the current pstore_dump() will report a failure to dump
44264 +- * and return.
44265 +- */
44266 +-static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
44267 ++static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
44268 + {
44269 +- /* In NMI path, pstore shouldn't block regardless of reason. */
44270 ++ /*
44271 ++ * In case of NMI path, pstore shouldn't be blocked
44272 ++ * regardless of reason.
44273 ++ */
44274 + if (in_nmi())
44275 + return true;
44276 +
44277 + switch (reason) {
44278 + /* In panic case, other cpus are stopped by smp_send_stop(). */
44279 + case KMSG_DUMP_PANIC:
44280 +- /* Emergency restart shouldn't be blocked. */
44281 ++ /*
44282 ++ * Emergency restart shouldn't be blocked by spinning on
44283 ++ * pstore_info::buf_lock.
44284 ++ */
44285 + case KMSG_DUMP_EMERG:
44286 + return true;
44287 + default:
44288 +@@ -389,21 +390,19 @@ static void pstore_dump(struct kmsg_dumper *dumper,
44289 + unsigned long total = 0;
44290 + const char *why;
44291 + unsigned int part = 1;
44292 ++ unsigned long flags = 0;
44293 + int ret;
44294 +
44295 + why = kmsg_dump_reason_str(reason);
44296 +
44297 +- if (down_trylock(&psinfo->buf_lock)) {
44298 +- /* Failed to acquire lock: give up if we cannot wait. */
44299 +- if (pstore_cannot_wait(reason)) {
44300 +- pr_err("dump skipped in %s path: may corrupt error record\n",
44301 +- in_nmi() ? "NMI" : why);
44302 +- return;
44303 +- }
44304 +- if (down_interruptible(&psinfo->buf_lock)) {
44305 +- pr_err("could not grab semaphore?!\n");
44306 ++ if (pstore_cannot_block_path(reason)) {
44307 ++ if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
44308 ++ pr_err("dump skipped in %s path because of concurrent dump\n",
44309 ++ in_nmi() ? "NMI" : why);
44310 + return;
44311 + }
44312 ++ } else {
44313 ++ spin_lock_irqsave(&psinfo->buf_lock, flags);
44314 + }
44315 +
44316 + kmsg_dump_rewind(&iter);
44317 +@@ -467,8 +466,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
44318 + total += record.size;
44319 + part++;
44320 + }
44321 +-
44322 +- up(&psinfo->buf_lock);
44323 ++ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
44324 + }
44325 +
44326 + static struct kmsg_dumper pstore_dumper = {
44327 +@@ -594,7 +592,7 @@ int pstore_register(struct pstore_info *psi)
44328 + psi->write_user = pstore_write_user_compat;
44329 + psinfo = psi;
44330 + mutex_init(&psinfo->read_mutex);
44331 +- sema_init(&psinfo->buf_lock, 1);
44332 ++ spin_lock_init(&psinfo->buf_lock);
44333 +
44334 + if (psi->flags & PSTORE_FLAGS_DMESG)
44335 + allocate_buf_for_compression();
44336 +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
44337 +index dbe72f664abf3..86151889548e3 100644
44338 +--- a/fs/ubifs/dir.c
44339 ++++ b/fs/ubifs/dir.c
44340 +@@ -349,20 +349,97 @@ out_budg:
44341 + return err;
44342 + }
44343 +
44344 +-static int do_tmpfile(struct inode *dir, struct dentry *dentry,
44345 +- umode_t mode, struct inode **whiteout)
44346 ++static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
44347 ++{
44348 ++ int err;
44349 ++ umode_t mode = S_IFCHR | WHITEOUT_MODE;
44350 ++ struct inode *inode;
44351 ++ struct ubifs_info *c = dir->i_sb->s_fs_info;
44352 ++ struct fscrypt_name nm;
44353 ++
44354 ++ /*
44355 ++ * Create an inode('nlink = 1') for whiteout without updating journal,
44356 ++ * let ubifs_jnl_rename() store it on flash to complete rename whiteout
44357 ++ * atomically.
44358 ++ */
44359 ++
44360 ++ dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
44361 ++ dentry, mode, dir->i_ino);
44362 ++
44363 ++ err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
44364 ++ if (err)
44365 ++ return ERR_PTR(err);
44366 ++
44367 ++ inode = ubifs_new_inode(c, dir, mode);
44368 ++ if (IS_ERR(inode)) {
44369 ++ err = PTR_ERR(inode);
44370 ++ goto out_free;
44371 ++ }
44372 ++
44373 ++ init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
44374 ++ ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
44375 ++
44376 ++ err = ubifs_init_security(dir, inode, &dentry->d_name);
44377 ++ if (err)
44378 ++ goto out_inode;
44379 ++
44380 ++ /* The dir size is updated by do_rename. */
44381 ++ insert_inode_hash(inode);
44382 ++
44383 ++ return inode;
44384 ++
44385 ++out_inode:
44386 ++ make_bad_inode(inode);
44387 ++ iput(inode);
44388 ++out_free:
44389 ++ fscrypt_free_filename(&nm);
44390 ++ ubifs_err(c, "cannot create whiteout file, error %d", err);
44391 ++ return ERR_PTR(err);
44392 ++}
44393 ++
44394 ++/**
44395 ++ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
44396 ++ * @inode1: first inode
44397 ++ * @inode2: second inode
44398 ++ *
44399 ++ * We do not implement any tricks to guarantee strict lock ordering, because
44400 ++ * VFS has already done it for us on the @i_mutex. So this is just a simple
44401 ++ * wrapper function.
44402 ++ */
44403 ++static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
44404 ++{
44405 ++ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
44406 ++ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
44407 ++}
44408 ++
44409 ++/**
44410 ++ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
44411 ++ * @inode1: first inode
44412 ++ * @inode2: second inode
44413 ++ */
44414 ++static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
44415 ++{
44416 ++ mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
44417 ++ mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
44418 ++}
44419 ++
44420 ++static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
44421 ++ struct dentry *dentry, umode_t mode)
44422 + {
44423 + struct inode *inode;
44424 + struct ubifs_info *c = dir->i_sb->s_fs_info;
44425 +- struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
44426 ++ struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
44427 ++ .dirtied_ino = 1};
44428 + struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
44429 +- struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
44430 ++ struct ubifs_inode *ui;
44431 + int err, instantiated = 0;
44432 + struct fscrypt_name nm;
44433 +
44434 + /*
44435 +- * Budget request settings: new dirty inode, new direntry,
44436 +- * budget for dirtied inode will be released via writeback.
44437 ++ * Budget request settings: new inode, new direntry, changing the
44438 ++ * parent directory inode.
44439 ++ * Allocate budget separately for new dirtied inode, the budget will
44440 ++ * be released via writeback.
44441 + */
44442 +
44443 + dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
44444 +@@ -392,42 +469,30 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
44445 + }
44446 + ui = ubifs_inode(inode);
44447 +
44448 +- if (whiteout) {
44449 +- init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
44450 +- ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
44451 +- }
44452 +-
44453 + err = ubifs_init_security(dir, inode, &dentry->d_name);
44454 + if (err)
44455 + goto out_inode;
44456 +
44457 + mutex_lock(&ui->ui_mutex);
44458 + insert_inode_hash(inode);
44459 +-
44460 +- if (whiteout) {
44461 +- mark_inode_dirty(inode);
44462 +- drop_nlink(inode);
44463 +- *whiteout = inode;
44464 +- } else {
44465 +- d_tmpfile(dentry, inode);
44466 +- }
44467 ++ d_tmpfile(dentry, inode);
44468 + ubifs_assert(c, ui->dirty);
44469 +
44470 + instantiated = 1;
44471 + mutex_unlock(&ui->ui_mutex);
44472 +
44473 +- mutex_lock(&dir_ui->ui_mutex);
44474 ++ lock_2_inodes(dir, inode);
44475 + err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
44476 + if (err)
44477 + goto out_cancel;
44478 +- mutex_unlock(&dir_ui->ui_mutex);
44479 ++ unlock_2_inodes(dir, inode);
44480 +
44481 + ubifs_release_budget(c, &req);
44482 +
44483 + return 0;
44484 +
44485 + out_cancel:
44486 +- mutex_unlock(&dir_ui->ui_mutex);
44487 ++ unlock_2_inodes(dir, inode);
44488 + out_inode:
44489 + make_bad_inode(inode);
44490 + if (!instantiated)
44491 +@@ -441,12 +506,6 @@ out_budg:
44492 + return err;
44493 + }
44494 +
44495 +-static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
44496 +- struct dentry *dentry, umode_t mode)
44497 +-{
44498 +- return do_tmpfile(dir, dentry, mode, NULL);
44499 +-}
44500 +-
44501 + /**
44502 + * vfs_dent_type - get VFS directory entry type.
44503 + * @type: UBIFS directory entry type
44504 +@@ -660,32 +719,6 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
44505 + return 0;
44506 + }
44507 +
44508 +-/**
44509 +- * lock_2_inodes - a wrapper for locking two UBIFS inodes.
44510 +- * @inode1: first inode
44511 +- * @inode2: second inode
44512 +- *
44513 +- * We do not implement any tricks to guarantee strict lock ordering, because
44514 +- * VFS has already done it for us on the @i_mutex. So this is just a simple
44515 +- * wrapper function.
44516 +- */
44517 +-static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
44518 +-{
44519 +- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
44520 +- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
44521 +-}
44522 +-
44523 +-/**
44524 +- * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
44525 +- * @inode1: first inode
44526 +- * @inode2: second inode
44527 +- */
44528 +-static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
44529 +-{
44530 +- mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
44531 +- mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
44532 +-}
44533 +-
44534 + static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
44535 + struct dentry *dentry)
44536 + {
44537 +@@ -949,7 +982,8 @@ static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
44538 + struct ubifs_inode *dir_ui = ubifs_inode(dir);
44539 + struct ubifs_info *c = dir->i_sb->s_fs_info;
44540 + int err, sz_change;
44541 +- struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
44542 ++ struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
44543 ++ .dirtied_ino = 1};
44544 + struct fscrypt_name nm;
44545 +
44546 + /*
44547 +@@ -1264,17 +1298,19 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
44548 + .dirtied_ino = 3 };
44549 + struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
44550 + .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
44551 ++ struct ubifs_budget_req wht_req;
44552 + struct timespec64 time;
44553 + unsigned int saved_nlink;
44554 + struct fscrypt_name old_nm, new_nm;
44555 +
44556 + /*
44557 +- * Budget request settings: deletion direntry, new direntry, removing
44558 +- * the old inode, and changing old and new parent directory inodes.
44559 ++ * Budget request settings:
44560 ++ * req: deletion direntry, new direntry, removing the old inode,
44561 ++ * and changing old and new parent directory inodes.
44562 ++ *
44563 ++ * wht_req: new whiteout inode for RENAME_WHITEOUT.
44564 + *
44565 +- * However, this operation also marks the target inode as dirty and
44566 +- * does not write it, so we allocate budget for the target inode
44567 +- * separately.
44568 ++ * ino_req: marks the target inode as dirty and does not write it.
44569 + */
44570 +
44571 + dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
44572 +@@ -1331,20 +1367,44 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
44573 + goto out_release;
44574 + }
44575 +
44576 +- err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
44577 +- if (err) {
44578 ++ /*
44579 ++ * The whiteout inode without dentry is pinned in memory,
44580 ++ * umount won't happen during rename process because we
44581 ++ * got parent dentry.
44582 ++ */
44583 ++ whiteout = create_whiteout(old_dir, old_dentry);
44584 ++ if (IS_ERR(whiteout)) {
44585 ++ err = PTR_ERR(whiteout);
44586 + kfree(dev);
44587 + goto out_release;
44588 + }
44589 +
44590 +- spin_lock(&whiteout->i_lock);
44591 +- whiteout->i_state |= I_LINKABLE;
44592 +- spin_unlock(&whiteout->i_lock);
44593 +-
44594 + whiteout_ui = ubifs_inode(whiteout);
44595 + whiteout_ui->data = dev;
44596 + whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
44597 + ubifs_assert(c, !whiteout_ui->dirty);
44598 ++
44599 ++ memset(&wht_req, 0, sizeof(struct ubifs_budget_req));
44600 ++ wht_req.new_ino = 1;
44601 ++ wht_req.new_ino_d = ALIGN(whiteout_ui->data_len, 8);
44602 ++ /*
44603 ++ * To avoid deadlock between space budget (holds ui_mutex and
44604 ++ * waits wb work) and writeback work(waits ui_mutex), do space
44605 ++ * budget before ubifs inodes locked.
44606 ++ */
44607 ++ err = ubifs_budget_space(c, &wht_req);
44608 ++ if (err) {
44609 ++ /*
44610 ++ * Whiteout inode can not be written on flash by
44611 ++ * ubifs_jnl_write_inode(), because it's neither
44612 ++ * dirty nor zero-nlink.
44613 ++ */
44614 ++ iput(whiteout);
44615 ++ goto out_release;
44616 ++ }
44617 ++
44618 ++ /* Add the old_dentry size to the old_dir size. */
44619 ++ old_sz -= CALC_DENT_SIZE(fname_len(&old_nm));
44620 + }
44621 +
44622 + lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
44623 +@@ -1416,29 +1476,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
44624 + sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
44625 + if (unlink && IS_SYNC(new_inode))
44626 + sync = 1;
44627 +- }
44628 +-
44629 +- if (whiteout) {
44630 +- struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
44631 +- .dirtied_ino_d = \
44632 +- ALIGN(ubifs_inode(whiteout)->data_len, 8) };
44633 +-
44634 +- err = ubifs_budget_space(c, &wht_req);
44635 +- if (err) {
44636 +- kfree(whiteout_ui->data);
44637 +- whiteout_ui->data_len = 0;
44638 +- iput(whiteout);
44639 +- goto out_release;
44640 +- }
44641 +-
44642 +- inc_nlink(whiteout);
44643 +- mark_inode_dirty(whiteout);
44644 +-
44645 +- spin_lock(&whiteout->i_lock);
44646 +- whiteout->i_state &= ~I_LINKABLE;
44647 +- spin_unlock(&whiteout->i_lock);
44648 +-
44649 +- iput(whiteout);
44650 ++ /*
44651 ++ * S_SYNC flag of whiteout inherits from the old_dir, and we
44652 ++ * have already checked the old dir inode. So there is no need
44653 ++ * to check whiteout.
44654 ++ */
44655 + }
44656 +
44657 + err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir,
44658 +@@ -1449,6 +1491,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
44659 + unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
44660 + ubifs_release_budget(c, &req);
44661 +
44662 ++ if (whiteout) {
44663 ++ ubifs_release_budget(c, &wht_req);
44664 ++ iput(whiteout);
44665 ++ }
44666 ++
44667 + mutex_lock(&old_inode_ui->ui_mutex);
44668 + release = old_inode_ui->dirty;
44669 + mark_inode_dirty_sync(old_inode);
44670 +@@ -1457,11 +1504,16 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
44671 + if (release)
44672 + ubifs_release_budget(c, &ino_req);
44673 + if (IS_SYNC(old_inode))
44674 +- err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
44675 ++ /*
44676 ++ * Rename finished here. Although old inode cannot be updated
44677 ++ * on flash, old ctime is not a big problem, don't return err
44678 ++ * code to userspace.
44679 ++ */
44680 ++ old_inode->i_sb->s_op->write_inode(old_inode, NULL);
44681 +
44682 + fscrypt_free_filename(&old_nm);
44683 + fscrypt_free_filename(&new_nm);
44684 +- return err;
44685 ++ return 0;
44686 +
44687 + out_cancel:
44688 + if (unlink) {
44689 +@@ -1482,11 +1534,11 @@ out_cancel:
44690 + inc_nlink(old_dir);
44691 + }
44692 + }
44693 ++ unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
44694 + if (whiteout) {
44695 +- drop_nlink(whiteout);
44696 ++ ubifs_release_budget(c, &wht_req);
44697 + iput(whiteout);
44698 + }
44699 +- unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
44700 + out_release:
44701 + ubifs_release_budget(c, &ino_req);
44702 + ubifs_release_budget(c, &req);
44703 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
44704 +index 5cfa28cd00cdc..6b45a037a0471 100644
44705 +--- a/fs/ubifs/file.c
44706 ++++ b/fs/ubifs/file.c
44707 +@@ -570,7 +570,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
44708 + }
44709 +
44710 + if (!PagePrivate(page)) {
44711 +- SetPagePrivate(page);
44712 ++ attach_page_private(page, (void *)1);
44713 + atomic_long_inc(&c->dirty_pg_cnt);
44714 + __set_page_dirty_nobuffers(page);
44715 + }
44716 +@@ -947,7 +947,7 @@ static int do_writepage(struct page *page, int len)
44717 + release_existing_page_budget(c);
44718 +
44719 + atomic_long_dec(&c->dirty_pg_cnt);
44720 +- ClearPagePrivate(page);
44721 ++ detach_page_private(page);
44722 + ClearPageChecked(page);
44723 +
44724 + kunmap(page);
44725 +@@ -1304,7 +1304,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
44726 + release_existing_page_budget(c);
44727 +
44728 + atomic_long_dec(&c->dirty_pg_cnt);
44729 +- ClearPagePrivate(page);
44730 ++ detach_page_private(page);
44731 + ClearPageChecked(page);
44732 + }
44733 +
44734 +@@ -1471,8 +1471,8 @@ static int ubifs_migrate_page(struct address_space *mapping,
44735 + return rc;
44736 +
44737 + if (PagePrivate(page)) {
44738 +- ClearPagePrivate(page);
44739 +- SetPagePrivate(newpage);
44740 ++ detach_page_private(page);
44741 ++ attach_page_private(newpage, (void *)1);
44742 + }
44743 +
44744 + if (mode != MIGRATE_SYNC_NO_COPY)
44745 +@@ -1496,7 +1496,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
44746 + return 0;
44747 + ubifs_assert(c, PagePrivate(page));
44748 + ubifs_assert(c, 0);
44749 +- ClearPagePrivate(page);
44750 ++ detach_page_private(page);
44751 + ClearPageChecked(page);
44752 + return 1;
44753 + }
44754 +@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
44755 + else {
44756 + if (!PageChecked(page))
44757 + ubifs_convert_page_budget(c);
44758 +- SetPagePrivate(page);
44759 ++ attach_page_private(page, (void *)1);
44760 + atomic_long_inc(&c->dirty_pg_cnt);
44761 + __set_page_dirty_nobuffers(page);
44762 + }
44763 +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
44764 +index 789a7813f3fa2..1607a3c76681a 100644
44765 +--- a/fs/ubifs/io.c
44766 ++++ b/fs/ubifs/io.c
44767 +@@ -854,16 +854,42 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
44768 + */
44769 + n = aligned_len >> c->max_write_shift;
44770 + if (n) {
44771 +- n <<= c->max_write_shift;
44772 ++ int m = n - 1;
44773 ++
44774 + dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
44775 + wbuf->offs);
44776 +- err = ubifs_leb_write(c, wbuf->lnum, buf + written,
44777 +- wbuf->offs, n);
44778 ++
44779 ++ if (m) {
44780 ++ /* '(n-1)<<c->max_write_shift < len' is always true. */
44781 ++ m <<= c->max_write_shift;
44782 ++ err = ubifs_leb_write(c, wbuf->lnum, buf + written,
44783 ++ wbuf->offs, m);
44784 ++ if (err)
44785 ++ goto out;
44786 ++ wbuf->offs += m;
44787 ++ aligned_len -= m;
44788 ++ len -= m;
44789 ++ written += m;
44790 ++ }
44791 ++
44792 ++ /*
44793 ++ * The non-written len of buf may be less than 'n' because
44794 ++ * parameter 'len' is not 8 bytes aligned, so here we read
44795 ++ * min(len, n) bytes from buf.
44796 ++ */
44797 ++ n = 1 << c->max_write_shift;
44798 ++ memcpy(wbuf->buf, buf + written, min(len, n));
44799 ++ if (n > len) {
44800 ++ ubifs_assert(c, n - len < 8);
44801 ++ ubifs_pad(c, wbuf->buf + len, n - len);
44802 ++ }
44803 ++
44804 ++ err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
44805 + if (err)
44806 + goto out;
44807 + wbuf->offs += n;
44808 + aligned_len -= n;
44809 +- len -= n;
44810 ++ len -= min(len, n);
44811 + written += n;
44812 + }
44813 +
44814 +diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
44815 +index c6a8634877803..71bcebe45f9c5 100644
44816 +--- a/fs/ubifs/ioctl.c
44817 ++++ b/fs/ubifs/ioctl.c
44818 +@@ -108,7 +108,7 @@ static int setflags(struct inode *inode, int flags)
44819 + struct ubifs_inode *ui = ubifs_inode(inode);
44820 + struct ubifs_info *c = inode->i_sb->s_fs_info;
44821 + struct ubifs_budget_req req = { .dirtied_ino = 1,
44822 +- .dirtied_ino_d = ui->data_len };
44823 ++ .dirtied_ino_d = ALIGN(ui->data_len, 8) };
44824 +
44825 + err = ubifs_budget_space(c, &req);
44826 + if (err)
44827 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
44828 +index 8ea680dba61e3..75dab0ae3939d 100644
44829 +--- a/fs/ubifs/journal.c
44830 ++++ b/fs/ubifs/journal.c
44831 +@@ -1207,9 +1207,9 @@ out_free:
44832 + * @sync: non-zero if the write-buffer has to be synchronized
44833 + *
44834 + * This function implements the re-name operation which may involve writing up
44835 +- * to 4 inodes and 2 directory entries. It marks the written inodes as clean
44836 +- * and returns zero on success. In case of failure, a negative error code is
44837 +- * returned.
44838 ++ * to 4 inodes(new inode, whiteout inode, old and new parent directory inodes)
44839 ++ * and 2 directory entries. It marks the written inodes as clean and returns
44840 ++ * zero on success. In case of failure, a negative error code is returned.
44841 + */
44842 + int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44843 + const struct inode *old_inode,
44844 +@@ -1222,14 +1222,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44845 + void *p;
44846 + union ubifs_key key;
44847 + struct ubifs_dent_node *dent, *dent2;
44848 +- int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
44849 ++ int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
44850 + int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
44851 + int last_reference = !!(new_inode && new_inode->i_nlink == 0);
44852 + int move = (old_dir != new_dir);
44853 +- struct ubifs_inode *new_ui;
44854 ++ struct ubifs_inode *new_ui, *whiteout_ui;
44855 + u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
44856 + u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
44857 + u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
44858 ++ u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
44859 + u8 hash_dent1[UBIFS_HASH_ARR_SZ];
44860 + u8 hash_dent2[UBIFS_HASH_ARR_SZ];
44861 +
44862 +@@ -1249,9 +1250,20 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44863 + } else
44864 + ilen = 0;
44865 +
44866 ++ if (whiteout) {
44867 ++ whiteout_ui = ubifs_inode(whiteout);
44868 ++ ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
44869 ++ ubifs_assert(c, whiteout->i_nlink == 1);
44870 ++ ubifs_assert(c, !whiteout_ui->dirty);
44871 ++ wlen = UBIFS_INO_NODE_SZ;
44872 ++ wlen += whiteout_ui->data_len;
44873 ++ } else
44874 ++ wlen = 0;
44875 ++
44876 + aligned_dlen1 = ALIGN(dlen1, 8);
44877 + aligned_dlen2 = ALIGN(dlen2, 8);
44878 +- len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
44879 ++ len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
44880 ++ ALIGN(wlen, 8) + ALIGN(plen, 8);
44881 + if (move)
44882 + len += plen;
44883 +
44884 +@@ -1313,6 +1325,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44885 + p += ALIGN(ilen, 8);
44886 + }
44887 +
44888 ++ if (whiteout) {
44889 ++ pack_inode(c, p, whiteout, 0);
44890 ++ err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
44891 ++ if (err)
44892 ++ goto out_release;
44893 ++
44894 ++ p += ALIGN(wlen, 8);
44895 ++ }
44896 ++
44897 + if (!move) {
44898 + pack_inode(c, p, old_dir, 1);
44899 + err = ubifs_node_calc_hash(c, p, hash_old_dir);
44900 +@@ -1352,6 +1373,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44901 + if (new_inode)
44902 + ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
44903 + new_inode->i_ino);
44904 ++ if (whiteout)
44905 ++ ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
44906 ++ whiteout->i_ino);
44907 + }
44908 + release_head(c, BASEHD);
44909 +
44910 +@@ -1368,8 +1392,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44911 + err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
44912 + if (err)
44913 + goto out_ro;
44914 +-
44915 +- ubifs_delete_orphan(c, whiteout->i_ino);
44916 + } else {
44917 + err = ubifs_add_dirt(c, lnum, dlen2);
44918 + if (err)
44919 +@@ -1390,6 +1412,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44920 + offs += ALIGN(ilen, 8);
44921 + }
44922 +
44923 ++ if (whiteout) {
44924 ++ ino_key_init(c, &key, whiteout->i_ino);
44925 ++ err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
44926 ++ hash_whiteout_inode);
44927 ++ if (err)
44928 ++ goto out_ro;
44929 ++ offs += ALIGN(wlen, 8);
44930 ++ }
44931 ++
44932 + ino_key_init(c, &key, old_dir->i_ino);
44933 + err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
44934 + if (err)
44935 +@@ -1410,6 +1441,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
44936 + new_ui->synced_i_size = new_ui->ui_size;
44937 + spin_unlock(&new_ui->ui_lock);
44938 + }
44939 ++ /*
44940 ++ * No need to mark whiteout inode clean.
44941 ++ * Whiteout doesn't have non-zero size, no need to update
44942 ++ * synced_i_size for whiteout_ui.
44943 ++ */
44944 + mark_inode_clean(c, ubifs_inode(old_dir));
44945 + if (move)
44946 + mark_inode_clean(c, ubifs_inode(new_dir));
44947 +diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
44948 +index b501d0badaea2..5f88e484515a1 100644
44949 +--- a/include/drm/drm_connector.h
44950 ++++ b/include/drm/drm_connector.h
44951 +@@ -592,10 +592,16 @@ struct drm_display_info {
44952 + bool rgb_quant_range_selectable;
44953 +
44954 + /**
44955 +- * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
44956 +- * more stuff redundant with @bus_formats.
44957 ++ * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes
44958 ++ * in RGB 4:4:4. Even more stuff redundant with @bus_formats.
44959 + */
44960 +- u8 edid_hdmi_dc_modes;
44961 ++ u8 edid_hdmi_rgb444_dc_modes;
44962 ++
44963 ++ /**
44964 ++ * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color
44965 ++ * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
44966 ++ */
44967 ++ u8 edid_hdmi_ycbcr444_dc_modes;
44968 +
44969 + /**
44970 + * @cea_rev: CEA revision of the HDMI sink.
44971 +diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
44972 +index 30359e434c3f3..fdf3cf6ccc021 100644
44973 +--- a/include/drm/drm_dp_helper.h
44974 ++++ b/include/drm/drm_dp_helper.h
44975 +@@ -456,7 +456,7 @@ struct drm_panel;
44976 + #define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
44977 +
44978 + /* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
44979 +-#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
44980 ++#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */
44981 + #define DP_PCON_DSC_ENCODER 0x092
44982 + # define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
44983 + # define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
44984 +@@ -1528,8 +1528,6 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
44985 + int lane);
44986 + u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
44987 + int lane);
44988 +-u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
44989 +- unsigned int lane);
44990 +
44991 + #define DP_BRANCH_OUI_HEADER_SIZE 0xc
44992 + #define DP_RECEIVER_CAP_SIZE 0xf
44993 +diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
44994 +index b84693fbd2b50..ec4f543c3d950 100644
44995 +--- a/include/drm/drm_modeset_lock.h
44996 ++++ b/include/drm/drm_modeset_lock.h
44997 +@@ -34,6 +34,7 @@ struct drm_modeset_lock;
44998 + * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
44999 + * @ww_ctx: base acquire ctx
45000 + * @contended: used internally for -EDEADLK handling
45001 ++ * @stack_depot: used internally for contention debugging
45002 + * @locked: list of held locks
45003 + * @trylock_only: trylock mode used in atomic contexts/panic notifiers
45004 + * @interruptible: whether interruptible locking should be used.
45005 +diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
45006 +index a3dba31df01e9..6db58d1808665 100644
45007 +--- a/include/linux/atomic/atomic-arch-fallback.h
45008 ++++ b/include/linux/atomic/atomic-arch-fallback.h
45009 +@@ -151,7 +151,16 @@
45010 + static __always_inline int
45011 + arch_atomic_read_acquire(const atomic_t *v)
45012 + {
45013 +- return smp_load_acquire(&(v)->counter);
45014 ++ int ret;
45015 ++
45016 ++ if (__native_word(atomic_t)) {
45017 ++ ret = smp_load_acquire(&(v)->counter);
45018 ++ } else {
45019 ++ ret = arch_atomic_read(v);
45020 ++ __atomic_acquire_fence();
45021 ++ }
45022 ++
45023 ++ return ret;
45024 + }
45025 + #define arch_atomic_read_acquire arch_atomic_read_acquire
45026 + #endif
45027 +@@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v)
45028 + static __always_inline void
45029 + arch_atomic_set_release(atomic_t *v, int i)
45030 + {
45031 +- smp_store_release(&(v)->counter, i);
45032 ++ if (__native_word(atomic_t)) {
45033 ++ smp_store_release(&(v)->counter, i);
45034 ++ } else {
45035 ++ __atomic_release_fence();
45036 ++ arch_atomic_set(v, i);
45037 ++ }
45038 + }
45039 + #define arch_atomic_set_release arch_atomic_set_release
45040 + #endif
45041 +@@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v)
45042 + static __always_inline s64
45043 + arch_atomic64_read_acquire(const atomic64_t *v)
45044 + {
45045 +- return smp_load_acquire(&(v)->counter);
45046 ++ s64 ret;
45047 ++
45048 ++ if (__native_word(atomic64_t)) {
45049 ++ ret = smp_load_acquire(&(v)->counter);
45050 ++ } else {
45051 ++ ret = arch_atomic64_read(v);
45052 ++ __atomic_acquire_fence();
45053 ++ }
45054 ++
45055 ++ return ret;
45056 + }
45057 + #define arch_atomic64_read_acquire arch_atomic64_read_acquire
45058 + #endif
45059 +@@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v)
45060 + static __always_inline void
45061 + arch_atomic64_set_release(atomic64_t *v, s64 i)
45062 + {
45063 +- smp_store_release(&(v)->counter, i);
45064 ++ if (__native_word(atomic64_t)) {
45065 ++ smp_store_release(&(v)->counter, i);
45066 ++ } else {
45067 ++ __atomic_release_fence();
45068 ++ arch_atomic64_set(v, i);
45069 ++ }
45070 + }
45071 + #define arch_atomic64_set_release arch_atomic64_set_release
45072 + #endif
45073 +@@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
45074 + #endif
45075 +
45076 + #endif /* _LINUX_ATOMIC_FALLBACK_H */
45077 +-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
45078 ++// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae
45079 +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
45080 +index 049cf9421d831..f821b72433613 100644
45081 +--- a/include/linux/binfmts.h
45082 ++++ b/include/linux/binfmts.h
45083 +@@ -87,6 +87,9 @@ struct coredump_params {
45084 + loff_t written;
45085 + loff_t pos;
45086 + loff_t to_skip;
45087 ++ int vma_count;
45088 ++ size_t vma_data_size;
45089 ++ struct core_vma_metadata *vma_meta;
45090 + };
45091 +
45092 + /*
45093 +diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
45094 +index b4de2010fba55..bc5c04d711bbc 100644
45095 +--- a/include/linux/blk-cgroup.h
45096 ++++ b/include/linux/blk-cgroup.h
45097 +@@ -24,6 +24,7 @@
45098 + #include <linux/atomic.h>
45099 + #include <linux/kthread.h>
45100 + #include <linux/fs.h>
45101 ++#include <linux/blk-mq.h>
45102 +
45103 + /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
45104 + #define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
45105 +@@ -604,6 +605,21 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
45106 + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
45107 + }
45108 +
45109 ++/**
45110 ++ * blk_cgroup_mergeable - Determine whether to allow or disallow merges
45111 ++ * @rq: request to merge into
45112 ++ * @bio: bio to merge
45113 ++ *
45114 ++ * @bio and @rq should belong to the same cgroup and their issue_as_root should
45115 ++ * match. The latter is necessary as we don't want to throttle e.g. a metadata
45116 ++ * update because it happens to be next to a regular IO.
45117 ++ */
45118 ++static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
45119 ++{
45120 ++ return rq->bio->bi_blkg == bio->bi_blkg &&
45121 ++ bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
45122 ++}
45123 ++
45124 + void blk_cgroup_bio_start(struct bio *bio);
45125 + void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
45126 + void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
45127 +@@ -659,6 +675,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { }
45128 + static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
45129 + static inline void blkcg_bio_issue_init(struct bio *bio) { }
45130 + static inline void blk_cgroup_bio_start(struct bio *bio) { }
45131 ++static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
45132 +
45133 + #define blk_queue_for_each_rl(rl, q) \
45134 + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
45135 +diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
45136 +index fe065c394fff6..86c0f85df8bb4 100644
45137 +--- a/include/linux/blk_types.h
45138 ++++ b/include/linux/blk_types.h
45139 +@@ -317,7 +317,8 @@ enum {
45140 + BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
45141 + * of this bio. */
45142 + BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
45143 +- BIO_TRACKED, /* set if bio goes through the rq_qos path */
45144 ++ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
45145 ++ BIO_QOS_MERGED, /* but went through rq_qos merge path */
45146 + BIO_REMAPPED,
45147 + BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
45148 + BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
45149 +diff --git a/include/linux/coredump.h b/include/linux/coredump.h
45150 +index 248a68c668b45..aa12ec94fae28 100644
45151 +--- a/include/linux/coredump.h
45152 ++++ b/include/linux/coredump.h
45153 +@@ -12,6 +12,8 @@ struct core_vma_metadata {
45154 + unsigned long start, end;
45155 + unsigned long flags;
45156 + unsigned long dump_size;
45157 ++ unsigned long pgoff;
45158 ++ struct file *file;
45159 + };
45160 +
45161 + /*
45162 +@@ -25,9 +27,6 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
45163 + extern int dump_align(struct coredump_params *cprm, int align);
45164 + int dump_user_range(struct coredump_params *cprm, unsigned long start,
45165 + unsigned long len);
45166 +-int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
45167 +- struct core_vma_metadata **vma_meta,
45168 +- size_t *vma_data_size_ptr);
45169 + extern void do_coredump(const kernel_siginfo_t *siginfo);
45170 + #else
45171 + static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
45172 +diff --git a/include/linux/fb.h b/include/linux/fb.h
45173 +index 02f362c661c80..3d7306c9a7065 100644
45174 +--- a/include/linux/fb.h
45175 ++++ b/include/linux/fb.h
45176 +@@ -502,6 +502,7 @@ struct fb_info {
45177 + } *apertures;
45178 +
45179 + bool skip_vt_switch; /* no VT switch on suspend/resume required */
45180 ++ bool forced_out; /* set when being removed by another driver */
45181 + };
45182 +
45183 + static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
45184 +diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
45185 +index 819ec92dc2a82..db924fe379c9c 100644
45186 +--- a/include/linux/lsm_hook_defs.h
45187 ++++ b/include/linux/lsm_hook_defs.h
45188 +@@ -332,6 +332,8 @@ LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
45189 + struct sockaddr *address, int addrlen)
45190 + LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
45191 + struct sock *sk, struct sock *newsk)
45192 ++LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
45193 ++ struct sk_buff *skb)
45194 + #endif /* CONFIG_SECURITY_NETWORK */
45195 +
45196 + #ifdef CONFIG_SECURITY_INFINIBAND
45197 +diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
45198 +index 3bf5c658bc448..419b5febc3ca5 100644
45199 +--- a/include/linux/lsm_hooks.h
45200 ++++ b/include/linux/lsm_hooks.h
45201 +@@ -1046,6 +1046,11 @@
45202 + * @asoc pointer to current sctp association structure.
45203 + * @sk pointer to current sock structure.
45204 + * @newsk pointer to new sock structure.
45205 ++ * @sctp_assoc_established:
45206 ++ * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet
45207 ++ * to the security module.
45208 ++ * @asoc pointer to sctp association structure.
45209 ++ * @skb pointer to skbuff of association packet.
45210 + *
45211 + * Security hooks for Infiniband
45212 + *
45213 +diff --git a/include/linux/migrate.h b/include/linux/migrate.h
45214 +index db96e10eb8da2..90e75d5a54d66 100644
45215 +--- a/include/linux/migrate.h
45216 ++++ b/include/linux/migrate.h
45217 +@@ -48,7 +48,15 @@ int folio_migrate_mapping(struct address_space *mapping,
45218 + struct folio *newfolio, struct folio *folio, int extra_count);
45219 +
45220 + extern bool numa_demotion_enabled;
45221 ++extern void migrate_on_reclaim_init(void);
45222 ++#ifdef CONFIG_HOTPLUG_CPU
45223 ++extern void set_migration_target_nodes(void);
45224 + #else
45225 ++static inline void set_migration_target_nodes(void) {}
45226 ++#endif
45227 ++#else
45228 ++
45229 ++static inline void set_migration_target_nodes(void) {}
45230 +
45231 + static inline void putback_movable_pages(struct list_head *l) {}
45232 + static inline int migrate_pages(struct list_head *l, new_page_t new,
45233 +diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
45234 +index 5b88cd51fadb5..dcf90144d70b7 100644
45235 +--- a/include/linux/mtd/rawnand.h
45236 ++++ b/include/linux/mtd/rawnand.h
45237 +@@ -1240,6 +1240,7 @@ struct nand_secure_region {
45238 + * @lock: Lock protecting the suspended field. Also used to serialize accesses
45239 + * to the NAND device
45240 + * @suspended: Set to 1 when the device is suspended, 0 when it's not
45241 ++ * @resume_wq: wait queue to sleep if rawnand is in suspended state.
45242 + * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
45243 + * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
45244 + * NAND Controller drivers should not modify this value, but they're
45245 +@@ -1294,6 +1295,7 @@ struct nand_chip {
45246 + /* Internals */
45247 + struct mutex lock;
45248 + unsigned int suspended : 1;
45249 ++ wait_queue_head_t resume_wq;
45250 + int cur_cs;
45251 + int read_retries;
45252 + struct nand_secure_region *secure_regions;
45253 +diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
45254 +index e6487a6911360..8676316547cc4 100644
45255 +--- a/include/linux/netfilter_netdev.h
45256 ++++ b/include/linux/netfilter_netdev.h
45257 +@@ -99,7 +99,7 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
45258 + return skb;
45259 +
45260 + nf_hook_state_init(&state, NF_NETDEV_EGRESS,
45261 +- NFPROTO_NETDEV, dev, NULL, NULL,
45262 ++ NFPROTO_NETDEV, NULL, dev, NULL,
45263 + dev_net(dev), NULL);
45264 +
45265 + /* nf assumes rcu_read_lock, not just read_lock_bh */
45266 +diff --git a/include/linux/nvme.h b/include/linux/nvme.h
45267 +index 855dd9b3e84be..a662435c9b6f1 100644
45268 +--- a/include/linux/nvme.h
45269 ++++ b/include/linux/nvme.h
45270 +@@ -337,6 +337,7 @@ enum {
45271 + NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
45272 + NVME_CTRL_VWC_PRESENT = 1 << 0,
45273 + NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
45274 ++ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
45275 + NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
45276 + NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
45277 + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
45278 +diff --git a/include/linux/pci.h b/include/linux/pci.h
45279 +index 8253a5413d7c4..678fecdf6b812 100644
45280 +--- a/include/linux/pci.h
45281 ++++ b/include/linux/pci.h
45282 +@@ -668,6 +668,7 @@ struct pci_bus {
45283 + struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
45284 + struct bin_attribute *legacy_mem; /* Legacy mem */
45285 + unsigned int is_added:1;
45286 ++ unsigned int unsafe_warn:1; /* warned about RW1C config write */
45287 + };
45288 +
45289 + #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
45290 +diff --git a/include/linux/pstore.h b/include/linux/pstore.h
45291 +index eb93a54cff31f..e97a8188f0fd8 100644
45292 +--- a/include/linux/pstore.h
45293 ++++ b/include/linux/pstore.h
45294 +@@ -14,7 +14,7 @@
45295 + #include <linux/errno.h>
45296 + #include <linux/kmsg_dump.h>
45297 + #include <linux/mutex.h>
45298 +-#include <linux/semaphore.h>
45299 ++#include <linux/spinlock.h>
45300 + #include <linux/time.h>
45301 + #include <linux/types.h>
45302 +
45303 +@@ -87,7 +87,7 @@ struct pstore_record {
45304 + * @owner: module which is responsible for this backend driver
45305 + * @name: name of the backend driver
45306 + *
45307 +- * @buf_lock: semaphore to serialize access to @buf
45308 ++ * @buf_lock: spinlock to serialize access to @buf
45309 + * @buf: preallocated crash dump buffer
45310 + * @bufsize: size of @buf available for crash dump bytes (must match
45311 + * smallest number of bytes available for writing to a
45312 +@@ -178,7 +178,7 @@ struct pstore_info {
45313 + struct module *owner;
45314 + const char *name;
45315 +
45316 +- struct semaphore buf_lock;
45317 ++ spinlock_t buf_lock;
45318 + char *buf;
45319 + size_t bufsize;
45320 +
45321 +diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
45322 +index bebc911161b6f..d373f1bcbf7ca 100644
45323 +--- a/include/linux/randomize_kstack.h
45324 ++++ b/include/linux/randomize_kstack.h
45325 +@@ -16,8 +16,20 @@ DECLARE_PER_CPU(u32, kstack_offset);
45326 + * alignment. Also, since this use is being explicitly masked to a max of
45327 + * 10 bits, stack-clash style attacks are unlikely. For more details see
45328 + * "VLAs" in Documentation/process/deprecated.rst
45329 ++ *
45330 ++ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
45331 ++ * only with Clang and not GCC). Initializing the unused area on each syscall
45332 ++ * entry is expensive, and generating an implicit call to memset() may also be
45333 ++ * problematic (such as in noinstr functions). Therefore, if the compiler
45334 ++ * supports it (which it should if it initializes allocas), always use the
45335 ++ * "uninitialized" variant of the builtin.
45336 + */
45337 +-void *__builtin_alloca(size_t size);
45338 ++#if __has_builtin(__builtin_alloca_uninitialized)
45339 ++#define __kstack_alloca __builtin_alloca_uninitialized
45340 ++#else
45341 ++#define __kstack_alloca __builtin_alloca
45342 ++#endif
45343 ++
45344 + /*
45345 + * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
45346 + * "VLA" from being unbounded (see above). 10 bits leaves enough room for
45347 +@@ -36,7 +48,7 @@ void *__builtin_alloca(size_t size);
45348 + if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
45349 + &randomize_kstack_offset)) { \
45350 + u32 offset = raw_cpu_read(kstack_offset); \
45351 +- u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \
45352 ++ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
45353 + /* Keep allocation even after "ptr" loses scope. */ \
45354 + asm volatile("" :: "r"(ptr) : "memory"); \
45355 + } \
45356 +diff --git a/include/linux/sched.h b/include/linux/sched.h
45357 +index 75ba8aa60248b..e806326eca723 100644
45358 +--- a/include/linux/sched.h
45359 ++++ b/include/linux/sched.h
45360 +@@ -1630,6 +1630,14 @@ static inline unsigned int task_state_index(struct task_struct *tsk)
45361 + if (tsk_state == TASK_IDLE)
45362 + state = TASK_REPORT_IDLE;
45363 +
45364 ++ /*
45365 ++ * We're lying here, but rather than expose a completely new task state
45366 ++ * to userspace, we can make this appear as if the task has gone through
45367 ++ * a regular rt_mutex_lock() call.
45368 ++ */
45369 ++ if (tsk_state == TASK_RTLOCK_WAIT)
45370 ++ state = TASK_UNINTERRUPTIBLE;
45371 ++
45372 + return fls(state);
45373 + }
45374 +
45375 +diff --git a/include/linux/security.h b/include/linux/security.h
45376 +index 6d72772182c82..25b3ef71f495e 100644
45377 +--- a/include/linux/security.h
45378 ++++ b/include/linux/security.h
45379 +@@ -1422,6 +1422,8 @@ int security_sctp_bind_connect(struct sock *sk, int optname,
45380 + struct sockaddr *address, int addrlen);
45381 + void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
45382 + struct sock *newsk);
45383 ++int security_sctp_assoc_established(struct sctp_association *asoc,
45384 ++ struct sk_buff *skb);
45385 +
45386 + #else /* CONFIG_SECURITY_NETWORK */
45387 + static inline int security_unix_stream_connect(struct sock *sock,
45388 +@@ -1641,6 +1643,12 @@ static inline void security_sctp_sk_clone(struct sctp_association *asoc,
45389 + struct sock *newsk)
45390 + {
45391 + }
45392 ++
45393 ++static inline int security_sctp_assoc_established(struct sctp_association *asoc,
45394 ++ struct sk_buff *skb)
45395 ++{
45396 ++ return 0;
45397 ++}
45398 + #endif /* CONFIG_SECURITY_NETWORK */
45399 +
45400 + #ifdef CONFIG_SECURITY_INFINIBAND
45401 +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
45402 +index c58cc142d23f4..8c32935e1059d 100644
45403 +--- a/include/linux/serial_core.h
45404 ++++ b/include/linux/serial_core.h
45405 +@@ -458,6 +458,8 @@ extern void uart_handle_cts_change(struct uart_port *uport,
45406 + extern void uart_insert_char(struct uart_port *port, unsigned int status,
45407 + unsigned int overrun, unsigned int ch, unsigned int flag);
45408 +
45409 ++void uart_xchar_out(struct uart_port *uport, int offset);
45410 ++
45411 + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL
45412 + #define SYSRQ_TIMEOUT (HZ * 5)
45413 +
45414 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
45415 +index 8a636e678902d..42f885f0ce8af 100644
45416 +--- a/include/linux/skbuff.h
45417 ++++ b/include/linux/skbuff.h
45418 +@@ -1475,6 +1475,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
45419 + {
45420 + return skb->end;
45421 + }
45422 ++
45423 ++static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
45424 ++{
45425 ++ skb->end = offset;
45426 ++}
45427 + #else
45428 + static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
45429 + {
45430 +@@ -1485,6 +1490,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
45431 + {
45432 + return skb->end - skb->head;
45433 + }
45434 ++
45435 ++static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
45436 ++{
45437 ++ skb->end = skb->head + offset;
45438 ++}
45439 + #endif
45440 +
45441 + /* Internal */
45442 +@@ -1724,19 +1734,19 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
45443 + return 0;
45444 + }
45445 +
45446 +-/* This variant of skb_unclone() makes sure skb->truesize is not changed */
45447 ++/* This variant of skb_unclone() makes sure skb->truesize
45448 ++ * and skb_end_offset() are not changed, whenever a new skb->head is needed.
45449 ++ *
45450 ++ * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X))
45451 ++ * when various debugging features are in place.
45452 ++ */
45453 ++int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
45454 + static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
45455 + {
45456 + might_sleep_if(gfpflags_allow_blocking(pri));
45457 +
45458 +- if (skb_cloned(skb)) {
45459 +- unsigned int save = skb->truesize;
45460 +- int res;
45461 +-
45462 +- res = pskb_expand_head(skb, 0, 0, pri);
45463 +- skb->truesize = save;
45464 +- return res;
45465 +- }
45466 ++ if (skb_cloned(skb))
45467 ++ return __skb_unclone_keeptruesize(skb, pri);
45468 + return 0;
45469 + }
45470 +
45471 +diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
45472 +index 18a717fe62eb0..7f32dd59e7513 100644
45473 +--- a/include/linux/skmsg.h
45474 ++++ b/include/linux/skmsg.h
45475 +@@ -310,21 +310,16 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
45476 + kfree_skb(skb);
45477 + }
45478 +
45479 +-static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
45480 +-{
45481 +- if (msg->skb)
45482 +- sock_drop(psock->sk, msg->skb);
45483 +- kfree(msg);
45484 +-}
45485 +-
45486 + static inline void sk_psock_queue_msg(struct sk_psock *psock,
45487 + struct sk_msg *msg)
45488 + {
45489 + spin_lock_bh(&psock->ingress_lock);
45490 + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
45491 + list_add_tail(&msg->list, &psock->ingress_msg);
45492 +- else
45493 +- drop_sk_msg(psock, msg);
45494 ++ else {
45495 ++ sk_msg_free(psock->sk, msg);
45496 ++ kfree(msg);
45497 ++ }
45498 + spin_unlock_bh(&psock->ingress_lock);
45499 + }
45500 +
45501 +diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
45502 +index 0aad7009b50e6..bd0d11af76c5e 100644
45503 +--- a/include/linux/soc/ti/ti_sci_protocol.h
45504 ++++ b/include/linux/soc/ti/ti_sci_protocol.h
45505 +@@ -645,7 +645,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
45506 +
45507 + static inline struct ti_sci_resource *
45508 + devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
45509 +- u32 dev_id, u32 sub_type);
45510 ++ u32 dev_id, u32 sub_type)
45511 + {
45512 + return ERR_PTR(-EINVAL);
45513 + }
45514 +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
45515 +index b519609af1d02..4417f667c757e 100644
45516 +--- a/include/linux/sunrpc/xdr.h
45517 ++++ b/include/linux/sunrpc/xdr.h
45518 +@@ -731,6 +731,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
45519 +
45520 + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
45521 + return -EBADMSG;
45522 ++ if (len > SIZE_MAX / sizeof(*p))
45523 ++ return -EBADMSG;
45524 + p = xdr_inline_decode(xdr, len * sizeof(*p));
45525 + if (unlikely(!p))
45526 + return -EBADMSG;
45527 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
45528 +index 955ea4d7af0b2..eef5e87c03b43 100644
45529 +--- a/include/linux/sunrpc/xprt.h
45530 ++++ b/include/linux/sunrpc/xprt.h
45531 +@@ -139,6 +139,9 @@ struct rpc_xprt_ops {
45532 + void (*rpcbind)(struct rpc_task *task);
45533 + void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
45534 + void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
45535 ++ int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
45536 ++ size_t buflen);
45537 ++ unsigned short (*get_srcport)(struct rpc_xprt *xprt);
45538 + int (*buf_alloc)(struct rpc_task *task);
45539 + void (*buf_free)(struct rpc_task *task);
45540 + void (*prepare_request)(struct rpc_rqst *req);
45541 +diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
45542 +index 8c2a712cb2420..fed813ffe7db1 100644
45543 +--- a/include/linux/sunrpc/xprtsock.h
45544 ++++ b/include/linux/sunrpc/xprtsock.h
45545 +@@ -10,7 +10,6 @@
45546 +
45547 + int init_socket_xprt(void);
45548 + void cleanup_socket_xprt(void);
45549 +-unsigned short get_srcport(struct rpc_xprt *);
45550 +
45551 + #define RPC_MIN_RESVPORT (1U)
45552 + #define RPC_MAX_RESVPORT (65535U)
45553 +@@ -89,5 +88,6 @@ struct sock_xprt {
45554 + #define XPRT_SOCK_WAKE_WRITE (5)
45555 + #define XPRT_SOCK_WAKE_PENDING (6)
45556 + #define XPRT_SOCK_WAKE_DISCONNECT (7)
45557 ++#define XPRT_SOCK_CONNECT_SENT (8)
45558 +
45559 + #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
45560 +diff --git a/include/linux/xarray.h b/include/linux/xarray.h
45561 +index d6d5da6ed7354..66e28bc1a023f 100644
45562 +--- a/include/linux/xarray.h
45563 ++++ b/include/linux/xarray.h
45564 +@@ -9,6 +9,7 @@
45565 + * See Documentation/core-api/xarray.rst for how to use the XArray.
45566 + */
45567 +
45568 ++#include <linux/bitmap.h>
45569 + #include <linux/bug.h>
45570 + #include <linux/compiler.h>
45571 + #include <linux/gfp.h>
45572 +diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
45573 +index 37f0fbefb060f..9939c366f720d 100644
45574 +--- a/include/net/netfilter/nf_conntrack_helper.h
45575 ++++ b/include/net/netfilter/nf_conntrack_helper.h
45576 +@@ -177,4 +177,5 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
45577 + int nf_nat_helper_try_module_get(const char *name, u16 l3num,
45578 + u8 protonum);
45579 + void nf_nat_helper_put(struct nf_conntrack_helper *helper);
45580 ++void nf_ct_set_auto_assign_helper_warned(struct net *net);
45581 + #endif /*_NF_CONNTRACK_HELPER_H*/
45582 +diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
45583 +index bd59e950f4d67..64daafd1fc41c 100644
45584 +--- a/include/net/netfilter/nf_flow_table.h
45585 ++++ b/include/net/netfilter/nf_flow_table.h
45586 +@@ -10,6 +10,8 @@
45587 + #include <linux/netfilter/nf_conntrack_tuple_common.h>
45588 + #include <net/flow_offload.h>
45589 + #include <net/dst.h>
45590 ++#include <linux/if_pppox.h>
45591 ++#include <linux/ppp_defs.h>
45592 +
45593 + struct nf_flowtable;
45594 + struct nf_flow_rule;
45595 +@@ -317,4 +319,20 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
45596 + int nf_flow_table_offload_init(void);
45597 + void nf_flow_table_offload_exit(void);
45598 +
45599 ++static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
45600 ++{
45601 ++ __be16 proto;
45602 ++
45603 ++ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
45604 ++ sizeof(struct pppoe_hdr)));
45605 ++ switch (proto) {
45606 ++ case htons(PPP_IP):
45607 ++ return htons(ETH_P_IP);
45608 ++ case htons(PPP_IPV6):
45609 ++ return htons(ETH_P_IPV6);
45610 ++ }
45611 ++
45612 ++ return 0;
45613 ++}
45614 ++
45615 + #endif /* _NF_FLOW_TABLE_H */
45616 +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
45617 +index 647c53b261051..57e3e239a1fce 100644
45618 +--- a/include/scsi/scsi_device.h
45619 ++++ b/include/scsi/scsi_device.h
45620 +@@ -206,6 +206,7 @@ struct scsi_device {
45621 + unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
45622 + * creation time */
45623 + unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
45624 ++ unsigned silence_suspend:1; /* Do not print runtime PM related messages */
45625 +
45626 + unsigned int queue_stopped; /* request queue is quiesced */
45627 + bool offline_already; /* Device offline message logged */
45628 +diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
45629 +index 089a760d36eb7..6fb2d5e378fdd 100644
45630 +--- a/include/sound/intel-nhlt.h
45631 ++++ b/include/sound/intel-nhlt.h
45632 +@@ -18,6 +18,13 @@ enum nhlt_link_type {
45633 + NHLT_LINK_INVALID
45634 + };
45635 +
45636 ++enum nhlt_device_type {
45637 ++ NHLT_DEVICE_BT = 0,
45638 ++ NHLT_DEVICE_DMIC = 1,
45639 ++ NHLT_DEVICE_I2S = 4,
45640 ++ NHLT_DEVICE_INVALID
45641 ++};
45642 ++
45643 + #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
45644 +
45645 + struct wav_fmt {
45646 +@@ -41,13 +48,6 @@ struct wav_fmt_ext {
45647 + u8 sub_fmt[16];
45648 + } __packed;
45649 +
45650 +-enum nhlt_device_type {
45651 +- NHLT_DEVICE_BT = 0,
45652 +- NHLT_DEVICE_DMIC = 1,
45653 +- NHLT_DEVICE_I2S = 4,
45654 +- NHLT_DEVICE_INVALID
45655 +-};
45656 +-
45657 + struct nhlt_specific_cfg {
45658 + u32 size;
45659 + u8 caps[];
45660 +@@ -133,6 +133,9 @@ void intel_nhlt_free(struct nhlt_acpi_table *addr);
45661 + int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt);
45662 +
45663 + bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
45664 ++
45665 ++int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type);
45666 ++
45667 + struct nhlt_specific_cfg *
45668 + intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
45669 + u32 bus_id, u8 link_type, u8 vbps, u8 bps,
45670 +@@ -163,6 +166,11 @@ static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt,
45671 + return false;
45672 + }
45673 +
45674 ++static inline int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type)
45675 ++{
45676 ++ return 0;
45677 ++}
45678 ++
45679 + static inline struct nhlt_specific_cfg *
45680 + intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
45681 + u32 bus_id, u8 link_type, u8 vbps, u8 bps,
45682 +diff --git a/include/sound/pcm.h b/include/sound/pcm.h
45683 +index 314f2779cab52..6b99310b5b889 100644
45684 +--- a/include/sound/pcm.h
45685 ++++ b/include/sound/pcm.h
45686 +@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
45687 + struct fasync_struct *fasync;
45688 + bool stop_operating; /* sync_stop will be called */
45689 + struct mutex buffer_mutex; /* protect for buffer changes */
45690 ++ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
45691 +
45692 + /* -- private section -- */
45693 + void *private_data;
45694 +diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
45695 +index 19e957b7f9410..1a0b7030f72a3 100644
45696 +--- a/include/trace/events/ext4.h
45697 ++++ b/include/trace/events/ext4.h
45698 +@@ -95,6 +95,17 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
45699 + { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
45700 + { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
45701 +
45702 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
45703 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
45704 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
45705 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM);
45706 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT);
45707 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
45708 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
45709 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
45710 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
45711 ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
45712 ++
45713 + #define show_fc_reason(reason) \
45714 + __print_symbolic(reason, \
45715 + { EXT4_FC_REASON_XATTR, "XATTR"}, \
45716 +@@ -2723,41 +2734,50 @@ TRACE_EVENT(ext4_fc_commit_stop,
45717 +
45718 + #define FC_REASON_NAME_STAT(reason) \
45719 + show_fc_reason(reason), \
45720 +- __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason]
45721 ++ __entry->fc_ineligible_rc[reason]
45722 +
45723 + TRACE_EVENT(ext4_fc_stats,
45724 +- TP_PROTO(struct super_block *sb),
45725 +-
45726 +- TP_ARGS(sb),
45727 ++ TP_PROTO(struct super_block *sb),
45728 +
45729 +- TP_STRUCT__entry(
45730 +- __field(dev_t, dev)
45731 +- __field(struct ext4_sb_info *, sbi)
45732 +- __field(int, count)
45733 +- ),
45734 ++ TP_ARGS(sb),
45735 +
45736 +- TP_fast_assign(
45737 +- __entry->dev = sb->s_dev;
45738 +- __entry->sbi = EXT4_SB(sb);
45739 +- ),
45740 ++ TP_STRUCT__entry(
45741 ++ __field(dev_t, dev)
45742 ++ __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX)
45743 ++ __field(unsigned long, fc_commits)
45744 ++ __field(unsigned long, fc_ineligible_commits)
45745 ++ __field(unsigned long, fc_numblks)
45746 ++ ),
45747 +
45748 +- TP_printk("dev %d:%d fc ineligible reasons:\n"
45749 +- "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; "
45750 +- "num_commits:%ld, ineligible: %ld, numblks: %ld",
45751 +- MAJOR(__entry->dev), MINOR(__entry->dev),
45752 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
45753 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
45754 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
45755 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
45756 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
45757 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
45758 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
45759 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
45760 +- FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
45761 +- __entry->sbi->s_fc_stats.fc_num_commits,
45762 +- __entry->sbi->s_fc_stats.fc_ineligible_commits,
45763 +- __entry->sbi->s_fc_stats.fc_numblks)
45764 ++ TP_fast_assign(
45765 ++ int i;
45766 +
45767 ++ __entry->dev = sb->s_dev;
45768 ++ for (i = 0; i < EXT4_FC_REASON_MAX; i++) {
45769 ++ __entry->fc_ineligible_rc[i] =
45770 ++ EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i];
45771 ++ }
45772 ++ __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
45773 ++ __entry->fc_ineligible_commits =
45774 ++ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
45775 ++ __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks;
45776 ++ ),
45777 ++
45778 ++ TP_printk("dev %d,%d fc ineligible reasons:\n"
45779 ++ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u "
45780 ++ "num_commits:%lu, ineligible: %lu, numblks: %lu",
45781 ++ MAJOR(__entry->dev), MINOR(__entry->dev),
45782 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
45783 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
45784 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
45785 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
45786 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
45787 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
45788 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
45789 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
45790 ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
45791 ++ __entry->fc_commits, __entry->fc_ineligible_commits,
45792 ++ __entry->fc_numblks)
45793 + );
45794 +
45795 + #define DEFINE_TRACE_DENTRY_EVENT(__type) \
45796 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
45797 +index e70c90116edae..4a3ab0ed6e062 100644
45798 +--- a/include/trace/events/rxrpc.h
45799 ++++ b/include/trace/events/rxrpc.h
45800 +@@ -83,12 +83,15 @@ enum rxrpc_call_trace {
45801 + rxrpc_call_error,
45802 + rxrpc_call_got,
45803 + rxrpc_call_got_kernel,
45804 ++ rxrpc_call_got_timer,
45805 + rxrpc_call_got_userid,
45806 + rxrpc_call_new_client,
45807 + rxrpc_call_new_service,
45808 + rxrpc_call_put,
45809 + rxrpc_call_put_kernel,
45810 + rxrpc_call_put_noqueue,
45811 ++ rxrpc_call_put_notimer,
45812 ++ rxrpc_call_put_timer,
45813 + rxrpc_call_put_userid,
45814 + rxrpc_call_queued,
45815 + rxrpc_call_queued_ref,
45816 +@@ -278,12 +281,15 @@ enum rxrpc_tx_point {
45817 + EM(rxrpc_call_error, "*E*") \
45818 + EM(rxrpc_call_got, "GOT") \
45819 + EM(rxrpc_call_got_kernel, "Gke") \
45820 ++ EM(rxrpc_call_got_timer, "GTM") \
45821 + EM(rxrpc_call_got_userid, "Gus") \
45822 + EM(rxrpc_call_new_client, "NWc") \
45823 + EM(rxrpc_call_new_service, "NWs") \
45824 + EM(rxrpc_call_put, "PUT") \
45825 + EM(rxrpc_call_put_kernel, "Pke") \
45826 +- EM(rxrpc_call_put_noqueue, "PNQ") \
45827 ++ EM(rxrpc_call_put_noqueue, "PnQ") \
45828 ++ EM(rxrpc_call_put_notimer, "PnT") \
45829 ++ EM(rxrpc_call_put_timer, "PTM") \
45830 + EM(rxrpc_call_put_userid, "Pus") \
45831 + EM(rxrpc_call_queued, "QUE") \
45832 + EM(rxrpc_call_queued_ref, "QUR") \
45833 +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
45834 +index b0383d371b9af..015bfec0dbfdb 100644
45835 +--- a/include/uapi/linux/bpf.h
45836 ++++ b/include/uapi/linux/bpf.h
45837 +@@ -2286,8 +2286,8 @@ union bpf_attr {
45838 + * Return
45839 + * The return value depends on the result of the test, and can be:
45840 + *
45841 +- * * 0, if current task belongs to the cgroup2.
45842 +- * * 1, if current task does not belong to the cgroup2.
45843 ++ * * 1, if current task belongs to the cgroup2.
45844 ++ * * 0, if current task does not belong to the cgroup2.
45845 + * * A negative error code, if an error occurred.
45846 + *
45847 + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
45848 +@@ -2975,8 +2975,8 @@ union bpf_attr {
45849 + *
45850 + * # sysctl kernel.perf_event_max_stack=<new value>
45851 + * Return
45852 +- * A non-negative value equal to or less than *size* on success,
45853 +- * or a negative error in case of failure.
45854 ++ * The non-negative copied *buf* length equal to or less than
45855 ++ * *size* on success, or a negative error in case of failure.
45856 + *
45857 + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
45858 + * Description
45859 +@@ -4279,8 +4279,8 @@ union bpf_attr {
45860 + *
45861 + * # sysctl kernel.perf_event_max_stack=<new value>
45862 + * Return
45863 +- * A non-negative value equal to or less than *size* on success,
45864 +- * or a negative error in case of failure.
45865 ++ * The non-negative copied *buf* length equal to or less than
45866 ++ * *size* on success, or a negative error in case of failure.
45867 + *
45868 + * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
45869 + * Description
45870 +diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
45871 +index 24a1c45bd1ae2..98e60801195e2 100644
45872 +--- a/include/uapi/linux/loop.h
45873 ++++ b/include/uapi/linux/loop.h
45874 +@@ -45,7 +45,7 @@ struct loop_info {
45875 + unsigned long lo_inode; /* ioctl r/o */
45876 + __kernel_old_dev_t lo_rdevice; /* ioctl r/o */
45877 + int lo_offset;
45878 +- int lo_encrypt_type;
45879 ++ int lo_encrypt_type; /* obsolete, ignored */
45880 + int lo_encrypt_key_size; /* ioctl w/o */
45881 + int lo_flags;
45882 + char lo_name[LO_NAME_SIZE];
45883 +@@ -61,7 +61,7 @@ struct loop_info64 {
45884 + __u64 lo_offset;
45885 + __u64 lo_sizelimit;/* bytes, 0 == max available */
45886 + __u32 lo_number; /* ioctl r/o */
45887 +- __u32 lo_encrypt_type;
45888 ++ __u32 lo_encrypt_type; /* obsolete, ignored */
45889 + __u32 lo_encrypt_key_size; /* ioctl w/o */
45890 + __u32 lo_flags;
45891 + __u8 lo_file_name[LO_NAME_SIZE];
45892 +diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h
45893 +index 87b55755f4ffe..d9db7ad438908 100644
45894 +--- a/include/uapi/linux/omap3isp.h
45895 ++++ b/include/uapi/linux/omap3isp.h
45896 +@@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config {
45897 + * struct omap3isp_stat_data - Statistic data sent to or received from user
45898 + * @ts: Timestamp of returned framestats.
45899 + * @buf: Pointer to pass to user.
45900 ++ * @buf_size: Size of buffer.
45901 + * @frame_number: Frame number of requested stats.
45902 + * @cur_frame: Current frame number being processed.
45903 + * @config_counter: Number of the configuration associated with the data.
45904 +@@ -176,10 +177,12 @@ struct omap3isp_stat_data {
45905 + struct timeval ts;
45906 + #endif
45907 + void __user *buf;
45908 +- __u32 buf_size;
45909 +- __u16 frame_number;
45910 +- __u16 cur_frame;
45911 +- __u16 config_counter;
45912 ++ __struct_group(/* no tag */, frame, /* no attrs */,
45913 ++ __u32 buf_size;
45914 ++ __u16 frame_number;
45915 ++ __u16 cur_frame;
45916 ++ __u16 config_counter;
45917 ++ );
45918 + };
45919 +
45920 + #ifdef __KERNEL__
45921 +@@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 {
45922 + __s32 tv_usec;
45923 + } ts;
45924 + __u32 buf;
45925 +- __u32 buf_size;
45926 +- __u16 frame_number;
45927 +- __u16 cur_frame;
45928 +- __u16 config_counter;
45929 ++ __struct_group(/* no tag */, frame, /* no attrs */,
45930 ++ __u32 buf_size;
45931 ++ __u16 frame_number;
45932 ++ __u16 cur_frame;
45933 ++ __u16 config_counter;
45934 ++ );
45935 + };
45936 + #endif
45937 +
45938 +diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
45939 +index 9b77cfc42efa3..283c5a7b3f2c8 100644
45940 +--- a/include/uapi/linux/rfkill.h
45941 ++++ b/include/uapi/linux/rfkill.h
45942 +@@ -159,8 +159,16 @@ struct rfkill_event_ext {
45943 + * old behaviour for all userspace, unless it explicitly opts in to the
45944 + * rules outlined here by using the new &struct rfkill_event_ext.
45945 + *
45946 +- * Userspace using &struct rfkill_event_ext must adhere to the following
45947 +- * rules
45948 ++ * Additionally, some other userspace (bluez, g-s-d) was reading with a
45949 ++ * large size but as streaming reads rather than message-based, or with
45950 ++ * too strict checks for the returned size. So eventually, we completely
45951 ++ * reverted this, and extended messages need to be opted in to by using
45952 ++ * an ioctl:
45953 ++ *
45954 ++ * ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext));
45955 ++ *
45956 ++ * Userspace using &struct rfkill_event_ext and the ioctl must adhere to
45957 ++ * the following rules:
45958 + *
45959 + * 1. accept short writes, optionally using them to detect that it's
45960 + * running on an older kernel;
45961 +@@ -175,6 +183,8 @@ struct rfkill_event_ext {
45962 + #define RFKILL_IOC_MAGIC 'R'
45963 + #define RFKILL_IOC_NOINPUT 1
45964 + #define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
45965 ++#define RFKILL_IOC_MAX_SIZE 2
45966 ++#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_EXT_SIZE, __u32)
45967 +
45968 + /* and that's all userspace gets */
45969 +
45970 +diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
45971 +index 9a402fdb60e97..77ee207623a9b 100644
45972 +--- a/include/uapi/linux/rseq.h
45973 ++++ b/include/uapi/linux/rseq.h
45974 +@@ -105,23 +105,11 @@ struct rseq {
45975 + * Read and set by the kernel. Set by user-space with single-copy
45976 + * atomicity semantics. This field should only be updated by the
45977 + * thread which registered this data structure. Aligned on 64-bit.
45978 ++ *
45979 ++ * 32-bit architectures should update the low order bits of the
45980 ++ * rseq_cs field, leaving the high order bits initialized to 0.
45981 + */
45982 +- union {
45983 +- __u64 ptr64;
45984 +-#ifdef __LP64__
45985 +- __u64 ptr;
45986 +-#else
45987 +- struct {
45988 +-#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
45989 +- __u32 padding; /* Initialized to zero. */
45990 +- __u32 ptr32;
45991 +-#else /* LITTLE */
45992 +- __u32 ptr32;
45993 +- __u32 padding; /* Initialized to zero. */
45994 +-#endif /* ENDIAN */
45995 +- } ptr;
45996 +-#endif
45997 +- } rseq_cs;
45998 ++ __u64 rseq_cs;
45999 +
46000 + /*
46001 + * Restartable sequences flags field.
46002 +diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
46003 +index c4042dcfdc0c3..8885e69178bd7 100644
46004 +--- a/include/uapi/linux/serial_core.h
46005 ++++ b/include/uapi/linux/serial_core.h
46006 +@@ -68,6 +68,9 @@
46007 + /* NVIDIA Tegra Combined UART */
46008 + #define PORT_TEGRA_TCU 41
46009 +
46010 ++/* ASPEED AST2x00 virtual UART */
46011 ++#define PORT_ASPEED_VUART 42
46012 ++
46013 + /* Intel EG20 */
46014 + #define PORT_PCH_8LINE 44
46015 + #define PORT_PCH_2LINE 45
46016 +diff --git a/kernel/audit.h b/kernel/audit.h
46017 +index c4498090a5bd6..58b66543b4d57 100644
46018 +--- a/kernel/audit.h
46019 ++++ b/kernel/audit.h
46020 +@@ -201,6 +201,10 @@ struct audit_context {
46021 + struct {
46022 + char *name;
46023 + } module;
46024 ++ struct {
46025 ++ struct audit_ntp_data ntp_data;
46026 ++ struct timespec64 tk_injoffset;
46027 ++ } time;
46028 + };
46029 + int fds[2];
46030 + struct audit_proctitle proctitle;
46031 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
46032 +index a83928cbdcb7c..ea2ee1181921e 100644
46033 +--- a/kernel/auditsc.c
46034 ++++ b/kernel/auditsc.c
46035 +@@ -1340,6 +1340,53 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
46036 + from_kuid(&init_user_ns, name->fcap.rootid));
46037 + }
46038 +
46039 ++static void audit_log_time(struct audit_context *context, struct audit_buffer **ab)
46040 ++{
46041 ++ const struct audit_ntp_data *ntp = &context->time.ntp_data;
46042 ++ const struct timespec64 *tk = &context->time.tk_injoffset;
46043 ++ static const char * const ntp_name[] = {
46044 ++ "offset",
46045 ++ "freq",
46046 ++ "status",
46047 ++ "tai",
46048 ++ "tick",
46049 ++ "adjust",
46050 ++ };
46051 ++ int type;
46052 ++
46053 ++ if (context->type == AUDIT_TIME_ADJNTPVAL) {
46054 ++ for (type = 0; type < AUDIT_NTP_NVALS; type++) {
46055 ++ if (ntp->vals[type].newval != ntp->vals[type].oldval) {
46056 ++ if (!*ab) {
46057 ++ *ab = audit_log_start(context,
46058 ++ GFP_KERNEL,
46059 ++ AUDIT_TIME_ADJNTPVAL);
46060 ++ if (!*ab)
46061 ++ return;
46062 ++ }
46063 ++ audit_log_format(*ab, "op=%s old=%lli new=%lli",
46064 ++ ntp_name[type],
46065 ++ ntp->vals[type].oldval,
46066 ++ ntp->vals[type].newval);
46067 ++ audit_log_end(*ab);
46068 ++ *ab = NULL;
46069 ++ }
46070 ++ }
46071 ++ }
46072 ++ if (tk->tv_sec != 0 || tk->tv_nsec != 0) {
46073 ++ if (!*ab) {
46074 ++ *ab = audit_log_start(context, GFP_KERNEL,
46075 ++ AUDIT_TIME_INJOFFSET);
46076 ++ if (!*ab)
46077 ++ return;
46078 ++ }
46079 ++ audit_log_format(*ab, "sec=%lli nsec=%li",
46080 ++ (long long)tk->tv_sec, tk->tv_nsec);
46081 ++ audit_log_end(*ab);
46082 ++ *ab = NULL;
46083 ++ }
46084 ++}
46085 ++
46086 + static void show_special(struct audit_context *context, int *call_panic)
46087 + {
46088 + struct audit_buffer *ab;
46089 +@@ -1454,6 +1501,11 @@ static void show_special(struct audit_context *context, int *call_panic)
46090 + audit_log_format(ab, "(null)");
46091 +
46092 + break;
46093 ++ case AUDIT_TIME_ADJNTPVAL:
46094 ++ case AUDIT_TIME_INJOFFSET:
46095 ++ /* this call deviates from the rest, eating the buffer */
46096 ++ audit_log_time(context, &ab);
46097 ++ break;
46098 + }
46099 + audit_log_end(ab);
46100 + }
46101 +@@ -2849,31 +2901,26 @@ void __audit_fanotify(unsigned int response)
46102 +
46103 + void __audit_tk_injoffset(struct timespec64 offset)
46104 + {
46105 +- audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_INJOFFSET,
46106 +- "sec=%lli nsec=%li",
46107 +- (long long)offset.tv_sec, offset.tv_nsec);
46108 +-}
46109 +-
46110 +-static void audit_log_ntp_val(const struct audit_ntp_data *ad,
46111 +- const char *op, enum audit_ntp_type type)
46112 +-{
46113 +- const struct audit_ntp_val *val = &ad->vals[type];
46114 +-
46115 +- if (val->newval == val->oldval)
46116 +- return;
46117 ++ struct audit_context *context = audit_context();
46118 +
46119 +- audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_ADJNTPVAL,
46120 +- "op=%s old=%lli new=%lli", op, val->oldval, val->newval);
46121 ++ /* only set type if not already set by NTP */
46122 ++ if (!context->type)
46123 ++ context->type = AUDIT_TIME_INJOFFSET;
46124 ++ memcpy(&context->time.tk_injoffset, &offset, sizeof(offset));
46125 + }
46126 +
46127 + void __audit_ntp_log(const struct audit_ntp_data *ad)
46128 + {
46129 +- audit_log_ntp_val(ad, "offset", AUDIT_NTP_OFFSET);
46130 +- audit_log_ntp_val(ad, "freq", AUDIT_NTP_FREQ);
46131 +- audit_log_ntp_val(ad, "status", AUDIT_NTP_STATUS);
46132 +- audit_log_ntp_val(ad, "tai", AUDIT_NTP_TAI);
46133 +- audit_log_ntp_val(ad, "tick", AUDIT_NTP_TICK);
46134 +- audit_log_ntp_val(ad, "adjust", AUDIT_NTP_ADJUST);
46135 ++ struct audit_context *context = audit_context();
46136 ++ int type;
46137 ++
46138 ++ for (type = 0; type < AUDIT_NTP_NVALS; type++)
46139 ++ if (ad->vals[type].newval != ad->vals[type].oldval) {
46140 ++ /* unconditionally set type, overwriting TK */
46141 ++ context->type = AUDIT_TIME_ADJNTPVAL;
46142 ++ memcpy(&context->time.ntp_data, ad, sizeof(*ad));
46143 ++ break;
46144 ++ }
46145 + }
46146 +
46147 + void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
46148 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
46149 +index 3e23b3fa79ff6..ac89e65d1692e 100644
46150 +--- a/kernel/bpf/btf.c
46151 ++++ b/kernel/bpf/btf.c
46152 +@@ -403,6 +403,9 @@ static struct btf_type btf_void;
46153 + static int btf_resolve(struct btf_verifier_env *env,
46154 + const struct btf_type *t, u32 type_id);
46155 +
46156 ++static int btf_func_check(struct btf_verifier_env *env,
46157 ++ const struct btf_type *t);
46158 ++
46159 + static bool btf_type_is_modifier(const struct btf_type *t)
46160 + {
46161 + /* Some of them is not strictly a C modifier
46162 +@@ -579,6 +582,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t)
46163 + btf_type_is_struct(t) ||
46164 + btf_type_is_array(t) ||
46165 + btf_type_is_var(t) ||
46166 ++ btf_type_is_func(t) ||
46167 + btf_type_is_decl_tag(t) ||
46168 + btf_type_is_datasec(t);
46169 + }
46170 +@@ -3533,9 +3537,24 @@ static s32 btf_func_check_meta(struct btf_verifier_env *env,
46171 + return 0;
46172 + }
46173 +
46174 ++static int btf_func_resolve(struct btf_verifier_env *env,
46175 ++ const struct resolve_vertex *v)
46176 ++{
46177 ++ const struct btf_type *t = v->t;
46178 ++ u32 next_type_id = t->type;
46179 ++ int err;
46180 ++
46181 ++ err = btf_func_check(env, t);
46182 ++ if (err)
46183 ++ return err;
46184 ++
46185 ++ env_stack_pop_resolved(env, next_type_id, 0);
46186 ++ return 0;
46187 ++}
46188 ++
46189 + static struct btf_kind_operations func_ops = {
46190 + .check_meta = btf_func_check_meta,
46191 +- .resolve = btf_df_resolve,
46192 ++ .resolve = btf_func_resolve,
46193 + .check_member = btf_df_check_member,
46194 + .check_kflag_member = btf_df_check_kflag_member,
46195 + .log_details = btf_ref_type_log,
46196 +@@ -4156,7 +4175,7 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
46197 + return !btf_resolved_type_id(btf, type_id) &&
46198 + !btf_resolved_type_size(btf, type_id);
46199 +
46200 +- if (btf_type_is_decl_tag(t))
46201 ++ if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
46202 + return btf_resolved_type_id(btf, type_id) &&
46203 + !btf_resolved_type_size(btf, type_id);
46204 +
46205 +@@ -4246,12 +4265,6 @@ static int btf_check_all_types(struct btf_verifier_env *env)
46206 + if (err)
46207 + return err;
46208 + }
46209 +-
46210 +- if (btf_type_is_func(t)) {
46211 +- err = btf_func_check(env, t);
46212 +- if (err)
46213 +- return err;
46214 +- }
46215 + }
46216 +
46217 + return 0;
46218 +@@ -6201,12 +6214,17 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
46219 + return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
46220 + }
46221 +
46222 ++enum {
46223 ++ BTF_MODULE_F_LIVE = (1 << 0),
46224 ++};
46225 ++
46226 + #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
46227 + struct btf_module {
46228 + struct list_head list;
46229 + struct module *module;
46230 + struct btf *btf;
46231 + struct bin_attribute *sysfs_attr;
46232 ++ int flags;
46233 + };
46234 +
46235 + static LIST_HEAD(btf_modules);
46236 +@@ -6234,7 +6252,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
46237 + int err = 0;
46238 +
46239 + if (mod->btf_data_size == 0 ||
46240 +- (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
46241 ++ (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
46242 ++ op != MODULE_STATE_GOING))
46243 + goto out;
46244 +
46245 + switch (op) {
46246 +@@ -6292,6 +6311,17 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
46247 + btf_mod->sysfs_attr = attr;
46248 + }
46249 +
46250 ++ break;
46251 ++ case MODULE_STATE_LIVE:
46252 ++ mutex_lock(&btf_module_mutex);
46253 ++ list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
46254 ++ if (btf_mod->module != module)
46255 ++ continue;
46256 ++
46257 ++ btf_mod->flags |= BTF_MODULE_F_LIVE;
46258 ++ break;
46259 ++ }
46260 ++ mutex_unlock(&btf_module_mutex);
46261 + break;
46262 + case MODULE_STATE_GOING:
46263 + mutex_lock(&btf_module_mutex);
46264 +@@ -6339,7 +6369,12 @@ struct module *btf_try_get_module(const struct btf *btf)
46265 + if (btf_mod->btf != btf)
46266 + continue;
46267 +
46268 +- if (try_module_get(btf_mod->module))
46269 ++ /* We must only consider module whose __init routine has
46270 ++ * finished, hence we must check for BTF_MODULE_F_LIVE flag,
46271 ++ * which is set from the notifier callback for
46272 ++ * MODULE_STATE_LIVE.
46273 ++ */
46274 ++ if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
46275 + res = btf_mod->module;
46276 +
46277 + break;
46278 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
46279 +index 22c8ae94e4c1c..2823dcefae10e 100644
46280 +--- a/kernel/bpf/stackmap.c
46281 ++++ b/kernel/bpf/stackmap.c
46282 +@@ -166,7 +166,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
46283 + }
46284 +
46285 + static struct perf_callchain_entry *
46286 +-get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
46287 ++get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
46288 + {
46289 + #ifdef CONFIG_STACKTRACE
46290 + struct perf_callchain_entry *entry;
46291 +@@ -177,9 +177,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
46292 + if (!entry)
46293 + return NULL;
46294 +
46295 +- entry->nr = init_nr +
46296 +- stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
46297 +- sysctl_perf_event_max_stack - init_nr, 0);
46298 ++ entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
46299 ++ max_depth, 0);
46300 +
46301 + /* stack_trace_save_tsk() works on unsigned long array, while
46302 + * perf_callchain_entry uses u64 array. For 32-bit systems, it is
46303 +@@ -191,7 +190,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
46304 + int i;
46305 +
46306 + /* copy data from the end to avoid using extra buffer */
46307 +- for (i = entry->nr - 1; i >= (int)init_nr; i--)
46308 ++ for (i = entry->nr - 1; i >= 0; i--)
46309 + to[i] = (u64)(from[i]);
46310 + }
46311 +
46312 +@@ -208,27 +207,19 @@ static long __bpf_get_stackid(struct bpf_map *map,
46313 + {
46314 + struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
46315 + struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
46316 +- u32 max_depth = map->value_size / stack_map_data_size(map);
46317 +- /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
46318 +- u32 init_nr = sysctl_perf_event_max_stack - max_depth;
46319 + u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
46320 + u32 hash, id, trace_nr, trace_len;
46321 + bool user = flags & BPF_F_USER_STACK;
46322 + u64 *ips;
46323 + bool hash_matches;
46324 +
46325 +- /* get_perf_callchain() guarantees that trace->nr >= init_nr
46326 +- * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
46327 +- */
46328 +- trace_nr = trace->nr - init_nr;
46329 +-
46330 +- if (trace_nr <= skip)
46331 ++ if (trace->nr <= skip)
46332 + /* skipping more than usable stack trace */
46333 + return -EFAULT;
46334 +
46335 +- trace_nr -= skip;
46336 ++ trace_nr = trace->nr - skip;
46337 + trace_len = trace_nr * sizeof(u64);
46338 +- ips = trace->ip + skip + init_nr;
46339 ++ ips = trace->ip + skip;
46340 + hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
46341 + id = hash & (smap->n_buckets - 1);
46342 + bucket = READ_ONCE(smap->buckets[id]);
46343 +@@ -285,8 +276,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
46344 + u64, flags)
46345 + {
46346 + u32 max_depth = map->value_size / stack_map_data_size(map);
46347 +- /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
46348 +- u32 init_nr = sysctl_perf_event_max_stack - max_depth;
46349 ++ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
46350 + bool user = flags & BPF_F_USER_STACK;
46351 + struct perf_callchain_entry *trace;
46352 + bool kernel = !user;
46353 +@@ -295,8 +285,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
46354 + BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
46355 + return -EINVAL;
46356 +
46357 +- trace = get_perf_callchain(regs, init_nr, kernel, user,
46358 +- sysctl_perf_event_max_stack, false, false);
46359 ++ max_depth += skip;
46360 ++ if (max_depth > sysctl_perf_event_max_stack)
46361 ++ max_depth = sysctl_perf_event_max_stack;
46362 ++
46363 ++ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
46364 ++ false, false);
46365 +
46366 + if (unlikely(!trace))
46367 + /* couldn't fetch the stack trace */
46368 +@@ -387,7 +381,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
46369 + struct perf_callchain_entry *trace_in,
46370 + void *buf, u32 size, u64 flags)
46371 + {
46372 +- u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
46373 ++ u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
46374 + bool user_build_id = flags & BPF_F_USER_BUILD_ID;
46375 + u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
46376 + bool user = flags & BPF_F_USER_STACK;
46377 +@@ -412,30 +406,28 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
46378 + goto err_fault;
46379 +
46380 + num_elem = size / elem_size;
46381 +- if (sysctl_perf_event_max_stack < num_elem)
46382 +- init_nr = 0;
46383 +- else
46384 +- init_nr = sysctl_perf_event_max_stack - num_elem;
46385 ++ max_depth = num_elem + skip;
46386 ++ if (sysctl_perf_event_max_stack < max_depth)
46387 ++ max_depth = sysctl_perf_event_max_stack;
46388 +
46389 + if (trace_in)
46390 + trace = trace_in;
46391 + else if (kernel && task)
46392 +- trace = get_callchain_entry_for_task(task, init_nr);
46393 ++ trace = get_callchain_entry_for_task(task, max_depth);
46394 + else
46395 +- trace = get_perf_callchain(regs, init_nr, kernel, user,
46396 +- sysctl_perf_event_max_stack,
46397 ++ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
46398 + false, false);
46399 + if (unlikely(!trace))
46400 + goto err_fault;
46401 +
46402 +- trace_nr = trace->nr - init_nr;
46403 +- if (trace_nr < skip)
46404 ++ if (trace->nr < skip)
46405 + goto err_fault;
46406 +
46407 +- trace_nr -= skip;
46408 ++ trace_nr = trace->nr - skip;
46409 + trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
46410 + copy_len = trace_nr * elem_size;
46411 +- ips = trace->ip + skip + init_nr;
46412 ++
46413 ++ ips = trace->ip + skip;
46414 + if (user && user_build_id)
46415 + stack_map_get_build_id_offset(buf, ips, trace_nr, user);
46416 + else
46417 +diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
46418 +index df2bface866ef..85cb51c4a17e6 100644
46419 +--- a/kernel/debug/kdb/kdb_support.c
46420 ++++ b/kernel/debug/kdb/kdb_support.c
46421 +@@ -291,7 +291,7 @@ int kdb_getarea_size(void *res, unsigned long addr, size_t size)
46422 + */
46423 + int kdb_putarea_size(unsigned long addr, void *res, size_t size)
46424 + {
46425 +- int ret = copy_from_kernel_nofault((char *)addr, (char *)res, size);
46426 ++ int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size);
46427 + if (ret) {
46428 + if (!KDB_STATE(SUPPRESS)) {
46429 + kdb_func_printf("Bad address 0x%lx\n", addr);
46430 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
46431 +index 7a14ca29c3778..f8ff598596b85 100644
46432 +--- a/kernel/dma/debug.c
46433 ++++ b/kernel/dma/debug.c
46434 +@@ -927,7 +927,7 @@ static __init int dma_debug_cmdline(char *str)
46435 + global_disable = true;
46436 + }
46437 +
46438 +- return 0;
46439 ++ return 1;
46440 + }
46441 +
46442 + static __init int dma_debug_entries_cmdline(char *str)
46443 +@@ -936,7 +936,7 @@ static __init int dma_debug_entries_cmdline(char *str)
46444 + return -EINVAL;
46445 + if (!get_option(&str, &nr_prealloc_entries))
46446 + nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
46447 +- return 0;
46448 ++ return 1;
46449 + }
46450 +
46451 + __setup("dma_debug=", dma_debug_cmdline);
46452 +diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
46453 +index 6db1c475ec827..6c350555e5a1c 100644
46454 +--- a/kernel/dma/swiotlb.c
46455 ++++ b/kernel/dma/swiotlb.c
46456 +@@ -701,13 +701,10 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
46457 + void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
46458 + size_t size, enum dma_data_direction dir)
46459 + {
46460 +- /*
46461 +- * Unconditional bounce is necessary to avoid corruption on
46462 +- * sync_*_for_cpu or dma_ummap_* when the device didn't overwrite
46463 +- * the whole lengt of the bounce buffer.
46464 +- */
46465 +- swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
46466 +- BUG_ON(!valid_dma_direction(dir));
46467 ++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
46468 ++ swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
46469 ++ else
46470 ++ BUG_ON(dir != DMA_FROM_DEVICE);
46471 + }
46472 +
46473 + void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
46474 +diff --git a/kernel/events/core.c b/kernel/events/core.c
46475 +index 6859229497b15..69cf71d973121 100644
46476 +--- a/kernel/events/core.c
46477 ++++ b/kernel/events/core.c
46478 +@@ -10574,8 +10574,11 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
46479 + }
46480 +
46481 + /* ready to consume more filters */
46482 ++ kfree(filename);
46483 ++ filename = NULL;
46484 + state = IF_STATE_ACTION;
46485 + filter = NULL;
46486 ++ kernel = 0;
46487 + }
46488 + }
46489 +
46490 +diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
46491 +index 585494ec464f9..bc475e62279d2 100644
46492 +--- a/kernel/livepatch/core.c
46493 ++++ b/kernel/livepatch/core.c
46494 +@@ -190,7 +190,7 @@ static int klp_find_object_symbol(const char *objname, const char *name,
46495 + return -EINVAL;
46496 + }
46497 +
46498 +-static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
46499 ++static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
46500 + unsigned int symndx, Elf_Shdr *relasec,
46501 + const char *sec_objname)
46502 + {
46503 +@@ -218,7 +218,7 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
46504 + relas = (Elf_Rela *) relasec->sh_addr;
46505 + /* For each rela in this klp relocation section */
46506 + for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
46507 +- sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
46508 ++ sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
46509 + if (sym->st_shndx != SHN_LIVEPATCH) {
46510 + pr_err("symbol %s is not marked as a livepatch symbol\n",
46511 + strtab + sym->st_name);
46512 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
46513 +index f8a0212189cad..4675a686f942f 100644
46514 +--- a/kernel/locking/lockdep.c
46515 ++++ b/kernel/locking/lockdep.c
46516 +@@ -183,11 +183,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
46517 + static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
46518 + unsigned long nr_lock_classes;
46519 + unsigned long nr_zapped_classes;
46520 +-#ifndef CONFIG_DEBUG_LOCKDEP
46521 +-static
46522 +-#endif
46523 ++unsigned long max_lock_class_idx;
46524 + struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
46525 +-static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
46526 ++DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
46527 +
46528 + static inline struct lock_class *hlock_class(struct held_lock *hlock)
46529 + {
46530 +@@ -338,7 +336,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock)
46531 + * elements. These elements are linked together by the lock_entry member in
46532 + * struct lock_class.
46533 + */
46534 +-LIST_HEAD(all_lock_classes);
46535 ++static LIST_HEAD(all_lock_classes);
46536 + static LIST_HEAD(free_lock_classes);
46537 +
46538 + /**
46539 +@@ -1252,6 +1250,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
46540 + struct lockdep_subclass_key *key;
46541 + struct hlist_head *hash_head;
46542 + struct lock_class *class;
46543 ++ int idx;
46544 +
46545 + DEBUG_LOCKS_WARN_ON(!irqs_disabled());
46546 +
46547 +@@ -1317,6 +1316,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
46548 + * of classes.
46549 + */
46550 + list_move_tail(&class->lock_entry, &all_lock_classes);
46551 ++ idx = class - lock_classes;
46552 ++ if (idx > max_lock_class_idx)
46553 ++ max_lock_class_idx = idx;
46554 +
46555 + if (verbose(class)) {
46556 + graph_unlock();
46557 +@@ -6000,6 +6002,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
46558 + WRITE_ONCE(class->name, NULL);
46559 + nr_lock_classes--;
46560 + __clear_bit(class - lock_classes, lock_classes_in_use);
46561 ++ if (class - lock_classes == max_lock_class_idx)
46562 ++ max_lock_class_idx--;
46563 + } else {
46564 + WARN_ONCE(true, "%s() failed for class %s\n", __func__,
46565 + class->name);
46566 +@@ -6290,7 +6294,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
46567 + lockdep_reset_lock_reg(lock);
46568 + }
46569 +
46570 +-/* Unregister a dynamically allocated key. */
46571 ++/*
46572 ++ * Unregister a dynamically allocated key.
46573 ++ *
46574 ++ * Unlike lockdep_register_key(), a search is always done to find a matching
46575 ++ * key irrespective of debug_locks to avoid potential invalid access to freed
46576 ++ * memory in lock_class entry.
46577 ++ */
46578 + void lockdep_unregister_key(struct lock_class_key *key)
46579 + {
46580 + struct hlist_head *hash_head = keyhashentry(key);
46581 +@@ -6305,10 +6315,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
46582 + return;
46583 +
46584 + raw_local_irq_save(flags);
46585 +- if (!graph_lock())
46586 +- goto out_irq;
46587 ++ lockdep_lock();
46588 +
46589 +- pf = get_pending_free();
46590 + hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
46591 + if (k == key) {
46592 + hlist_del_rcu(&k->hash_entry);
46593 +@@ -6316,11 +6324,13 @@ void lockdep_unregister_key(struct lock_class_key *key)
46594 + break;
46595 + }
46596 + }
46597 +- WARN_ON_ONCE(!found);
46598 +- __lockdep_free_key_range(pf, key, 1);
46599 +- call_rcu_zapped(pf);
46600 +- graph_unlock();
46601 +-out_irq:
46602 ++ WARN_ON_ONCE(!found && debug_locks);
46603 ++ if (found) {
46604 ++ pf = get_pending_free();
46605 ++ __lockdep_free_key_range(pf, key, 1);
46606 ++ call_rcu_zapped(pf);
46607 ++ }
46608 ++ lockdep_unlock();
46609 + raw_local_irq_restore(flags);
46610 +
46611 + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
46612 +diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
46613 +index ecb8662e7a4ed..bbe9000260d02 100644
46614 +--- a/kernel/locking/lockdep_internals.h
46615 ++++ b/kernel/locking/lockdep_internals.h
46616 +@@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
46617 +
46618 + #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
46619 +
46620 +-extern struct list_head all_lock_classes;
46621 + extern struct lock_chain lock_chains[];
46622 +
46623 + #define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
46624 +@@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks;
46625 +
46626 + extern unsigned int max_lockdep_depth;
46627 + extern unsigned int max_bfs_queue_depth;
46628 ++extern unsigned long max_lock_class_idx;
46629 ++
46630 ++extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
46631 ++extern unsigned long lock_classes_in_use[];
46632 +
46633 + #ifdef CONFIG_PROVE_LOCKING
46634 + extern unsigned long lockdep_count_forward_deps(struct lock_class *);
46635 +@@ -205,7 +208,6 @@ struct lockdep_stats {
46636 + };
46637 +
46638 + DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
46639 +-extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
46640 +
46641 + #define __debug_atomic_inc(ptr) \
46642 + this_cpu_inc(lockdep_stats.ptr);
46643 +diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
46644 +index b8d9a050c337a..15fdc7fa5c688 100644
46645 +--- a/kernel/locking/lockdep_proc.c
46646 ++++ b/kernel/locking/lockdep_proc.c
46647 +@@ -24,14 +24,33 @@
46648 +
46649 + #include "lockdep_internals.h"
46650 +
46651 ++/*
46652 ++ * Since iteration of lock_classes is done without holding the lockdep lock,
46653 ++ * it is not safe to iterate all_lock_classes list directly as the iteration
46654 ++ * may branch off to free_lock_classes or the zapped list. Iteration is done
46655 ++ * directly on the lock_classes array by checking the lock_classes_in_use
46656 ++ * bitmap and max_lock_class_idx.
46657 ++ */
46658 ++#define iterate_lock_classes(idx, class) \
46659 ++ for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
46660 ++ idx++, class++)
46661 ++
46662 + static void *l_next(struct seq_file *m, void *v, loff_t *pos)
46663 + {
46664 +- return seq_list_next(v, &all_lock_classes, pos);
46665 ++ struct lock_class *class = v;
46666 ++
46667 ++ ++class;
46668 ++ *pos = class - lock_classes;
46669 ++ return (*pos > max_lock_class_idx) ? NULL : class;
46670 + }
46671 +
46672 + static void *l_start(struct seq_file *m, loff_t *pos)
46673 + {
46674 +- return seq_list_start_head(&all_lock_classes, *pos);
46675 ++ unsigned long idx = *pos;
46676 ++
46677 ++ if (idx > max_lock_class_idx)
46678 ++ return NULL;
46679 ++ return lock_classes + idx;
46680 + }
46681 +
46682 + static void l_stop(struct seq_file *m, void *v)
46683 +@@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class)
46684 +
46685 + static int l_show(struct seq_file *m, void *v)
46686 + {
46687 +- struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
46688 ++ struct lock_class *class = v;
46689 + struct lock_list *entry;
46690 + char usage[LOCK_USAGE_CHARS];
46691 ++ int idx = class - lock_classes;
46692 +
46693 +- if (v == &all_lock_classes) {
46694 ++ if (v == lock_classes)
46695 + seq_printf(m, "all lock classes:\n");
46696 ++
46697 ++ if (!test_bit(idx, lock_classes_in_use))
46698 + return 0;
46699 +- }
46700 +
46701 + seq_printf(m, "%p", class->key);
46702 + #ifdef CONFIG_DEBUG_LOCKDEP
46703 +@@ -220,8 +241,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
46704 +
46705 + #ifdef CONFIG_PROVE_LOCKING
46706 + struct lock_class *class;
46707 ++ unsigned long idx;
46708 +
46709 +- list_for_each_entry(class, &all_lock_classes, lock_entry) {
46710 ++ iterate_lock_classes(idx, class) {
46711 ++ if (!test_bit(idx, lock_classes_in_use))
46712 ++ continue;
46713 +
46714 + if (class->usage_mask == 0)
46715 + nr_unused++;
46716 +@@ -254,6 +278,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
46717 +
46718 + sum_forward_deps += lockdep_count_forward_deps(class);
46719 + }
46720 ++
46721 + #ifdef CONFIG_DEBUG_LOCKDEP
46722 + DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
46723 + #endif
46724 +@@ -345,6 +370,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
46725 + seq_printf(m, " max bfs queue depth: %11u\n",
46726 + max_bfs_queue_depth);
46727 + #endif
46728 ++ seq_printf(m, " max lock class index: %11lu\n",
46729 ++ max_lock_class_idx);
46730 + lockdep_stats_debug_show(m);
46731 + seq_printf(m, " debug_locks: %11u\n",
46732 + debug_locks);
46733 +@@ -622,12 +649,16 @@ static int lock_stat_open(struct inode *inode, struct file *file)
46734 + if (!res) {
46735 + struct lock_stat_data *iter = data->stats;
46736 + struct seq_file *m = file->private_data;
46737 ++ unsigned long idx;
46738 +
46739 +- list_for_each_entry(class, &all_lock_classes, lock_entry) {
46740 ++ iterate_lock_classes(idx, class) {
46741 ++ if (!test_bit(idx, lock_classes_in_use))
46742 ++ continue;
46743 + iter->class = class;
46744 + iter->stats = lock_stats(class);
46745 + iter++;
46746 + }
46747 ++
46748 + data->iter_end = iter;
46749 +
46750 + sort(data->stats, data->iter_end - data->stats,
46751 +@@ -645,6 +676,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
46752 + size_t count, loff_t *ppos)
46753 + {
46754 + struct lock_class *class;
46755 ++ unsigned long idx;
46756 + char c;
46757 +
46758 + if (count) {
46759 +@@ -654,8 +686,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
46760 + if (c != '0')
46761 + return count;
46762 +
46763 +- list_for_each_entry(class, &all_lock_classes, lock_entry)
46764 ++ iterate_lock_classes(idx, class) {
46765 ++ if (!test_bit(idx, lock_classes_in_use))
46766 ++ continue;
46767 + clear_lock_stats(class);
46768 ++ }
46769 + }
46770 + return count;
46771 + }
46772 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
46773 +index e6af502c2fd77..08780a466fdf7 100644
46774 +--- a/kernel/power/hibernate.c
46775 ++++ b/kernel/power/hibernate.c
46776 +@@ -1328,7 +1328,7 @@ static int __init resumedelay_setup(char *str)
46777 + int rc = kstrtouint(str, 0, &resume_delay);
46778 +
46779 + if (rc)
46780 +- return rc;
46781 ++ pr_warn("resumedelay: bad option string '%s'\n", str);
46782 + return 1;
46783 + }
46784 +
46785 +diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
46786 +index d20526c5be15b..b663a97f5867a 100644
46787 +--- a/kernel/power/suspend_test.c
46788 ++++ b/kernel/power/suspend_test.c
46789 +@@ -157,22 +157,22 @@ static int __init setup_test_suspend(char *value)
46790 + value++;
46791 + suspend_type = strsep(&value, ",");
46792 + if (!suspend_type)
46793 +- return 0;
46794 ++ return 1;
46795 +
46796 + repeat = strsep(&value, ",");
46797 + if (repeat) {
46798 + if (kstrtou32(repeat, 0, &test_repeat_count_max))
46799 +- return 0;
46800 ++ return 1;
46801 + }
46802 +
46803 + for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
46804 + if (!strcmp(pm_labels[i], suspend_type)) {
46805 + test_state_label = pm_labels[i];
46806 +- return 0;
46807 ++ return 1;
46808 + }
46809 +
46810 + printk(warn_bad_state, suspend_type);
46811 +- return 0;
46812 ++ return 1;
46813 + }
46814 + __setup("test_suspend", setup_test_suspend);
46815 +
46816 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
46817 +index 82abfaf3c2aad..833e407545b82 100644
46818 +--- a/kernel/printk/printk.c
46819 ++++ b/kernel/printk/printk.c
46820 +@@ -146,8 +146,10 @@ static int __control_devkmsg(char *str)
46821 +
46822 + static int __init control_devkmsg(char *str)
46823 + {
46824 +- if (__control_devkmsg(str) < 0)
46825 ++ if (__control_devkmsg(str) < 0) {
46826 ++ pr_warn("printk.devkmsg: bad option string '%s'\n", str);
46827 + return 1;
46828 ++ }
46829 +
46830 + /*
46831 + * Set sysctl string accordingly:
46832 +@@ -166,7 +168,7 @@ static int __init control_devkmsg(char *str)
46833 + */
46834 + devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
46835 +
46836 +- return 0;
46837 ++ return 1;
46838 + }
46839 + __setup("printk.devkmsg=", control_devkmsg);
46840 +
46841 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
46842 +index eea265082e975..ccc4b465775b8 100644
46843 +--- a/kernel/ptrace.c
46844 ++++ b/kernel/ptrace.c
46845 +@@ -371,6 +371,26 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
46846 + return !err;
46847 + }
46848 +
46849 ++static int check_ptrace_options(unsigned long data)
46850 ++{
46851 ++ if (data & ~(unsigned long)PTRACE_O_MASK)
46852 ++ return -EINVAL;
46853 ++
46854 ++ if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
46855 ++ if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
46856 ++ !IS_ENABLED(CONFIG_SECCOMP))
46857 ++ return -EINVAL;
46858 ++
46859 ++ if (!capable(CAP_SYS_ADMIN))
46860 ++ return -EPERM;
46861 ++
46862 ++ if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
46863 ++ current->ptrace & PT_SUSPEND_SECCOMP)
46864 ++ return -EPERM;
46865 ++ }
46866 ++ return 0;
46867 ++}
46868 ++
46869 + static int ptrace_attach(struct task_struct *task, long request,
46870 + unsigned long addr,
46871 + unsigned long flags)
46872 +@@ -382,8 +402,16 @@ static int ptrace_attach(struct task_struct *task, long request,
46873 + if (seize) {
46874 + if (addr != 0)
46875 + goto out;
46876 ++ /*
46877 ++ * This duplicates the check in check_ptrace_options() because
46878 ++ * ptrace_attach() and ptrace_setoptions() have historically
46879 ++ * used different error codes for unknown ptrace options.
46880 ++ */
46881 + if (flags & ~(unsigned long)PTRACE_O_MASK)
46882 + goto out;
46883 ++ retval = check_ptrace_options(flags);
46884 ++ if (retval)
46885 ++ return retval;
46886 + flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
46887 + } else {
46888 + flags = PT_PTRACED;
46889 +@@ -654,22 +682,11 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
46890 + static int ptrace_setoptions(struct task_struct *child, unsigned long data)
46891 + {
46892 + unsigned flags;
46893 ++ int ret;
46894 +
46895 +- if (data & ~(unsigned long)PTRACE_O_MASK)
46896 +- return -EINVAL;
46897 +-
46898 +- if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
46899 +- if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
46900 +- !IS_ENABLED(CONFIG_SECCOMP))
46901 +- return -EINVAL;
46902 +-
46903 +- if (!capable(CAP_SYS_ADMIN))
46904 +- return -EPERM;
46905 +-
46906 +- if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
46907 +- current->ptrace & PT_SUSPEND_SECCOMP)
46908 +- return -EPERM;
46909 +- }
46910 ++ ret = check_ptrace_options(data);
46911 ++ if (ret)
46912 ++ return ret;
46913 +
46914 + /* Avoid intermediate state when all opts are cleared */
46915 + flags = child->ptrace;
46916 +diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
46917 +index e373fbe44da5e..431cee212467d 100644
46918 +--- a/kernel/rcu/rcu_segcblist.h
46919 ++++ b/kernel/rcu/rcu_segcblist.h
46920 +@@ -56,13 +56,13 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
46921 + static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
46922 + int flags)
46923 + {
46924 +- rsclp->flags |= flags;
46925 ++ WRITE_ONCE(rsclp->flags, rsclp->flags | flags);
46926 + }
46927 +
46928 + static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
46929 + int flags)
46930 + {
46931 +- rsclp->flags &= ~flags;
46932 ++ WRITE_ONCE(rsclp->flags, rsclp->flags & ~flags);
46933 + }
46934 +
46935 + static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
46936 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
46937 +index a4c25a6283b0b..73a4c9d07b865 100644
46938 +--- a/kernel/rcu/tree.c
46939 ++++ b/kernel/rcu/tree.c
46940 +@@ -91,7 +91,7 @@ static struct rcu_state rcu_state = {
46941 + .abbr = RCU_ABBR,
46942 + .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
46943 + .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
46944 +- .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
46945 ++ .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
46946 + };
46947 +
46948 + /* Dump rcu_node combining tree at boot to verify correct setup. */
46949 +@@ -1175,7 +1175,15 @@ bool rcu_lockdep_current_cpu_online(void)
46950 + preempt_disable_notrace();
46951 + rdp = this_cpu_ptr(&rcu_data);
46952 + rnp = rdp->mynode;
46953 +- if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
46954 ++ /*
46955 ++ * Strictly, we care here about the case where the current CPU is
46956 ++ * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
46957 ++ * not being up to date. So arch_spin_is_locked() might have a
46958 ++ * false positive if it's held by some *other* CPU, but that's
46959 ++ * OK because that just means a false *negative* on the warning.
46960 ++ */
46961 ++ if (rdp->grpmask & rcu_rnp_online_cpus(rnp) ||
46962 ++ arch_spin_is_locked(&rcu_state.ofl_lock))
46963 + ret = true;
46964 + preempt_enable_notrace();
46965 + return ret;
46966 +@@ -1739,7 +1747,6 @@ static void rcu_strict_gp_boundary(void *unused)
46967 + */
46968 + static noinline_for_stack bool rcu_gp_init(void)
46969 + {
46970 +- unsigned long firstseq;
46971 + unsigned long flags;
46972 + unsigned long oldmask;
46973 + unsigned long mask;
46974 +@@ -1782,22 +1789,17 @@ static noinline_for_stack bool rcu_gp_init(void)
46975 + * of RCU's Requirements documentation.
46976 + */
46977 + WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
46978 ++ /* Exclude CPU hotplug operations. */
46979 + rcu_for_each_leaf_node(rnp) {
46980 +- // Wait for CPU-hotplug operations that might have
46981 +- // started before this grace period did.
46982 +- smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
46983 +- firstseq = READ_ONCE(rnp->ofl_seq);
46984 +- if (firstseq & 0x1)
46985 +- while (firstseq == READ_ONCE(rnp->ofl_seq))
46986 +- schedule_timeout_idle(1); // Can't wake unless RCU is watching.
46987 +- smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
46988 +- raw_spin_lock(&rcu_state.ofl_lock);
46989 +- raw_spin_lock_irq_rcu_node(rnp);
46990 ++ local_irq_save(flags);
46991 ++ arch_spin_lock(&rcu_state.ofl_lock);
46992 ++ raw_spin_lock_rcu_node(rnp);
46993 + if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
46994 + !rnp->wait_blkd_tasks) {
46995 + /* Nothing to do on this leaf rcu_node structure. */
46996 +- raw_spin_unlock_irq_rcu_node(rnp);
46997 +- raw_spin_unlock(&rcu_state.ofl_lock);
46998 ++ raw_spin_unlock_rcu_node(rnp);
46999 ++ arch_spin_unlock(&rcu_state.ofl_lock);
47000 ++ local_irq_restore(flags);
47001 + continue;
47002 + }
47003 +
47004 +@@ -1832,8 +1834,9 @@ static noinline_for_stack bool rcu_gp_init(void)
47005 + rcu_cleanup_dead_rnp(rnp);
47006 + }
47007 +
47008 +- raw_spin_unlock_irq_rcu_node(rnp);
47009 +- raw_spin_unlock(&rcu_state.ofl_lock);
47010 ++ raw_spin_unlock_rcu_node(rnp);
47011 ++ arch_spin_unlock(&rcu_state.ofl_lock);
47012 ++ local_irq_restore(flags);
47013 + }
47014 + rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
47015 +
47016 +@@ -4287,11 +4290,10 @@ void rcu_cpu_starting(unsigned int cpu)
47017 +
47018 + rnp = rdp->mynode;
47019 + mask = rdp->grpmask;
47020 +- WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
47021 +- WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
47022 ++ local_irq_save(flags);
47023 ++ arch_spin_lock(&rcu_state.ofl_lock);
47024 + rcu_dynticks_eqs_online();
47025 +- smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
47026 +- raw_spin_lock_irqsave_rcu_node(rnp, flags);
47027 ++ raw_spin_lock_rcu_node(rnp);
47028 + WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
47029 + newcpu = !(rnp->expmaskinitnext & mask);
47030 + rnp->expmaskinitnext |= mask;
47031 +@@ -4304,15 +4306,18 @@ void rcu_cpu_starting(unsigned int cpu)
47032 +
47033 + /* An incoming CPU should never be blocking a grace period. */
47034 + if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
47035 ++ /* rcu_report_qs_rnp() *really* wants some flags to restore */
47036 ++ unsigned long flags2;
47037 ++
47038 ++ local_irq_save(flags2);
47039 + rcu_disable_urgency_upon_qs(rdp);
47040 + /* Report QS -after- changing ->qsmaskinitnext! */
47041 +- rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
47042 ++ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
47043 + } else {
47044 +- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
47045 ++ raw_spin_unlock_rcu_node(rnp);
47046 + }
47047 +- smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
47048 +- WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
47049 +- WARN_ON_ONCE(rnp->ofl_seq & 0x1);
47050 ++ arch_spin_unlock(&rcu_state.ofl_lock);
47051 ++ local_irq_restore(flags);
47052 + smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
47053 + }
47054 +
47055 +@@ -4326,7 +4331,7 @@ void rcu_cpu_starting(unsigned int cpu)
47056 + */
47057 + void rcu_report_dead(unsigned int cpu)
47058 + {
47059 +- unsigned long flags;
47060 ++ unsigned long flags, seq_flags;
47061 + unsigned long mask;
47062 + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
47063 + struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
47064 +@@ -4340,10 +4345,8 @@ void rcu_report_dead(unsigned int cpu)
47065 +
47066 + /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
47067 + mask = rdp->grpmask;
47068 +- WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
47069 +- WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
47070 +- smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
47071 +- raw_spin_lock(&rcu_state.ofl_lock);
47072 ++ local_irq_save(seq_flags);
47073 ++ arch_spin_lock(&rcu_state.ofl_lock);
47074 + raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
47075 + rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
47076 + rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
47077 +@@ -4354,10 +4357,8 @@ void rcu_report_dead(unsigned int cpu)
47078 + }
47079 + WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
47080 + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
47081 +- raw_spin_unlock(&rcu_state.ofl_lock);
47082 +- smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
47083 +- WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
47084 +- WARN_ON_ONCE(rnp->ofl_seq & 0x1);
47085 ++ arch_spin_unlock(&rcu_state.ofl_lock);
47086 ++ local_irq_restore(seq_flags);
47087 +
47088 + rdp->cpu_started = false;
47089 + }
47090 +diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
47091 +index 486fc901bd085..4b4bcef8a9743 100644
47092 +--- a/kernel/rcu/tree.h
47093 ++++ b/kernel/rcu/tree.h
47094 +@@ -56,8 +56,6 @@ struct rcu_node {
47095 + /* Initialized from ->qsmaskinitnext at the */
47096 + /* beginning of each grace period. */
47097 + unsigned long qsmaskinitnext;
47098 +- unsigned long ofl_seq; /* CPU-hotplug operation sequence count. */
47099 +- /* Online CPUs for next grace period. */
47100 + unsigned long expmask; /* CPUs or groups that need to check in */
47101 + /* to allow the current expedited GP */
47102 + /* to complete. */
47103 +@@ -355,7 +353,7 @@ struct rcu_state {
47104 + const char *name; /* Name of structure. */
47105 + char abbr; /* Abbreviated name. */
47106 +
47107 +- raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
47108 ++ arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
47109 + /* Synchronize offline with */
47110 + /* GP pre-initialization. */
47111 + };
47112 +diff --git a/kernel/resource.c b/kernel/resource.c
47113 +index 9c08d6e9eef27..34eaee179689a 100644
47114 +--- a/kernel/resource.c
47115 ++++ b/kernel/resource.c
47116 +@@ -56,14 +56,6 @@ struct resource_constraint {
47117 +
47118 + static DEFINE_RWLOCK(resource_lock);
47119 +
47120 +-/*
47121 +- * For memory hotplug, there is no way to free resource entries allocated
47122 +- * by boot mem after the system is up. So for reusing the resource entry
47123 +- * we need to remember the resource.
47124 +- */
47125 +-static struct resource *bootmem_resource_free;
47126 +-static DEFINE_SPINLOCK(bootmem_resource_lock);
47127 +-
47128 + static struct resource *next_resource(struct resource *p)
47129 + {
47130 + if (p->child)
47131 +@@ -160,36 +152,19 @@ __initcall(ioresources_init);
47132 +
47133 + static void free_resource(struct resource *res)
47134 + {
47135 +- if (!res)
47136 +- return;
47137 +-
47138 +- if (!PageSlab(virt_to_head_page(res))) {
47139 +- spin_lock(&bootmem_resource_lock);
47140 +- res->sibling = bootmem_resource_free;
47141 +- bootmem_resource_free = res;
47142 +- spin_unlock(&bootmem_resource_lock);
47143 +- } else {
47144 ++ /**
47145 ++ * If the resource was allocated using memblock early during boot
47146 ++ * we'll leak it here: we can only return full pages back to the
47147 ++ * buddy and trying to be smart and reusing them eventually in
47148 ++ * alloc_resource() overcomplicates resource handling.
47149 ++ */
47150 ++ if (res && PageSlab(virt_to_head_page(res)))
47151 + kfree(res);
47152 +- }
47153 + }
47154 +
47155 + static struct resource *alloc_resource(gfp_t flags)
47156 + {
47157 +- struct resource *res = NULL;
47158 +-
47159 +- spin_lock(&bootmem_resource_lock);
47160 +- if (bootmem_resource_free) {
47161 +- res = bootmem_resource_free;
47162 +- bootmem_resource_free = res->sibling;
47163 +- }
47164 +- spin_unlock(&bootmem_resource_lock);
47165 +-
47166 +- if (res)
47167 +- memset(res, 0, sizeof(struct resource));
47168 +- else
47169 +- res = kzalloc(sizeof(struct resource), flags);
47170 +-
47171 +- return res;
47172 ++ return kzalloc(sizeof(struct resource), flags);
47173 + }
47174 +
47175 + /* Return the conflict entry if you can't request it */
47176 +diff --git a/kernel/rseq.c b/kernel/rseq.c
47177 +index 6d45ac3dae7fb..97ac20b4f7387 100644
47178 +--- a/kernel/rseq.c
47179 ++++ b/kernel/rseq.c
47180 +@@ -128,10 +128,10 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
47181 + int ret;
47182 +
47183 + #ifdef CONFIG_64BIT
47184 +- if (get_user(ptr, &t->rseq->rseq_cs.ptr64))
47185 ++ if (get_user(ptr, &t->rseq->rseq_cs))
47186 + return -EFAULT;
47187 + #else
47188 +- if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
47189 ++ if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))
47190 + return -EFAULT;
47191 + #endif
47192 + if (!ptr) {
47193 +@@ -217,9 +217,9 @@ static int clear_rseq_cs(struct task_struct *t)
47194 + * Set rseq_cs to NULL.
47195 + */
47196 + #ifdef CONFIG_64BIT
47197 +- return put_user(0UL, &t->rseq->rseq_cs.ptr64);
47198 ++ return put_user(0UL, &t->rseq->rseq_cs);
47199 + #else
47200 +- if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
47201 ++ if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
47202 + return -EFAULT;
47203 + return 0;
47204 + #endif
47205 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
47206 +index 9745613d531ce..1620ae8535dcf 100644
47207 +--- a/kernel/sched/core.c
47208 ++++ b/kernel/sched/core.c
47209 +@@ -36,6 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
47210 + EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
47211 + EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
47212 + EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
47213 ++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
47214 + EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
47215 + EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
47216 + EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
47217 +diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
47218 +index 3d06c5e4220d4..307800586ac81 100644
47219 +--- a/kernel/sched/cpuacct.c
47220 ++++ b/kernel/sched/cpuacct.c
47221 +@@ -334,12 +334,13 @@ static struct cftype files[] = {
47222 + */
47223 + void cpuacct_charge(struct task_struct *tsk, u64 cputime)
47224 + {
47225 ++ unsigned int cpu = task_cpu(tsk);
47226 + struct cpuacct *ca;
47227 +
47228 + rcu_read_lock();
47229 +
47230 + for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
47231 +- __this_cpu_add(*ca->cpuusage, cputime);
47232 ++ *per_cpu_ptr(ca->cpuusage, cpu) += cputime;
47233 +
47234 + rcu_read_unlock();
47235 + }
47236 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
47237 +index 26778884d9ab1..6d65ab6e484e2 100644
47238 +--- a/kernel/sched/cpufreq_schedutil.c
47239 ++++ b/kernel/sched/cpufreq_schedutil.c
47240 +@@ -289,6 +289,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
47241 + * into the same scale so we can compare.
47242 + */
47243 + boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
47244 ++ boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
47245 + if (sg_cpu->util < boost)
47246 + sg_cpu->util = boost;
47247 + }
47248 +@@ -348,8 +349,11 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
47249 + /*
47250 + * Do not reduce the frequency if the CPU has not been idle
47251 + * recently, as the reduction is likely to be premature then.
47252 ++ *
47253 ++ * Except when the rq is capped by uclamp_max.
47254 + */
47255 +- if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
47256 ++ if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
47257 ++ sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
47258 + next_f = sg_policy->next_freq;
47259 +
47260 + /* Restore cached freq as next_freq has changed */
47261 +@@ -395,8 +399,11 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
47262 + /*
47263 + * Do not reduce the target performance level if the CPU has not been
47264 + * idle recently, as the reduction is likely to be premature then.
47265 ++ *
47266 ++ * Except when the rq is capped by uclamp_max.
47267 + */
47268 +- if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
47269 ++ if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
47270 ++ sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
47271 + sg_cpu->util = prev_util;
47272 +
47273 + cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
47274 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
47275 +index d2c072b0ef01f..62f0cf8422775 100644
47276 +--- a/kernel/sched/deadline.c
47277 ++++ b/kernel/sched/deadline.c
47278 +@@ -2240,12 +2240,6 @@ static int push_dl_task(struct rq *rq)
47279 + return 0;
47280 +
47281 + retry:
47282 +- if (is_migration_disabled(next_task))
47283 +- return 0;
47284 +-
47285 +- if (WARN_ON(next_task == rq->curr))
47286 +- return 0;
47287 +-
47288 + /*
47289 + * If next_task preempts rq->curr, and rq->curr
47290 + * can move away, it makes sense to just reschedule
47291 +@@ -2258,6 +2252,12 @@ retry:
47292 + return 0;
47293 + }
47294 +
47295 ++ if (is_migration_disabled(next_task))
47296 ++ return 0;
47297 ++
47298 ++ if (WARN_ON(next_task == rq->curr))
47299 ++ return 0;
47300 ++
47301 + /* We might release rq lock */
47302 + get_task_struct(next_task);
47303 +
47304 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
47305 +index aa29211de1bf8..102d6f70e84d3 100644
47306 +--- a/kernel/sched/debug.c
47307 ++++ b/kernel/sched/debug.c
47308 +@@ -931,25 +931,15 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
47309 + static void sched_show_numa(struct task_struct *p, struct seq_file *m)
47310 + {
47311 + #ifdef CONFIG_NUMA_BALANCING
47312 +- struct mempolicy *pol;
47313 +-
47314 + if (p->mm)
47315 + P(mm->numa_scan_seq);
47316 +
47317 +- task_lock(p);
47318 +- pol = p->mempolicy;
47319 +- if (pol && !(pol->flags & MPOL_F_MORON))
47320 +- pol = NULL;
47321 +- mpol_get(pol);
47322 +- task_unlock(p);
47323 +-
47324 + P(numa_pages_migrated);
47325 + P(numa_preferred_nid);
47326 + P(total_numa_faults);
47327 + SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
47328 + task_node(p), task_numa_group_id(p));
47329 + show_numa_stats(p, m);
47330 +- mpol_put(pol);
47331 + #endif
47332 + }
47333 +
47334 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
47335 +index 5146163bfabb9..cddcf2f4f5251 100644
47336 +--- a/kernel/sched/fair.c
47337 ++++ b/kernel/sched/fair.c
47338 +@@ -9040,9 +9040,10 @@ static bool update_pick_idlest(struct sched_group *idlest,
47339 + * This is an approximation as the number of running tasks may not be
47340 + * related to the number of busy CPUs due to sched_setaffinity.
47341 + */
47342 +-static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
47343 ++static inline bool
47344 ++allow_numa_imbalance(unsigned int running, unsigned int weight)
47345 + {
47346 +- return (dst_running < (dst_weight >> 2));
47347 ++ return (running < (weight >> 2));
47348 + }
47349 +
47350 + /*
47351 +@@ -9176,12 +9177,13 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
47352 + return idlest;
47353 + #endif
47354 + /*
47355 +- * Otherwise, keep the task on this node to stay close
47356 +- * its wakeup source and improve locality. If there is
47357 +- * a real need of migration, periodic load balance will
47358 +- * take care of it.
47359 ++ * Otherwise, keep the task close to the wakeup source
47360 ++ * and improve locality if the number of running tasks
47361 ++ * would remain below threshold where an imbalance is
47362 ++ * allowed. If there is a real need of migration,
47363 ++ * periodic load balance will take care of it.
47364 + */
47365 +- if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
47366 ++ if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, local_sgs.group_weight))
47367 + return NULL;
47368 + }
47369 +
47370 +@@ -9387,7 +9389,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
47371 + /* Consider allowing a small imbalance between NUMA groups */
47372 + if (env->sd->flags & SD_NUMA) {
47373 + env->imbalance = adjust_numa_imbalance(env->imbalance,
47374 +- busiest->sum_nr_running, busiest->group_weight);
47375 ++ local->sum_nr_running + 1, local->group_weight);
47376 + }
47377 +
47378 + return;
47379 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
47380 +index 7b4f4fbbb4048..14f273c295183 100644
47381 +--- a/kernel/sched/rt.c
47382 ++++ b/kernel/sched/rt.c
47383 +@@ -2026,6 +2026,16 @@ static int push_rt_task(struct rq *rq, bool pull)
47384 + return 0;
47385 +
47386 + retry:
47387 ++ /*
47388 ++ * It's possible that the next_task slipped in of
47389 ++ * higher priority than current. If that's the case
47390 ++ * just reschedule current.
47391 ++ */
47392 ++ if (unlikely(next_task->prio < rq->curr->prio)) {
47393 ++ resched_curr(rq);
47394 ++ return 0;
47395 ++ }
47396 ++
47397 + if (is_migration_disabled(next_task)) {
47398 + struct task_struct *push_task = NULL;
47399 + int cpu;
47400 +@@ -2033,6 +2043,18 @@ retry:
47401 + if (!pull || rq->push_busy)
47402 + return 0;
47403 +
47404 ++ /*
47405 ++ * Invoking find_lowest_rq() on anything but an RT task doesn't
47406 ++ * make sense. Per the above priority check, curr has to
47407 ++ * be of higher priority than next_task, so no need to
47408 ++ * reschedule when bailing out.
47409 ++ *
47410 ++ * Note that the stoppers are masqueraded as SCHED_FIFO
47411 ++ * (cf. sched_set_stop_task()), so we can't rely on rt_task().
47412 ++ */
47413 ++ if (rq->curr->sched_class != &rt_sched_class)
47414 ++ return 0;
47415 ++
47416 + cpu = find_lowest_rq(rq->curr);
47417 + if (cpu == -1 || cpu == rq->cpu)
47418 + return 0;
47419 +@@ -2057,16 +2079,6 @@ retry:
47420 + if (WARN_ON(next_task == rq->curr))
47421 + return 0;
47422 +
47423 +- /*
47424 +- * It's possible that the next_task slipped in of
47425 +- * higher priority than current. If that's the case
47426 +- * just reschedule current.
47427 +- */
47428 +- if (unlikely(next_task->prio < rq->curr->prio)) {
47429 +- resched_curr(rq);
47430 +- return 0;
47431 +- }
47432 +-
47433 + /* We might release rq lock */
47434 + get_task_struct(next_task);
47435 +
47436 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
47437 +index de53be9057390..9b33ba9c3c420 100644
47438 +--- a/kernel/sched/sched.h
47439 ++++ b/kernel/sched/sched.h
47440 +@@ -2841,88 +2841,6 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
47441 + static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
47442 + #endif /* CONFIG_CPU_FREQ */
47443 +
47444 +-#ifdef CONFIG_UCLAMP_TASK
47445 +-unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
47446 +-
47447 +-/**
47448 +- * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
47449 +- * @rq: The rq to clamp against. Must not be NULL.
47450 +- * @util: The util value to clamp.
47451 +- * @p: The task to clamp against. Can be NULL if you want to clamp
47452 +- * against @rq only.
47453 +- *
47454 +- * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
47455 +- *
47456 +- * If sched_uclamp_used static key is disabled, then just return the util
47457 +- * without any clamping since uclamp aggregation at the rq level in the fast
47458 +- * path is disabled, rendering this operation a NOP.
47459 +- *
47460 +- * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
47461 +- * will return the correct effective uclamp value of the task even if the
47462 +- * static key is disabled.
47463 +- */
47464 +-static __always_inline
47465 +-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
47466 +- struct task_struct *p)
47467 +-{
47468 +- unsigned long min_util = 0;
47469 +- unsigned long max_util = 0;
47470 +-
47471 +- if (!static_branch_likely(&sched_uclamp_used))
47472 +- return util;
47473 +-
47474 +- if (p) {
47475 +- min_util = uclamp_eff_value(p, UCLAMP_MIN);
47476 +- max_util = uclamp_eff_value(p, UCLAMP_MAX);
47477 +-
47478 +- /*
47479 +- * Ignore last runnable task's max clamp, as this task will
47480 +- * reset it. Similarly, no need to read the rq's min clamp.
47481 +- */
47482 +- if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
47483 +- goto out;
47484 +- }
47485 +-
47486 +- min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
47487 +- max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
47488 +-out:
47489 +- /*
47490 +- * Since CPU's {min,max}_util clamps are MAX aggregated considering
47491 +- * RUNNABLE tasks with _different_ clamps, we can end up with an
47492 +- * inversion. Fix it now when the clamps are applied.
47493 +- */
47494 +- if (unlikely(min_util >= max_util))
47495 +- return min_util;
47496 +-
47497 +- return clamp(util, min_util, max_util);
47498 +-}
47499 +-
47500 +-/*
47501 +- * When uclamp is compiled in, the aggregation at rq level is 'turned off'
47502 +- * by default in the fast path and only gets turned on once userspace performs
47503 +- * an operation that requires it.
47504 +- *
47505 +- * Returns true if userspace opted-in to use uclamp and aggregation at rq level
47506 +- * hence is active.
47507 +- */
47508 +-static inline bool uclamp_is_used(void)
47509 +-{
47510 +- return static_branch_likely(&sched_uclamp_used);
47511 +-}
47512 +-#else /* CONFIG_UCLAMP_TASK */
47513 +-static inline
47514 +-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
47515 +- struct task_struct *p)
47516 +-{
47517 +- return util;
47518 +-}
47519 +-
47520 +-static inline bool uclamp_is_used(void)
47521 +-{
47522 +- return false;
47523 +-}
47524 +-#endif /* CONFIG_UCLAMP_TASK */
47525 +-
47526 + #ifdef arch_scale_freq_capacity
47527 + # ifndef arch_scale_freq_invariant
47528 + # define arch_scale_freq_invariant() true
47529 +@@ -3020,6 +2938,105 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
47530 + }
47531 + #endif
47532 +
47533 ++#ifdef CONFIG_UCLAMP_TASK
47534 ++unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
47535 ++
47536 ++/**
47537 ++ * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
47538 ++ * @rq: The rq to clamp against. Must not be NULL.
47539 ++ * @util: The util value to clamp.
47540 ++ * @p: The task to clamp against. Can be NULL if you want to clamp
47541 ++ * against @rq only.
47542 ++ *
47543 ++ * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
47544 ++ *
47545 ++ * If sched_uclamp_used static key is disabled, then just return the util
47546 ++ * without any clamping since uclamp aggregation at the rq level in the fast
47547 ++ * path is disabled, rendering this operation a NOP.
47548 ++ *
47549 ++ * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
47550 ++ * will return the correct effective uclamp value of the task even if the
47551 ++ * static key is disabled.
47552 ++ */
47553 ++static __always_inline
47554 ++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
47555 ++ struct task_struct *p)
47556 ++{
47557 ++ unsigned long min_util = 0;
47558 ++ unsigned long max_util = 0;
47559 ++
47560 ++ if (!static_branch_likely(&sched_uclamp_used))
47561 ++ return util;
47562 ++
47563 ++ if (p) {
47564 ++ min_util = uclamp_eff_value(p, UCLAMP_MIN);
47565 ++ max_util = uclamp_eff_value(p, UCLAMP_MAX);
47566 ++
47567 ++ /*
47568 ++ * Ignore last runnable task's max clamp, as this task will
47569 ++ * reset it. Similarly, no need to read the rq's min clamp.
47570 ++ */
47571 ++ if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
47572 ++ goto out;
47573 ++ }
47574 ++
47575 ++ min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
47576 ++ max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
47577 ++out:
47578 ++ /*
47579 ++ * Since CPU's {min,max}_util clamps are MAX aggregated considering
47580 ++ * RUNNABLE tasks with _different_ clamps, we can end up with an
47581 ++ * inversion. Fix it now when the clamps are applied.
47582 ++ */
47583 ++ if (unlikely(min_util >= max_util))
47584 ++ return min_util;
47585 ++
47586 ++ return clamp(util, min_util, max_util);
47587 ++}
47588 ++
47589 ++/* Is the rq being capped/throttled by uclamp_max? */
47590 ++static inline bool uclamp_rq_is_capped(struct rq *rq)
47591 ++{
47592 ++ unsigned long rq_util;
47593 ++ unsigned long max_util;
47594 ++
47595 ++ if (!static_branch_likely(&sched_uclamp_used))
47596 ++ return false;
47597 ++
47598 ++ rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
47599 ++ max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
47600 ++
47601 ++ return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
47602 ++}
47603 ++
47604 ++/*
47605 ++ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
47606 ++ * by default in the fast path and only gets turned on once userspace performs
47607 ++ * an operation that requires it.
47608 ++ *
47609 ++ * Returns true if userspace opted-in to use uclamp and aggregation at rq level
47610 ++ * hence is active.
47611 ++ */
47612 ++static inline bool uclamp_is_used(void)
47613 ++{
47614 ++ return static_branch_likely(&sched_uclamp_used);
47615 ++}
47616 ++#else /* CONFIG_UCLAMP_TASK */
47617 ++static inline
47618 ++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
47619 ++ struct task_struct *p)
47620 ++{
47621 ++ return util;
47622 ++}
47623 ++
47624 ++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
47625 ++
47626 ++static inline bool uclamp_is_used(void)
47627 ++{
47628 ++ return false;
47629 ++}
47630 ++#endif /* CONFIG_UCLAMP_TASK */
47631 ++
47632 + #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
47633 + static inline unsigned long cpu_util_irq(struct rq *rq)
47634 + {
47635 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
47636 +index eb44418574f9c..96265a717ca4e 100644
47637 +--- a/kernel/trace/trace.c
47638 ++++ b/kernel/trace/trace.c
47639 +@@ -3663,12 +3663,17 @@ static char *trace_iter_expand_format(struct trace_iterator *iter)
47640 + }
47641 +
47642 + /* Returns true if the string is safe to dereference from an event */
47643 +-static bool trace_safe_str(struct trace_iterator *iter, const char *str)
47644 ++static bool trace_safe_str(struct trace_iterator *iter, const char *str,
47645 ++ bool star, int len)
47646 + {
47647 + unsigned long addr = (unsigned long)str;
47648 + struct trace_event *trace_event;
47649 + struct trace_event_call *event;
47650 +
47651 ++ /* Ignore strings with no length */
47652 ++ if (star && !len)
47653 ++ return true;
47654 ++
47655 + /* OK if part of the event data */
47656 + if ((addr >= (unsigned long)iter->ent) &&
47657 + (addr < (unsigned long)iter->ent + iter->ent_size))
47658 +@@ -3854,7 +3859,7 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
47659 + * instead. See samples/trace_events/trace-events-sample.h
47660 + * for reference.
47661 + */
47662 +- if (WARN_ONCE(!trace_safe_str(iter, str),
47663 ++ if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
47664 + "fmt: '%s' current_buffer: '%s'",
47665 + fmt, show_buffer(&iter->seq))) {
47666 + int ret;
47667 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
47668 +index 3147614c1812a..25b5d0f9f3758 100644
47669 +--- a/kernel/trace/trace_events.c
47670 ++++ b/kernel/trace/trace_events.c
47671 +@@ -40,6 +40,14 @@ static LIST_HEAD(ftrace_generic_fields);
47672 + static LIST_HEAD(ftrace_common_fields);
47673 + static bool eventdir_initialized;
47674 +
47675 ++static LIST_HEAD(module_strings);
47676 ++
47677 ++struct module_string {
47678 ++ struct list_head next;
47679 ++ struct module *module;
47680 ++ char *str;
47681 ++};
47682 ++
47683 + #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
47684 +
47685 + static struct kmem_cache *field_cachep;
47686 +@@ -2633,6 +2641,76 @@ static void update_event_printk(struct trace_event_call *call,
47687 + }
47688 + }
47689 +
47690 ++static void add_str_to_module(struct module *module, char *str)
47691 ++{
47692 ++ struct module_string *modstr;
47693 ++
47694 ++ modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
47695 ++
47696 ++ /*
47697 ++ * If we failed to allocate memory here, then we'll just
47698 ++ * let the str memory leak when the module is removed.
47699 ++ * If this fails to allocate, there's worse problems than
47700 ++ * a leaked string on module removal.
47701 ++ */
47702 ++ if (WARN_ON_ONCE(!modstr))
47703 ++ return;
47704 ++
47705 ++ modstr->module = module;
47706 ++ modstr->str = str;
47707 ++
47708 ++ list_add(&modstr->next, &module_strings);
47709 ++}
47710 ++
47711 ++static void update_event_fields(struct trace_event_call *call,
47712 ++ struct trace_eval_map *map)
47713 ++{
47714 ++ struct ftrace_event_field *field;
47715 ++ struct list_head *head;
47716 ++ char *ptr;
47717 ++ char *str;
47718 ++ int len = strlen(map->eval_string);
47719 ++
47720 ++ /* Dynamic events should never have field maps */
47721 ++ if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
47722 ++ return;
47723 ++
47724 ++ head = trace_get_fields(call);
47725 ++ list_for_each_entry(field, head, link) {
47726 ++ ptr = strchr(field->type, '[');
47727 ++ if (!ptr)
47728 ++ continue;
47729 ++ ptr++;
47730 ++
47731 ++ if (!isalpha(*ptr) && *ptr != '_')
47732 ++ continue;
47733 ++
47734 ++ if (strncmp(map->eval_string, ptr, len) != 0)
47735 ++ continue;
47736 ++
47737 ++ str = kstrdup(field->type, GFP_KERNEL);
47738 ++ if (WARN_ON_ONCE(!str))
47739 ++ return;
47740 ++ ptr = str + (ptr - field->type);
47741 ++ ptr = eval_replace(ptr, map, len);
47742 ++ /* enum/sizeof string smaller than value */
47743 ++ if (WARN_ON_ONCE(!ptr)) {
47744 ++ kfree(str);
47745 ++ continue;
47746 ++ }
47747 ++
47748 ++ /*
47749 ++ * If the event is part of a module, then we need to free the string
47750 ++ * when the module is removed. Otherwise, it will stay allocated
47751 ++ * until a reboot.
47752 ++ */
47753 ++ if (call->module)
47754 ++ add_str_to_module(call->module, str);
47755 ++
47756 ++ field->type = str;
47757 ++ }
47758 ++}
47759 ++
47760 + void trace_event_eval_update(struct trace_eval_map **map, int len)
47761 + {
47762 + struct trace_event_call *call, *p;
47763 +@@ -2668,6 +2746,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
47764 + first = false;
47765 + }
47766 + update_event_printk(call, map[i]);
47767 ++ update_event_fields(call, map[i]);
47768 + }
47769 + }
47770 + }
47771 +@@ -2853,6 +2932,7 @@ static void trace_module_add_events(struct module *mod)
47772 + static void trace_module_remove_events(struct module *mod)
47773 + {
47774 + struct trace_event_call *call, *p;
47775 ++ struct module_string *modstr, *m;
47776 +
47777 + down_write(&trace_event_sem);
47778 + list_for_each_entry_safe(call, p, &ftrace_events, list) {
47779 +@@ -2861,6 +2941,14 @@ static void trace_module_remove_events(struct module *mod)
47780 + if (call->module == mod)
47781 + __trace_remove_event_call(call);
47782 + }
47783 ++ /* Check for any strings allocade for this module */
47784 ++ list_for_each_entry_safe(modstr, m, &module_strings, next) {
47785 ++ if (modstr->module != mod)
47786 ++ continue;
47787 ++ list_del(&modstr->next);
47788 ++ kfree(modstr->str);
47789 ++ kfree(modstr);
47790 ++ }
47791 + up_write(&trace_event_sem);
47792 +
47793 + /*
47794 +diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
47795 +index 00703444a2194..230038d4f9081 100644
47796 +--- a/kernel/watch_queue.c
47797 ++++ b/kernel/watch_queue.c
47798 +@@ -271,7 +271,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
47799 + return 0;
47800 +
47801 + error_p:
47802 +- for (i = 0; i < nr_pages; i++)
47803 ++ while (--i >= 0)
47804 + __free_page(pages[i]);
47805 + kfree(pages);
47806 + error:
47807 +@@ -370,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)
47808 +
47809 + for (i = 0; i < wqueue->nr_pages; i++)
47810 + __free_page(wqueue->notes[i]);
47811 ++ kfree(wqueue->notes);
47812 + bitmap_free(wqueue->notes_bitmap);
47813 +
47814 + wfilter = rcu_access_pointer(wqueue->filter);
47815 +@@ -395,6 +396,7 @@ static void free_watch(struct rcu_head *rcu)
47816 + put_watch_queue(rcu_access_pointer(watch->queue));
47817 + atomic_dec(&watch->cred->user->nr_watches);
47818 + put_cred(watch->cred);
47819 ++ kfree(watch);
47820 + }
47821 +
47822 + static void __put_watch(struct kref *kref)
47823 +diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
47824 +index be38a2c5ecc2b..42825941f19f2 100644
47825 +--- a/lib/kunit/try-catch.c
47826 ++++ b/lib/kunit/try-catch.c
47827 +@@ -52,7 +52,7 @@ static unsigned long kunit_test_timeout(void)
47828 + * If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
47829 + * the task will be killed and an oops generated.
47830 + */
47831 +- return 300 * MSEC_PER_SEC; /* 5 min */
47832 ++ return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
47833 + }
47834 +
47835 + void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
47836 +diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
47837 +index a4c7cd74cff58..4fb7700a741bd 100644
47838 +--- a/lib/raid6/test/Makefile
47839 ++++ b/lib/raid6/test/Makefile
47840 +@@ -4,6 +4,8 @@
47841 + # from userspace.
47842 + #
47843 +
47844 ++pound := \#
47845 ++
47846 + CC = gcc
47847 + OPTFLAGS = -O2 # Adjust as desired
47848 + CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
47849 +@@ -42,7 +44,7 @@ else ifeq ($(HAS_NEON),yes)
47850 + OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
47851 + CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
47852 + else
47853 +- HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
47854 ++ HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
47855 + gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
47856 + ifeq ($(HAS_ALTIVEC),yes)
47857 + CFLAGS += -I../../../arch/powerpc/include
47858 +diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
47859 +index a3cf071941ab4..841a55242abaa 100644
47860 +--- a/lib/raid6/test/test.c
47861 ++++ b/lib/raid6/test/test.c
47862 +@@ -19,7 +19,6 @@
47863 + #define NDISKS 16 /* Including P and Q */
47864 +
47865 + const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
47866 +-struct raid6_calls raid6_call;
47867 +
47868 + char *dataptrs[NDISKS];
47869 + char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
47870 +diff --git a/lib/test_kmod.c b/lib/test_kmod.c
47871 +index ce15893914131..cb800b1d0d99c 100644
47872 +--- a/lib/test_kmod.c
47873 ++++ b/lib/test_kmod.c
47874 +@@ -1149,6 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
47875 + if (ret) {
47876 + pr_err("could not register misc device: %d\n", ret);
47877 + free_test_dev_kmod(test_dev);
47878 ++ test_dev = NULL;
47879 + goto out;
47880 + }
47881 +
47882 +diff --git a/lib/test_lockup.c b/lib/test_lockup.c
47883 +index 906b598740a7b..c3fd87d6c2dd0 100644
47884 +--- a/lib/test_lockup.c
47885 ++++ b/lib/test_lockup.c
47886 +@@ -417,9 +417,14 @@ static bool test_kernel_ptr(unsigned long addr, int size)
47887 + return false;
47888 +
47889 + /* should be at least readable kernel address */
47890 +- if (access_ok(ptr, 1) ||
47891 +- access_ok(ptr + size - 1, 1) ||
47892 +- get_kernel_nofault(buf, ptr) ||
47893 ++ if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) &&
47894 ++ (access_ok((void __user *)ptr, 1) ||
47895 ++ access_ok((void __user *)ptr + size - 1, 1))) {
47896 ++ pr_err("user space ptr invalid in kernel: %#lx\n", addr);
47897 ++ return true;
47898 ++ }
47899 ++
47900 ++ if (get_kernel_nofault(buf, ptr) ||
47901 + get_kernel_nofault(buf, ptr + size - 1)) {
47902 + pr_err("invalid kernel ptr: %#lx\n", addr);
47903 + return true;
47904 +diff --git a/lib/test_xarray.c b/lib/test_xarray.c
47905 +index 8b1c318189ce8..e77d4856442c3 100644
47906 +--- a/lib/test_xarray.c
47907 ++++ b/lib/test_xarray.c
47908 +@@ -1463,6 +1463,25 @@ unlock:
47909 + XA_BUG_ON(xa, !xa_empty(xa));
47910 + }
47911 +
47912 ++static noinline void check_create_range_5(struct xarray *xa,
47913 ++ unsigned long index, unsigned int order)
47914 ++{
47915 ++ XA_STATE_ORDER(xas, xa, index, order);
47916 ++ unsigned int i;
47917 ++
47918 ++ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
47919 ++
47920 ++ for (i = 0; i < order + 10; i++) {
47921 ++ do {
47922 ++ xas_lock(&xas);
47923 ++ xas_create_range(&xas);
47924 ++ xas_unlock(&xas);
47925 ++ } while (xas_nomem(&xas, GFP_KERNEL));
47926 ++ }
47927 ++
47928 ++ xa_destroy(xa);
47929 ++}
47930 ++
47931 + static noinline void check_create_range(struct xarray *xa)
47932 + {
47933 + unsigned int order;
47934 +@@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa)
47935 + check_create_range_4(xa, (3U << order) + 1, order);
47936 + check_create_range_4(xa, (3U << order) - 1, order);
47937 + check_create_range_4(xa, (1U << 24) + 1, order);
47938 ++
47939 ++ check_create_range_5(xa, 0, order);
47940 ++ check_create_range_5(xa, (1U << order), order);
47941 + }
47942 +
47943 + check_create_range_3();
47944 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
47945 +index 3b8129dd374cd..fbf261bbea950 100644
47946 +--- a/lib/vsprintf.c
47947 ++++ b/lib/vsprintf.c
47948 +@@ -49,10 +49,15 @@
47949 +
47950 + #include <asm/page.h> /* for PAGE_SIZE */
47951 + #include <asm/byteorder.h> /* cpu_to_le16 */
47952 ++#include <asm/unaligned.h>
47953 +
47954 + #include <linux/string_helpers.h>
47955 + #include "kstrtox.h"
47956 +
47957 ++/* Disable pointer hashing if requested */
47958 ++bool no_hash_pointers __ro_after_init;
47959 ++EXPORT_SYMBOL_GPL(no_hash_pointers);
47960 ++
47961 + static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
47962 + {
47963 + const char *cp;
47964 +@@ -848,6 +853,19 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
47965 + return pointer_string(buf, end, (const void *)hashval, spec);
47966 + }
47967 +
47968 ++static char *default_pointer(char *buf, char *end, const void *ptr,
47969 ++ struct printf_spec spec)
47970 ++{
47971 ++ /*
47972 ++ * default is to _not_ leak addresses, so hash before printing,
47973 ++ * unless no_hash_pointers is specified on the command line.
47974 ++ */
47975 ++ if (unlikely(no_hash_pointers))
47976 ++ return pointer_string(buf, end, ptr, spec);
47977 ++
47978 ++ return ptr_to_id(buf, end, ptr, spec);
47979 ++}
47980 ++
47981 + int kptr_restrict __read_mostly;
47982 +
47983 + static noinline_for_stack
47984 +@@ -857,7 +875,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
47985 + switch (kptr_restrict) {
47986 + case 0:
47987 + /* Handle as %p, hash and do _not_ leak addresses. */
47988 +- return ptr_to_id(buf, end, ptr, spec);
47989 ++ return default_pointer(buf, end, ptr, spec);
47990 + case 1: {
47991 + const struct cred *cred;
47992 +
47993 +@@ -1761,7 +1779,7 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
47994 + char output[sizeof("0123 little-endian (0x01234567)")];
47995 + char *p = output;
47996 + unsigned int i;
47997 +- u32 val;
47998 ++ u32 orig, val;
47999 +
48000 + if (fmt[1] != 'c' || fmt[2] != 'c')
48001 + return error_string(buf, end, "(%p4?)", spec);
48002 +@@ -1769,21 +1787,22 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
48003 + if (check_pointer(&buf, end, fourcc, spec))
48004 + return buf;
48005 +
48006 +- val = *fourcc & ~BIT(31);
48007 ++ orig = get_unaligned(fourcc);
48008 ++ val = orig & ~BIT(31);
48009 +
48010 +- for (i = 0; i < sizeof(*fourcc); i++) {
48011 ++ for (i = 0; i < sizeof(u32); i++) {
48012 + unsigned char c = val >> (i * 8);
48013 +
48014 + /* Print non-control ASCII characters as-is, dot otherwise */
48015 + *p++ = isascii(c) && isprint(c) ? c : '.';
48016 + }
48017 +
48018 +- strcpy(p, *fourcc & BIT(31) ? " big-endian" : " little-endian");
48019 ++ strcpy(p, orig & BIT(31) ? " big-endian" : " little-endian");
48020 + p += strlen(p);
48021 +
48022 + *p++ = ' ';
48023 + *p++ = '(';
48024 +- p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32));
48025 ++ p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
48026 + *p++ = ')';
48027 + *p = '\0';
48028 +
48029 +@@ -2223,10 +2242,6 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
48030 + return widen_string(buf, buf - buf_start, end, spec);
48031 + }
48032 +
48033 +-/* Disable pointer hashing if requested */
48034 +-bool no_hash_pointers __ro_after_init;
48035 +-EXPORT_SYMBOL_GPL(no_hash_pointers);
48036 +-
48037 + int __init no_hash_pointers_enable(char *str)
48038 + {
48039 + if (no_hash_pointers)
48040 +@@ -2455,7 +2470,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
48041 + case 'e':
48042 + /* %pe with a non-ERR_PTR gets treated as plain %p */
48043 + if (!IS_ERR(ptr))
48044 +- break;
48045 ++ return default_pointer(buf, end, ptr, spec);
48046 + return err_ptr(buf, end, ptr, spec);
48047 + case 'u':
48048 + case 'k':
48049 +@@ -2465,16 +2480,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
48050 + default:
48051 + return error_string(buf, end, "(einval)", spec);
48052 + }
48053 ++ default:
48054 ++ return default_pointer(buf, end, ptr, spec);
48055 + }
48056 +-
48057 +- /*
48058 +- * default is to _not_ leak addresses, so hash before printing,
48059 +- * unless no_hash_pointers is specified on the command line.
48060 +- */
48061 +- if (unlikely(no_hash_pointers))
48062 +- return pointer_string(buf, end, ptr, spec);
48063 +- else
48064 +- return ptr_to_id(buf, end, ptr, spec);
48065 + }
48066 +
48067 + /*
48068 +diff --git a/lib/xarray.c b/lib/xarray.c
48069 +index 6f47f6375808a..88ca87435e3da 100644
48070 +--- a/lib/xarray.c
48071 ++++ b/lib/xarray.c
48072 +@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas)
48073 +
48074 + for (;;) {
48075 + struct xa_node *node = xas->xa_node;
48076 ++ if (node->shift >= shift)
48077 ++ break;
48078 + xas->xa_node = xa_parent_locked(xas->xa, node);
48079 + xas->xa_offset = node->offset - 1;
48080 + if (node->offset != 0)
48081 +@@ -1079,6 +1081,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
48082 + xa_mk_node(child));
48083 + if (xa_is_value(curr))
48084 + values--;
48085 ++ xas_update(xas, child);
48086 + } else {
48087 + unsigned int canon = offset - xas->xa_sibs;
48088 +
48089 +@@ -1093,6 +1096,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
48090 + } while (offset-- > xas->xa_offset);
48091 +
48092 + node->nr_values += values;
48093 ++ xas_update(xas, node);
48094 + }
48095 + EXPORT_SYMBOL_GPL(xas_split);
48096 + #endif
48097 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
48098 +index 7580baa76af1c..acd7cbb82e160 100644
48099 +--- a/mm/kmemleak.c
48100 ++++ b/mm/kmemleak.c
48101 +@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
48102 + unsigned long flags;
48103 + struct kmemleak_object *object;
48104 + struct kmemleak_scan_area *area = NULL;
48105 ++ unsigned long untagged_ptr;
48106 ++ unsigned long untagged_objp;
48107 +
48108 + object = find_and_get_object(ptr, 1);
48109 + if (!object) {
48110 +@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
48111 + return;
48112 + }
48113 +
48114 ++ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
48115 ++ untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
48116 ++
48117 + if (scan_area_cache)
48118 + area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
48119 +
48120 +@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
48121 + goto out_unlock;
48122 + }
48123 + if (size == SIZE_MAX) {
48124 +- size = object->pointer + object->size - ptr;
48125 +- } else if (ptr + size > object->pointer + object->size) {
48126 ++ size = untagged_objp + object->size - untagged_ptr;
48127 ++ } else if (untagged_ptr + size > untagged_objp + object->size) {
48128 + kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
48129 + dump_object_info(object);
48130 + kmem_cache_free(scan_area_cache, area);
48131 +diff --git a/mm/madvise.c b/mm/madvise.c
48132 +index 38d0f515d5486..e97e6a93d5aee 100644
48133 +--- a/mm/madvise.c
48134 ++++ b/mm/madvise.c
48135 +@@ -1433,8 +1433,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
48136 + iov_iter_advance(&iter, iovec.iov_len);
48137 + }
48138 +
48139 +- if (ret == 0)
48140 +- ret = total_len - iov_iter_count(&iter);
48141 ++ ret = (total_len - iov_iter_count(&iter)) ? : ret;
48142 +
48143 + release_mm:
48144 + mmput(mm);
48145 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
48146 +index 36e9f38c919d0..9b89a340a6629 100644
48147 +--- a/mm/memcontrol.c
48148 ++++ b/mm/memcontrol.c
48149 +@@ -7053,7 +7053,7 @@ static int __init cgroup_memory(char *s)
48150 + if (!strcmp(token, "nokmem"))
48151 + cgroup_memory_nokmem = true;
48152 + }
48153 +- return 0;
48154 ++ return 1;
48155 + }
48156 + __setup("cgroup.memory=", cgroup_memory);
48157 +
48158 +diff --git a/mm/memory.c b/mm/memory.c
48159 +index c125c4969913a..b69afe3dd597a 100644
48160 +--- a/mm/memory.c
48161 ++++ b/mm/memory.c
48162 +@@ -1313,6 +1313,17 @@ struct zap_details {
48163 + struct folio *single_folio; /* Locked folio to be unmapped */
48164 + };
48165 +
48166 ++/* Whether we should zap all COWed (private) pages too */
48167 ++static inline bool should_zap_cows(struct zap_details *details)
48168 ++{
48169 ++ /* By default, zap all pages */
48170 ++ if (!details)
48171 ++ return true;
48172 ++
48173 ++ /* Or, we zap COWed pages only if the caller wants to */
48174 ++ return !details->zap_mapping;
48175 ++}
48176 ++
48177 + /*
48178 + * We set details->zap_mapping when we want to unmap shared but keep private
48179 + * pages. Return true if skip zapping this page, false otherwise.
48180 +@@ -1320,11 +1331,15 @@ struct zap_details {
48181 + static inline bool
48182 + zap_skip_check_mapping(struct zap_details *details, struct page *page)
48183 + {
48184 +- if (!details || !page)
48185 ++ /* If we can make a decision without *page.. */
48186 ++ if (should_zap_cows(details))
48187 + return false;
48188 +
48189 +- return details->zap_mapping &&
48190 +- (details->zap_mapping != page_rmapping(page));
48191 ++ /* E.g. the caller passes NULL for the case of a zero page */
48192 ++ if (!page)
48193 ++ return false;
48194 ++
48195 ++ return details->zap_mapping != page_rmapping(page);
48196 + }
48197 +
48198 + static unsigned long zap_pte_range(struct mmu_gather *tlb,
48199 +@@ -1405,17 +1420,24 @@ again:
48200 + continue;
48201 + }
48202 +
48203 +- /* If details->check_mapping, we leave swap entries. */
48204 +- if (unlikely(details))
48205 +- continue;
48206 +-
48207 +- if (!non_swap_entry(entry))
48208 ++ if (!non_swap_entry(entry)) {
48209 ++ /* Genuine swap entry, hence a private anon page */
48210 ++ if (!should_zap_cows(details))
48211 ++ continue;
48212 + rss[MM_SWAPENTS]--;
48213 +- else if (is_migration_entry(entry)) {
48214 ++ } else if (is_migration_entry(entry)) {
48215 + struct page *page;
48216 +
48217 + page = pfn_swap_entry_to_page(entry);
48218 ++ if (zap_skip_check_mapping(details, page))
48219 ++ continue;
48220 + rss[mm_counter(page)]--;
48221 ++ } else if (is_hwpoison_entry(entry)) {
48222 ++ if (!should_zap_cows(details))
48223 ++ continue;
48224 ++ } else {
48225 ++ /* We should have covered all the swap entry types */
48226 ++ WARN_ON_ONCE(1);
48227 + }
48228 + if (unlikely(!free_swap_and_cache(entry)))
48229 + print_bad_pte(vma, addr, ptent, NULL);
48230 +@@ -3871,11 +3893,20 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
48231 + return ret;
48232 +
48233 + if (unlikely(PageHWPoison(vmf->page))) {
48234 +- if (ret & VM_FAULT_LOCKED)
48235 +- unlock_page(vmf->page);
48236 +- put_page(vmf->page);
48237 ++ struct page *page = vmf->page;
48238 ++ vm_fault_t poisonret = VM_FAULT_HWPOISON;
48239 ++ if (ret & VM_FAULT_LOCKED) {
48240 ++ if (page_mapped(page))
48241 ++ unmap_mapping_pages(page_mapping(page),
48242 ++ page->index, 1, false);
48243 ++ /* Retry if a clean page was removed from the cache. */
48244 ++ if (invalidate_inode_page(page))
48245 ++ poisonret = VM_FAULT_NOPAGE;
48246 ++ unlock_page(page);
48247 ++ }
48248 ++ put_page(page);
48249 + vmf->page = NULL;
48250 +- return VM_FAULT_HWPOISON;
48251 ++ return poisonret;
48252 + }
48253 +
48254 + if (unlikely(!(ret & VM_FAULT_LOCKED)))
48255 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
48256 +index 69284d3b5e53f..1628cd90d9fcc 100644
48257 +--- a/mm/mempolicy.c
48258 ++++ b/mm/mempolicy.c
48259 +@@ -786,7 +786,6 @@ static int vma_replace_policy(struct vm_area_struct *vma,
48260 + static int mbind_range(struct mm_struct *mm, unsigned long start,
48261 + unsigned long end, struct mempolicy *new_pol)
48262 + {
48263 +- struct vm_area_struct *next;
48264 + struct vm_area_struct *prev;
48265 + struct vm_area_struct *vma;
48266 + int err = 0;
48267 +@@ -801,8 +800,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
48268 + if (start > vma->vm_start)
48269 + prev = vma;
48270 +
48271 +- for (; vma && vma->vm_start < end; prev = vma, vma = next) {
48272 +- next = vma->vm_next;
48273 ++ for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
48274 + vmstart = max(start, vma->vm_start);
48275 + vmend = min(end, vma->vm_end);
48276 +
48277 +@@ -817,10 +815,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
48278 + anon_vma_name(vma));
48279 + if (prev) {
48280 + vma = prev;
48281 +- next = vma->vm_next;
48282 +- if (mpol_equal(vma_policy(vma), new_pol))
48283 +- continue;
48284 +- /* vma_merge() joined vma && vma->next, case 8 */
48285 + goto replace;
48286 + }
48287 + if (vma->vm_start != vmstart) {
48288 +diff --git a/mm/migrate.c b/mm/migrate.c
48289 +index c7da064b4781b..086a366374678 100644
48290 +--- a/mm/migrate.c
48291 ++++ b/mm/migrate.c
48292 +@@ -3190,7 +3190,7 @@ again:
48293 + /*
48294 + * For callers that do not hold get_online_mems() already.
48295 + */
48296 +-static void set_migration_target_nodes(void)
48297 ++void set_migration_target_nodes(void)
48298 + {
48299 + get_online_mems();
48300 + __set_migration_target_nodes();
48301 +@@ -3254,51 +3254,24 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
48302 + return notifier_from_errno(0);
48303 + }
48304 +
48305 +-/*
48306 +- * React to hotplug events that might affect the migration targets
48307 +- * like events that online or offline NUMA nodes.
48308 +- *
48309 +- * The ordering is also currently dependent on which nodes have
48310 +- * CPUs. That means we need CPU on/offline notification too.
48311 +- */
48312 +-static int migration_online_cpu(unsigned int cpu)
48313 +-{
48314 +- set_migration_target_nodes();
48315 +- return 0;
48316 +-}
48317 +-
48318 +-static int migration_offline_cpu(unsigned int cpu)
48319 ++void __init migrate_on_reclaim_init(void)
48320 + {
48321 +- set_migration_target_nodes();
48322 +- return 0;
48323 +-}
48324 +-
48325 +-static int __init migrate_on_reclaim_init(void)
48326 +-{
48327 +- int ret;
48328 +-
48329 + node_demotion = kmalloc_array(nr_node_ids,
48330 + sizeof(struct demotion_nodes),
48331 + GFP_KERNEL);
48332 + WARN_ON(!node_demotion);
48333 +
48334 +- ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
48335 +- NULL, migration_offline_cpu);
48336 ++ hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
48337 + /*
48338 +- * In the unlikely case that this fails, the automatic
48339 +- * migration targets may become suboptimal for nodes
48340 +- * where N_CPU changes. With such a small impact in a
48341 +- * rare case, do not bother trying to do anything special.
48342 ++ * At this point, all numa nodes with memory/CPus have their state
48343 ++ * properly set, so we can build the demotion order now.
48344 ++ * Let us hold the cpu_hotplug lock just, as we could possibily have
48345 ++ * CPU hotplug events during boot.
48346 + */
48347 +- WARN_ON(ret < 0);
48348 +- ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
48349 +- migration_online_cpu, NULL);
48350 +- WARN_ON(ret < 0);
48351 +-
48352 +- hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
48353 +- return 0;
48354 ++ cpus_read_lock();
48355 ++ set_migration_target_nodes();
48356 ++ cpus_read_unlock();
48357 + }
48358 +-late_initcall(migrate_on_reclaim_init);
48359 + #endif /* CONFIG_HOTPLUG_CPU */
48360 +
48361 + bool numa_demotion_enabled = false;
48362 +diff --git a/mm/mlock.c b/mm/mlock.c
48363 +index 25934e7db3e10..37f969ec68fa4 100644
48364 +--- a/mm/mlock.c
48365 ++++ b/mm/mlock.c
48366 +@@ -827,13 +827,12 @@ int user_shm_lock(size_t size, struct ucounts *ucounts)
48367 +
48368 + locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
48369 + lock_limit = rlimit(RLIMIT_MEMLOCK);
48370 +- if (lock_limit == RLIM_INFINITY)
48371 +- allowed = 1;
48372 +- lock_limit >>= PAGE_SHIFT;
48373 ++ if (lock_limit != RLIM_INFINITY)
48374 ++ lock_limit >>= PAGE_SHIFT;
48375 + spin_lock(&shmlock_user_lock);
48376 + memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
48377 +
48378 +- if (!allowed && (memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
48379 ++ if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
48380 + dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
48381 + goto out;
48382 + }
48383 +diff --git a/mm/mmap.c b/mm/mmap.c
48384 +index f61a15474dd6d..18875c216f8db 100644
48385 +--- a/mm/mmap.c
48386 ++++ b/mm/mmap.c
48387 +@@ -2557,7 +2557,7 @@ static int __init cmdline_parse_stack_guard_gap(char *p)
48388 + if (!*endptr)
48389 + stack_guard_gap = val << PAGE_SHIFT;
48390 +
48391 +- return 0;
48392 ++ return 1;
48393 + }
48394 + __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
48395 +
48396 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
48397 +index 3589febc6d319..a1fbf656e7dbd 100644
48398 +--- a/mm/page_alloc.c
48399 ++++ b/mm/page_alloc.c
48400 +@@ -7972,10 +7972,17 @@ restart:
48401 +
48402 + out2:
48403 + /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
48404 +- for (nid = 0; nid < MAX_NUMNODES; nid++)
48405 ++ for (nid = 0; nid < MAX_NUMNODES; nid++) {
48406 ++ unsigned long start_pfn, end_pfn;
48407 ++
48408 + zone_movable_pfn[nid] =
48409 + roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
48410 +
48411 ++ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
48412 ++ if (zone_movable_pfn[nid] >= end_pfn)
48413 ++ zone_movable_pfn[nid] = 0;
48414 ++ }
48415 ++
48416 + out:
48417 + /* restore the node_state */
48418 + node_states[N_MEMORY] = saved_node_state;
48419 +diff --git a/mm/slab.c b/mm/slab.c
48420 +index ddf5737c63d90..a36af26e15216 100644
48421 +--- a/mm/slab.c
48422 ++++ b/mm/slab.c
48423 +@@ -3421,6 +3421,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
48424 +
48425 + if (is_kfence_address(objp)) {
48426 + kmemleak_free_recursive(objp, cachep->flags);
48427 ++ memcg_slab_free_hook(cachep, &objp, 1);
48428 + __kfence_free(objp);
48429 + return;
48430 + }
48431 +diff --git a/mm/usercopy.c b/mm/usercopy.c
48432 +index d0d268135d96d..21fd84ee7fcd4 100644
48433 +--- a/mm/usercopy.c
48434 ++++ b/mm/usercopy.c
48435 +@@ -295,7 +295,10 @@ static bool enable_checks __initdata = true;
48436 +
48437 + static int __init parse_hardened_usercopy(char *str)
48438 + {
48439 +- return strtobool(str, &enable_checks);
48440 ++ if (strtobool(str, &enable_checks))
48441 ++ pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
48442 ++ str);
48443 ++ return 1;
48444 + }
48445 +
48446 + __setup("hardened_usercopy=", parse_hardened_usercopy);
48447 +diff --git a/mm/vmstat.c b/mm/vmstat.c
48448 +index 4057372745d04..9e9536df51b5a 100644
48449 +--- a/mm/vmstat.c
48450 ++++ b/mm/vmstat.c
48451 +@@ -28,6 +28,7 @@
48452 + #include <linux/mm_inline.h>
48453 + #include <linux/page_ext.h>
48454 + #include <linux/page_owner.h>
48455 ++#include <linux/migrate.h>
48456 +
48457 + #include "internal.h"
48458 +
48459 +@@ -2043,7 +2044,12 @@ static void __init init_cpu_node_state(void)
48460 + static int vmstat_cpu_online(unsigned int cpu)
48461 + {
48462 + refresh_zone_stat_thresholds();
48463 +- node_set_state(cpu_to_node(cpu), N_CPU);
48464 ++
48465 ++ if (!node_state(cpu_to_node(cpu), N_CPU)) {
48466 ++ node_set_state(cpu_to_node(cpu), N_CPU);
48467 ++ set_migration_target_nodes();
48468 ++ }
48469 ++
48470 + return 0;
48471 + }
48472 +
48473 +@@ -2066,6 +2072,8 @@ static int vmstat_cpu_dead(unsigned int cpu)
48474 + return 0;
48475 +
48476 + node_clear_state(node, N_CPU);
48477 ++ set_migration_target_nodes();
48478 ++
48479 + return 0;
48480 + }
48481 +
48482 +@@ -2097,6 +2105,9 @@ void __init init_mm_internals(void)
48483 +
48484 + start_shepherd_timer();
48485 + #endif
48486 ++#if defined(CONFIG_MIGRATION) && defined(CONFIG_HOTPLUG_CPU)
48487 ++ migrate_on_reclaim_init();
48488 ++#endif
48489 + #ifdef CONFIG_PROC_FS
48490 + proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
48491 + proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
48492 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
48493 +index 6bd0971807721..f5686c463bc0d 100644
48494 +--- a/net/ax25/af_ax25.c
48495 ++++ b/net/ax25/af_ax25.c
48496 +@@ -89,18 +89,20 @@ again:
48497 + sk = s->sk;
48498 + if (!sk) {
48499 + spin_unlock_bh(&ax25_list_lock);
48500 +- s->ax25_dev = NULL;
48501 + ax25_disconnect(s, ENETUNREACH);
48502 ++ s->ax25_dev = NULL;
48503 + spin_lock_bh(&ax25_list_lock);
48504 + goto again;
48505 + }
48506 + sock_hold(sk);
48507 + spin_unlock_bh(&ax25_list_lock);
48508 + lock_sock(sk);
48509 +- s->ax25_dev = NULL;
48510 +- dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
48511 +- ax25_dev_put(ax25_dev);
48512 + ax25_disconnect(s, ENETUNREACH);
48513 ++ s->ax25_dev = NULL;
48514 ++ if (sk->sk_socket) {
48515 ++ dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
48516 ++ ax25_dev_put(ax25_dev);
48517 ++ }
48518 + release_sock(sk);
48519 + spin_lock_bh(&ax25_list_lock);
48520 + sock_put(sk);
48521 +@@ -979,14 +981,16 @@ static int ax25_release(struct socket *sock)
48522 + {
48523 + struct sock *sk = sock->sk;
48524 + ax25_cb *ax25;
48525 ++ ax25_dev *ax25_dev;
48526 +
48527 + if (sk == NULL)
48528 + return 0;
48529 +
48530 + sock_hold(sk);
48531 +- sock_orphan(sk);
48532 + lock_sock(sk);
48533 ++ sock_orphan(sk);
48534 + ax25 = sk_to_ax25(sk);
48535 ++ ax25_dev = ax25->ax25_dev;
48536 +
48537 + if (sk->sk_type == SOCK_SEQPACKET) {
48538 + switch (ax25->state) {
48539 +@@ -1048,6 +1052,10 @@ static int ax25_release(struct socket *sock)
48540 + sk->sk_state_change(sk);
48541 + ax25_destroy_socket(ax25);
48542 + }
48543 ++ if (ax25_dev) {
48544 ++ dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
48545 ++ ax25_dev_put(ax25_dev);
48546 ++ }
48547 +
48548 + sock->sk = NULL;
48549 + release_sock(sk);
48550 +diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
48551 +index 15ab812c4fe4b..3a476e4f6cd0b 100644
48552 +--- a/net/ax25/ax25_subr.c
48553 ++++ b/net/ax25/ax25_subr.c
48554 +@@ -261,12 +261,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
48555 + {
48556 + ax25_clear_queues(ax25);
48557 +
48558 +- if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
48559 +- ax25_stop_heartbeat(ax25);
48560 +- ax25_stop_t1timer(ax25);
48561 +- ax25_stop_t2timer(ax25);
48562 +- ax25_stop_t3timer(ax25);
48563 +- ax25_stop_idletimer(ax25);
48564 ++ if (reason == ENETUNREACH) {
48565 ++ del_timer_sync(&ax25->timer);
48566 ++ del_timer_sync(&ax25->t1timer);
48567 ++ del_timer_sync(&ax25->t2timer);
48568 ++ del_timer_sync(&ax25->t3timer);
48569 ++ del_timer_sync(&ax25->idletimer);
48570 ++ } else {
48571 ++ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
48572 ++ ax25_stop_heartbeat(ax25);
48573 ++ ax25_stop_t1timer(ax25);
48574 ++ ax25_stop_t2timer(ax25);
48575 ++ ax25_stop_t3timer(ax25);
48576 ++ ax25_stop_idletimer(ax25);
48577 ++ }
48578 +
48579 + ax25->state = AX25_STATE_0;
48580 +
48581 +diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h
48582 +index 05e2e917fc254..e5876751f07ed 100644
48583 +--- a/net/bluetooth/eir.h
48584 ++++ b/net/bluetooth/eir.h
48585 +@@ -15,6 +15,11 @@ u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr);
48586 + u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len);
48587 + u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
48588 +
48589 ++static inline u16 eir_precalc_len(u8 data_len)
48590 ++{
48591 ++ return sizeof(u8) * 2 + data_len;
48592 ++}
48593 ++
48594 + static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
48595 + u8 *data, u8 data_len)
48596 + {
48597 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
48598 +index 04ebe901e86f0..3bb2b3b6a1c92 100644
48599 +--- a/net/bluetooth/hci_conn.c
48600 ++++ b/net/bluetooth/hci_conn.c
48601 +@@ -669,7 +669,9 @@ static void le_conn_timeout(struct work_struct *work)
48602 + if (conn->role == HCI_ROLE_SLAVE) {
48603 + /* Disable LE Advertising */
48604 + le_disable_advertising(hdev);
48605 ++ hci_dev_lock(hdev);
48606 + hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
48607 ++ hci_dev_unlock(hdev);
48608 + return;
48609 + }
48610 +
48611 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
48612 +index fc30f4c03d292..a105b7317560c 100644
48613 +--- a/net/bluetooth/hci_event.c
48614 ++++ b/net/bluetooth/hci_event.c
48615 +@@ -4534,7 +4534,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
48616 + if (!info) {
48617 + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
48618 + HCI_EV_INQUIRY_RESULT_WITH_RSSI);
48619 +- return;
48620 ++ goto unlock;
48621 + }
48622 +
48623 + bacpy(&data.bdaddr, &info->bdaddr);
48624 +@@ -4565,7 +4565,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
48625 + if (!info) {
48626 + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
48627 + HCI_EV_INQUIRY_RESULT_WITH_RSSI);
48628 +- return;
48629 ++ goto unlock;
48630 + }
48631 +
48632 + bacpy(&data.bdaddr, &info->bdaddr);
48633 +@@ -4587,7 +4587,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
48634 + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
48635 + HCI_EV_INQUIRY_RESULT_WITH_RSSI);
48636 + }
48637 +-
48638 ++unlock:
48639 + hci_dev_unlock(hdev);
48640 + }
48641 +
48642 +@@ -6798,7 +6798,7 @@ static const struct hci_ev {
48643 + HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
48644 + sizeof(struct hci_ev_num_comp_blocks)),
48645 + /* [0xff = HCI_EV_VENDOR] */
48646 +- HCI_EV(HCI_EV_VENDOR, msft_vendor_evt, 0),
48647 ++ HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
48648 + };
48649 +
48650 + static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
48651 +@@ -6823,8 +6823,9 @@ static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
48652 + * decide if that is acceptable.
48653 + */
48654 + if (skb->len > ev->max_len)
48655 +- bt_dev_warn(hdev, "unexpected event 0x%2.2x length: %u > %u",
48656 +- event, skb->len, ev->max_len);
48657 ++ bt_dev_warn_ratelimited(hdev,
48658 ++ "unexpected event 0x%2.2x length: %u > %u",
48659 ++ event, skb->len, ev->max_len);
48660 +
48661 + data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
48662 + if (!data)
48663 +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
48664 +index 5e93f37c2e04d..405d48c3e63ed 100644
48665 +--- a/net/bluetooth/hci_sync.c
48666 ++++ b/net/bluetooth/hci_sync.c
48667 +@@ -4432,7 +4432,7 @@ static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
48668 + return err;
48669 + }
48670 +
48671 +- return err;
48672 ++ return 0;
48673 + }
48674 +
48675 + /* This function perform power off HCI command sequence as follows:
48676 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
48677 +index 230a7a8196c07..15eab8b968ce8 100644
48678 +--- a/net/bluetooth/mgmt.c
48679 ++++ b/net/bluetooth/mgmt.c
48680 +@@ -9086,12 +9086,14 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
48681 + u16 eir_len = 0;
48682 + u32 flags = 0;
48683 +
48684 ++ /* allocate buff for LE or BR/EDR adv */
48685 + if (conn->le_adv_data_len > 0)
48686 + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
48687 +- conn->le_adv_data_len);
48688 ++ sizeof(*ev) + conn->le_adv_data_len);
48689 + else
48690 + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
48691 +- 2 + name_len + 5);
48692 ++ sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
48693 ++ eir_precalc_len(sizeof(conn->dev_class)));
48694 +
48695 + ev = skb_put(skb, sizeof(*ev));
48696 + bacpy(&ev->addr.bdaddr, &conn->dst);
48697 +@@ -9707,13 +9709,11 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
48698 + {
48699 + struct sk_buff *skb;
48700 + struct mgmt_ev_device_found *ev;
48701 +- u16 eir_len;
48702 +- u32 flags;
48703 ++ u16 eir_len = 0;
48704 ++ u32 flags = 0;
48705 +
48706 +- if (name_len)
48707 +- skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
48708 +- else
48709 +- skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
48710 ++ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
48711 ++ sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
48712 +
48713 + ev = skb_put(skb, sizeof(*ev));
48714 + bacpy(&ev->addr.bdaddr, bdaddr);
48715 +@@ -9723,10 +9723,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
48716 + if (name) {
48717 + eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
48718 + name_len);
48719 +- flags = 0;
48720 + skb_put(skb, eir_len);
48721 + } else {
48722 +- eir_len = 0;
48723 + flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
48724 + }
48725 +
48726 +diff --git a/net/can/isotp.c b/net/can/isotp.c
48727 +index d2a430b6a13bd..a95d171b3a64b 100644
48728 +--- a/net/can/isotp.c
48729 ++++ b/net/can/isotp.c
48730 +@@ -1005,26 +1005,29 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
48731 + {
48732 + struct sock *sk = sock->sk;
48733 + struct sk_buff *skb;
48734 +- int err = 0;
48735 +- int noblock;
48736 ++ struct isotp_sock *so = isotp_sk(sk);
48737 ++ int noblock = flags & MSG_DONTWAIT;
48738 ++ int ret = 0;
48739 +
48740 +- noblock = flags & MSG_DONTWAIT;
48741 +- flags &= ~MSG_DONTWAIT;
48742 ++ if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
48743 ++ return -EINVAL;
48744 +
48745 +- skb = skb_recv_datagram(sk, flags, noblock, &err);
48746 ++ if (!so->bound)
48747 ++ return -EADDRNOTAVAIL;
48748 ++
48749 ++ flags &= ~MSG_DONTWAIT;
48750 ++ skb = skb_recv_datagram(sk, flags, noblock, &ret);
48751 + if (!skb)
48752 +- return err;
48753 ++ return ret;
48754 +
48755 + if (size < skb->len)
48756 + msg->msg_flags |= MSG_TRUNC;
48757 + else
48758 + size = skb->len;
48759 +
48760 +- err = memcpy_to_msg(msg, skb->data, size);
48761 +- if (err < 0) {
48762 +- skb_free_datagram(sk, skb);
48763 +- return err;
48764 +- }
48765 ++ ret = memcpy_to_msg(msg, skb->data, size);
48766 ++ if (ret < 0)
48767 ++ goto out_err;
48768 +
48769 + sock_recv_timestamp(msg, sk, skb);
48770 +
48771 +@@ -1034,9 +1037,13 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
48772 + memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
48773 + }
48774 +
48775 ++ /* set length of return value */
48776 ++ ret = (flags & MSG_TRUNC) ? skb->len : size;
48777 ++
48778 ++out_err:
48779 + skb_free_datagram(sk, skb);
48780 +
48781 +- return size;
48782 ++ return ret;
48783 + }
48784 +
48785 + static int isotp_release(struct socket *sock)
48786 +@@ -1104,6 +1111,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
48787 + struct net *net = sock_net(sk);
48788 + int ifindex;
48789 + struct net_device *dev;
48790 ++ canid_t tx_id, rx_id;
48791 + int err = 0;
48792 + int notify_enetdown = 0;
48793 + int do_rx_reg = 1;
48794 +@@ -1111,8 +1119,18 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
48795 + if (len < ISOTP_MIN_NAMELEN)
48796 + return -EINVAL;
48797 +
48798 +- if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
48799 +- return -EADDRNOTAVAIL;
48800 ++ /* sanitize tx/rx CAN identifiers */
48801 ++ tx_id = addr->can_addr.tp.tx_id;
48802 ++ if (tx_id & CAN_EFF_FLAG)
48803 ++ tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
48804 ++ else
48805 ++ tx_id &= CAN_SFF_MASK;
48806 ++
48807 ++ rx_id = addr->can_addr.tp.rx_id;
48808 ++ if (rx_id & CAN_EFF_FLAG)
48809 ++ rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
48810 ++ else
48811 ++ rx_id &= CAN_SFF_MASK;
48812 +
48813 + if (!addr->can_ifindex)
48814 + return -ENODEV;
48815 +@@ -1124,21 +1142,13 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
48816 + do_rx_reg = 0;
48817 +
48818 + /* do not validate rx address for functional addressing */
48819 +- if (do_rx_reg) {
48820 +- if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
48821 +- err = -EADDRNOTAVAIL;
48822 +- goto out;
48823 +- }
48824 +-
48825 +- if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
48826 +- err = -EADDRNOTAVAIL;
48827 +- goto out;
48828 +- }
48829 ++ if (do_rx_reg && rx_id == tx_id) {
48830 ++ err = -EADDRNOTAVAIL;
48831 ++ goto out;
48832 + }
48833 +
48834 + if (so->bound && addr->can_ifindex == so->ifindex &&
48835 +- addr->can_addr.tp.rx_id == so->rxid &&
48836 +- addr->can_addr.tp.tx_id == so->txid)
48837 ++ rx_id == so->rxid && tx_id == so->txid)
48838 + goto out;
48839 +
48840 + dev = dev_get_by_index(net, addr->can_ifindex);
48841 +@@ -1162,8 +1172,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
48842 + ifindex = dev->ifindex;
48843 +
48844 + if (do_rx_reg)
48845 +- can_rx_register(net, dev, addr->can_addr.tp.rx_id,
48846 +- SINGLE_MASK(addr->can_addr.tp.rx_id),
48847 ++ can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
48848 + isotp_rcv, sk, "isotp", sk);
48849 +
48850 + dev_put(dev);
48851 +@@ -1183,8 +1192,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
48852 +
48853 + /* switch to new settings */
48854 + so->ifindex = ifindex;
48855 +- so->rxid = addr->can_addr.tp.rx_id;
48856 +- so->txid = addr->can_addr.tp.tx_id;
48857 ++ so->rxid = rx_id;
48858 ++ so->txid = tx_id;
48859 + so->bound = 1;
48860 +
48861 + out:
48862 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
48863 +index ea51e23e9247e..a8a2fb745274c 100644
48864 +--- a/net/core/skbuff.c
48865 ++++ b/net/core/skbuff.c
48866 +@@ -201,7 +201,7 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
48867 + skb->head = data;
48868 + skb->data = data;
48869 + skb_reset_tail_pointer(skb);
48870 +- skb->end = skb->tail + size;
48871 ++ skb_set_end_offset(skb, size);
48872 + skb->mac_header = (typeof(skb->mac_header))~0U;
48873 + skb->transport_header = (typeof(skb->transport_header))~0U;
48874 +
48875 +@@ -1736,11 +1736,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
48876 + skb->head = data;
48877 + skb->head_frag = 0;
48878 + skb->data += off;
48879 ++
48880 ++ skb_set_end_offset(skb, size);
48881 + #ifdef NET_SKBUFF_DATA_USES_OFFSET
48882 +- skb->end = size;
48883 + off = nhead;
48884 +-#else
48885 +- skb->end = skb->head + size;
48886 + #endif
48887 + skb->tail += off;
48888 + skb_headers_offset_update(skb, nhead);
48889 +@@ -1788,6 +1787,38 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
48890 + }
48891 + EXPORT_SYMBOL(skb_realloc_headroom);
48892 +
48893 ++int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
48894 ++{
48895 ++ unsigned int saved_end_offset, saved_truesize;
48896 ++ struct skb_shared_info *shinfo;
48897 ++ int res;
48898 ++
48899 ++ saved_end_offset = skb_end_offset(skb);
48900 ++ saved_truesize = skb->truesize;
48901 ++
48902 ++ res = pskb_expand_head(skb, 0, 0, pri);
48903 ++ if (res)
48904 ++ return res;
48905 ++
48906 ++ skb->truesize = saved_truesize;
48907 ++
48908 ++ if (likely(skb_end_offset(skb) == saved_end_offset))
48909 ++ return 0;
48910 ++
48911 ++ shinfo = skb_shinfo(skb);
48912 ++
48913 ++ /* We are about to change back skb->end,
48914 ++ * we need to move skb_shinfo() to its new location.
48915 ++ */
48916 ++ memmove(skb->head + saved_end_offset,
48917 ++ shinfo,
48918 ++ offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
48919 ++
48920 ++ skb_set_end_offset(skb, saved_end_offset);
48921 ++
48922 ++ return 0;
48923 ++}
48924 ++
48925 + /**
48926 + * skb_expand_head - reallocate header of &sk_buff
48927 + * @skb: buffer to reallocate
48928 +@@ -6044,11 +6075,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
48929 + skb->head = data;
48930 + skb->data = data;
48931 + skb->head_frag = 0;
48932 +-#ifdef NET_SKBUFF_DATA_USES_OFFSET
48933 +- skb->end = size;
48934 +-#else
48935 +- skb->end = skb->head + size;
48936 +-#endif
48937 ++ skb_set_end_offset(skb, size);
48938 + skb_set_tail_pointer(skb, skb_headlen(skb));
48939 + skb_headers_offset_update(skb, 0);
48940 + skb->cloned = 0;
48941 +@@ -6186,11 +6213,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
48942 + skb->head = data;
48943 + skb->head_frag = 0;
48944 + skb->data = data;
48945 +-#ifdef NET_SKBUFF_DATA_USES_OFFSET
48946 +- skb->end = size;
48947 +-#else
48948 +- skb->end = skb->head + size;
48949 +-#endif
48950 ++ skb_set_end_offset(skb, size);
48951 + skb_reset_tail_pointer(skb);
48952 + skb_headers_offset_update(skb, 0);
48953 + skb->cloned = 0;
48954 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
48955 +index 929a2b096b04e..cc381165ea080 100644
48956 +--- a/net/core/skmsg.c
48957 ++++ b/net/core/skmsg.c
48958 +@@ -27,6 +27,7 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
48959 + int elem_first_coalesce)
48960 + {
48961 + struct page_frag *pfrag = sk_page_frag(sk);
48962 ++ u32 osize = msg->sg.size;
48963 + int ret = 0;
48964 +
48965 + len -= msg->sg.size;
48966 +@@ -35,13 +36,17 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
48967 + u32 orig_offset;
48968 + int use, i;
48969 +
48970 +- if (!sk_page_frag_refill(sk, pfrag))
48971 +- return -ENOMEM;
48972 ++ if (!sk_page_frag_refill(sk, pfrag)) {
48973 ++ ret = -ENOMEM;
48974 ++ goto msg_trim;
48975 ++ }
48976 +
48977 + orig_offset = pfrag->offset;
48978 + use = min_t(int, len, pfrag->size - orig_offset);
48979 +- if (!sk_wmem_schedule(sk, use))
48980 +- return -ENOMEM;
48981 ++ if (!sk_wmem_schedule(sk, use)) {
48982 ++ ret = -ENOMEM;
48983 ++ goto msg_trim;
48984 ++ }
48985 +
48986 + i = msg->sg.end;
48987 + sk_msg_iter_var_prev(i);
48988 +@@ -71,6 +76,10 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
48989 + }
48990 +
48991 + return ret;
48992 ++
48993 ++msg_trim:
48994 ++ sk_msg_trim(sk, msg, osize);
48995 ++ return ret;
48996 + }
48997 + EXPORT_SYMBOL_GPL(sk_msg_alloc);
48998 +
48999 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
49000 +index 88e2808019b47..a39bbed77f87d 100644
49001 +--- a/net/dsa/dsa2.c
49002 ++++ b/net/dsa/dsa2.c
49003 +@@ -1722,6 +1722,10 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
49004 + struct dsa_port *dp;
49005 +
49006 + mutex_lock(&dsa2_mutex);
49007 ++
49008 ++ if (!ds->setup)
49009 ++ goto out;
49010 ++
49011 + rtnl_lock();
49012 +
49013 + dsa_switch_for_each_user_port(dp, ds) {
49014 +@@ -1738,6 +1742,7 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
49015 + dp->master->dsa_ptr = NULL;
49016 +
49017 + rtnl_unlock();
49018 ++out:
49019 + mutex_unlock(&dsa2_mutex);
49020 + }
49021 + EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
49022 +diff --git a/net/dsa/switch.c b/net/dsa/switch.c
49023 +index e3c7d2627a619..517cc83d13cc8 100644
49024 +--- a/net/dsa/switch.c
49025 ++++ b/net/dsa/switch.c
49026 +@@ -113,26 +113,15 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
49027 + return dsa_tag_8021q_bridge_join(ds, info);
49028 + }
49029 +
49030 +-static int dsa_switch_bridge_leave(struct dsa_switch *ds,
49031 +- struct dsa_notifier_bridge_info *info)
49032 ++static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
49033 ++ struct dsa_notifier_bridge_info *info)
49034 + {
49035 +- struct dsa_switch_tree *dst = ds->dst;
49036 + struct netlink_ext_ack extack = {0};
49037 + bool change_vlan_filtering = false;
49038 + bool vlan_filtering;
49039 + struct dsa_port *dp;
49040 + int err;
49041 +
49042 +- if (dst->index == info->tree_index && ds->index == info->sw_index &&
49043 +- ds->ops->port_bridge_leave)
49044 +- ds->ops->port_bridge_leave(ds, info->port, info->bridge);
49045 +-
49046 +- if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
49047 +- ds->ops->crosschip_bridge_leave)
49048 +- ds->ops->crosschip_bridge_leave(ds, info->tree_index,
49049 +- info->sw_index, info->port,
49050 +- info->bridge);
49051 +-
49052 + if (ds->needs_standalone_vlan_filtering &&
49053 + !br_vlan_enabled(info->bridge.dev)) {
49054 + change_vlan_filtering = true;
49055 +@@ -172,6 +161,31 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
49056 + return err;
49057 + }
49058 +
49059 ++ return 0;
49060 ++}
49061 ++
49062 ++static int dsa_switch_bridge_leave(struct dsa_switch *ds,
49063 ++ struct dsa_notifier_bridge_info *info)
49064 ++{
49065 ++ struct dsa_switch_tree *dst = ds->dst;
49066 ++ int err;
49067 ++
49068 ++ if (dst->index == info->tree_index && ds->index == info->sw_index &&
49069 ++ ds->ops->port_bridge_leave)
49070 ++ ds->ops->port_bridge_leave(ds, info->port, info->bridge);
49071 ++
49072 ++ if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
49073 ++ ds->ops->crosschip_bridge_leave)
49074 ++ ds->ops->crosschip_bridge_leave(ds, info->tree_index,
49075 ++ info->sw_index, info->port,
49076 ++ info->bridge);
49077 ++
49078 ++ if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
49079 ++ err = dsa_switch_sync_vlan_filtering(ds, info);
49080 ++ if (err)
49081 ++ return err;
49082 ++ }
49083 ++
49084 + return dsa_tag_8021q_bridge_leave(ds, info);
49085 + }
49086 +
49087 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
49088 +index f33ad1f383b68..d5d058de36646 100644
49089 +--- a/net/ipv4/route.c
49090 ++++ b/net/ipv4/route.c
49091 +@@ -499,6 +499,15 @@ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
49092 + }
49093 + EXPORT_SYMBOL(__ip_select_ident);
49094 +
49095 ++static void ip_rt_fix_tos(struct flowi4 *fl4)
49096 ++{
49097 ++ __u8 tos = RT_FL_TOS(fl4);
49098 ++
49099 ++ fl4->flowi4_tos = tos & IPTOS_RT_MASK;
49100 ++ fl4->flowi4_scope = tos & RTO_ONLINK ?
49101 ++ RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;
49102 ++}
49103 ++
49104 + static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
49105 + const struct sock *sk,
49106 + const struct iphdr *iph,
49107 +@@ -824,6 +833,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
49108 + rt = (struct rtable *) dst;
49109 +
49110 + __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
49111 ++ ip_rt_fix_tos(&fl4);
49112 + __ip_do_redirect(rt, skb, &fl4, true);
49113 + }
49114 +
49115 +@@ -1048,6 +1058,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
49116 + struct flowi4 fl4;
49117 +
49118 + ip_rt_build_flow_key(&fl4, sk, skb);
49119 ++ ip_rt_fix_tos(&fl4);
49120 +
49121 + /* Don't make lookup fail for bridged encapsulations */
49122 + if (skb && netif_is_any_bridge_port(skb->dev))
49123 +@@ -1122,6 +1133,8 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
49124 + goto out;
49125 +
49126 + new = true;
49127 ++ } else {
49128 ++ ip_rt_fix_tos(&fl4);
49129 + }
49130 +
49131 + __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
49132 +@@ -2603,7 +2616,6 @@ add:
49133 + struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
49134 + const struct sk_buff *skb)
49135 + {
49136 +- __u8 tos = RT_FL_TOS(fl4);
49137 + struct fib_result res = {
49138 + .type = RTN_UNSPEC,
49139 + .fi = NULL,
49140 +@@ -2613,9 +2625,7 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
49141 + struct rtable *rth;
49142 +
49143 + fl4->flowi4_iif = LOOPBACK_IFINDEX;
49144 +- fl4->flowi4_tos = tos & IPTOS_RT_MASK;
49145 +- fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
49146 +- RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
49147 ++ ip_rt_fix_tos(fl4);
49148 +
49149 + rcu_read_lock();
49150 + rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
49151 +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
49152 +index 9b9b02052fd36..1cdcb4df0eb7e 100644
49153 +--- a/net/ipv4/tcp_bpf.c
49154 ++++ b/net/ipv4/tcp_bpf.c
49155 +@@ -138,10 +138,9 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
49156 + struct sk_psock *psock = sk_psock_get(sk);
49157 + int ret;
49158 +
49159 +- if (unlikely(!psock)) {
49160 +- sk_msg_free(sk, msg);
49161 +- return 0;
49162 +- }
49163 ++ if (unlikely(!psock))
49164 ++ return -EPIPE;
49165 ++
49166 + ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
49167 + tcp_bpf_push_locked(sk, msg, bytes, flags, false);
49168 + sk_psock_put(sk, psock);
49169 +@@ -335,7 +334,7 @@ more_data:
49170 + cork = true;
49171 + psock->cork = NULL;
49172 + }
49173 +- sk_msg_return(sk, msg, tosend);
49174 ++ sk_msg_return(sk, msg, msg->sg.size);
49175 + release_sock(sk);
49176 +
49177 + ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
49178 +@@ -375,8 +374,11 @@ more_data:
49179 + }
49180 + if (msg &&
49181 + msg->sg.data[msg->sg.start].page_link &&
49182 +- msg->sg.data[msg->sg.start].length)
49183 ++ msg->sg.data[msg->sg.start].length) {
49184 ++ if (eval == __SK_REDIRECT)
49185 ++ sk_mem_charge(sk, msg->sg.size);
49186 + goto more_data;
49187 ++ }
49188 + }
49189 + return ret;
49190 + }
49191 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
49192 +index 5079832af5c10..257780f93305f 100644
49193 +--- a/net/ipv4/tcp_output.c
49194 ++++ b/net/ipv4/tcp_output.c
49195 +@@ -3719,6 +3719,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
49196 + */
49197 + static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
49198 + {
49199 ++ struct inet_connection_sock *icsk = inet_csk(sk);
49200 + struct tcp_sock *tp = tcp_sk(sk);
49201 + struct tcp_fastopen_request *fo = tp->fastopen_req;
49202 + int space, err = 0;
49203 +@@ -3733,8 +3734,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
49204 + * private TCP options. The cost is reduced data space in SYN :(
49205 + */
49206 + tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
49207 ++ /* Sync mss_cache after updating the mss_clamp */
49208 ++ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
49209 +
49210 +- space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
49211 ++ space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
49212 + MAX_TCP_OPTION_SPACE;
49213 +
49214 + space = min_t(size_t, space, fo->size);
49215 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
49216 +index e87bccaab561f..95aaf00c876c3 100644
49217 +--- a/net/mac80211/ieee80211_i.h
49218 ++++ b/net/mac80211/ieee80211_i.h
49219 +@@ -2380,7 +2380,7 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
49220 + u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
49221 + const struct cfg80211_chan_def *chandef);
49222 + u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
49223 +-u8 *ieee80211_ie_build_he_cap(u8 *pos,
49224 ++u8 *ieee80211_ie_build_he_cap(u32 disable_flags, u8 *pos,
49225 + const struct ieee80211_sta_he_cap *he_cap,
49226 + u8 *end);
49227 + void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
49228 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
49229 +index 15ac08d111ea1..6847fdf934392 100644
49230 +--- a/net/mac80211/mesh.c
49231 ++++ b/net/mac80211/mesh.c
49232 +@@ -580,7 +580,7 @@ int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
49233 + return -ENOMEM;
49234 +
49235 + pos = skb_put(skb, ie_len);
49236 +- ieee80211_ie_build_he_cap(pos, he_cap, pos + ie_len);
49237 ++ ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
49238 +
49239 + return 0;
49240 + }
49241 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
49242 +index 744842c4513b1..c4d3e2da73f23 100644
49243 +--- a/net/mac80211/mlme.c
49244 ++++ b/net/mac80211/mlme.c
49245 +@@ -636,7 +636,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
49246 + struct sk_buff *skb,
49247 + struct ieee80211_supported_band *sband)
49248 + {
49249 +- u8 *pos;
49250 ++ u8 *pos, *pre_he_pos;
49251 + const struct ieee80211_sta_he_cap *he_cap = NULL;
49252 + struct ieee80211_chanctx_conf *chanctx_conf;
49253 + u8 he_cap_size;
49254 +@@ -653,16 +653,21 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
49255 +
49256 + he_cap = ieee80211_get_he_iftype_cap(sband,
49257 + ieee80211_vif_type_p2p(&sdata->vif));
49258 +- if (!he_cap || !reg_cap)
49259 ++ if (!he_cap || !chanctx_conf || !reg_cap)
49260 + return;
49261 +
49262 ++ /* get a max size estimate */
49263 + he_cap_size =
49264 + 2 + 1 + sizeof(he_cap->he_cap_elem) +
49265 + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
49266 + ieee80211_he_ppe_size(he_cap->ppe_thres[0],
49267 + he_cap->he_cap_elem.phy_cap_info);
49268 + pos = skb_put(skb, he_cap_size);
49269 +- ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size);
49270 ++ pre_he_pos = pos;
49271 ++ pos = ieee80211_ie_build_he_cap(sdata->u.mgd.flags,
49272 ++ pos, he_cap, pos + he_cap_size);
49273 ++ /* trim excess if any */
49274 ++ skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
49275 +
49276 + ieee80211_ie_build_he_6ghz_cap(sdata, skb);
49277 + }
49278 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
49279 +index f71b042a5c8bb..342c2bfe27091 100644
49280 +--- a/net/mac80211/util.c
49281 ++++ b/net/mac80211/util.c
49282 +@@ -1974,7 +1974,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
49283 + if (he_cap &&
49284 + cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
49285 + IEEE80211_CHAN_NO_HE)) {
49286 +- pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
49287 ++ pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
49288 + if (!pos)
49289 + goto out_err;
49290 + }
49291 +@@ -2918,10 +2918,11 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
49292 + he_cap->he_cap_elem.phy_cap_info);
49293 + }
49294 +
49295 +-u8 *ieee80211_ie_build_he_cap(u8 *pos,
49296 ++u8 *ieee80211_ie_build_he_cap(u32 disable_flags, u8 *pos,
49297 + const struct ieee80211_sta_he_cap *he_cap,
49298 + u8 *end)
49299 + {
49300 ++ struct ieee80211_he_cap_elem elem;
49301 + u8 n;
49302 + u8 ie_len;
49303 + u8 *orig_pos = pos;
49304 +@@ -2934,7 +2935,23 @@ u8 *ieee80211_ie_build_he_cap(u8 *pos,
49305 + if (!he_cap)
49306 + return orig_pos;
49307 +
49308 +- n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem);
49309 ++ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
49310 ++ elem = he_cap->he_cap_elem;
49311 ++
49312 ++ if (disable_flags & IEEE80211_STA_DISABLE_40MHZ)
49313 ++ elem.phy_cap_info[0] &=
49314 ++ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
49315 ++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
49316 ++
49317 ++ if (disable_flags & IEEE80211_STA_DISABLE_160MHZ)
49318 ++ elem.phy_cap_info[0] &=
49319 ++ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
49320 ++
49321 ++ if (disable_flags & IEEE80211_STA_DISABLE_80P80MHZ)
49322 ++ elem.phy_cap_info[0] &=
49323 ++ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
49324 ++
49325 ++ n = ieee80211_he_mcs_nss_size(&elem);
49326 + ie_len = 2 + 1 +
49327 + sizeof(he_cap->he_cap_elem) + n +
49328 + ieee80211_he_ppe_size(he_cap->ppe_thres[0],
49329 +@@ -2948,8 +2965,8 @@ u8 *ieee80211_ie_build_he_cap(u8 *pos,
49330 + *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
49331 +
49332 + /* Fixed data */
49333 +- memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem));
49334 +- pos += sizeof(he_cap->he_cap_elem);
49335 ++ memcpy(pos, &elem, sizeof(elem));
49336 ++ pos += sizeof(elem);
49337 +
49338 + memcpy(pos, &he_cap->he_mcs_nss_supp, n);
49339 + pos += n;
49340 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
49341 +index 1c72f25f083ea..014c9d88f9479 100644
49342 +--- a/net/mptcp/protocol.c
49343 ++++ b/net/mptcp/protocol.c
49344 +@@ -1196,6 +1196,7 @@ static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, g
49345 + tcp_skb_entail(ssk, skb);
49346 + return skb;
49347 + }
49348 ++ tcp_skb_tsorted_anchor_cleanup(skb);
49349 + kfree_skb(skb);
49350 + return NULL;
49351 + }
49352 +diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
49353 +index ae4488a13c70c..ceb38a7b37cb7 100644
49354 +--- a/net/netfilter/nf_conntrack_helper.c
49355 ++++ b/net/netfilter/nf_conntrack_helper.c
49356 +@@ -556,6 +556,12 @@ static const struct nf_ct_ext_type helper_extend = {
49357 + .id = NF_CT_EXT_HELPER,
49358 + };
49359 +
49360 ++void nf_ct_set_auto_assign_helper_warned(struct net *net)
49361 ++{
49362 ++ nf_ct_pernet(net)->auto_assign_helper_warned = true;
49363 ++}
49364 ++EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
49365 ++
49366 + void nf_conntrack_helper_pernet_init(struct net *net)
49367 + {
49368 + struct nf_conntrack_net *cnet = nf_ct_pernet(net);
49369 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
49370 +index d1582b888c0d8..8ec55cd72572e 100644
49371 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
49372 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
49373 +@@ -341,8 +341,8 @@ static void tcp_options(const struct sk_buff *skb,
49374 + if (!ptr)
49375 + return;
49376 +
49377 +- state->td_scale =
49378 +- state->flags = 0;
49379 ++ state->td_scale = 0;
49380 ++ state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
49381 +
49382 + while (length > 0) {
49383 + int opcode=*ptr++;
49384 +@@ -862,6 +862,16 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
49385 + return false;
49386 + }
49387 +
49388 ++static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
49389 ++{
49390 ++ state->td_end = 0;
49391 ++ state->td_maxend = 0;
49392 ++ state->td_maxwin = 0;
49393 ++ state->td_maxack = 0;
49394 ++ state->td_scale = 0;
49395 ++ state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
49396 ++}
49397 ++
49398 + /* Returns verdict for packet, or -1 for invalid. */
49399 + int nf_conntrack_tcp_packet(struct nf_conn *ct,
49400 + struct sk_buff *skb,
49401 +@@ -968,8 +978,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
49402 + ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
49403 + ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
49404 + ct->proto.tcp.last_flags;
49405 +- memset(&ct->proto.tcp.seen[dir], 0,
49406 +- sizeof(struct ip_ct_tcp_state));
49407 ++ nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
49408 + break;
49409 + }
49410 + ct->proto.tcp.last_index = index;
49411 +diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
49412 +index 5c57ade6bd05a..0ccabf3fa6aa3 100644
49413 +--- a/net/netfilter/nf_flow_table_inet.c
49414 ++++ b/net/netfilter/nf_flow_table_inet.c
49415 +@@ -6,12 +6,29 @@
49416 + #include <linux/rhashtable.h>
49417 + #include <net/netfilter/nf_flow_table.h>
49418 + #include <net/netfilter/nf_tables.h>
49419 ++#include <linux/if_vlan.h>
49420 +
49421 + static unsigned int
49422 + nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
49423 + const struct nf_hook_state *state)
49424 + {
49425 ++ struct vlan_ethhdr *veth;
49426 ++ __be16 proto;
49427 ++
49428 + switch (skb->protocol) {
49429 ++ case htons(ETH_P_8021Q):
49430 ++ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
49431 ++ proto = veth->h_vlan_encapsulated_proto;
49432 ++ break;
49433 ++ case htons(ETH_P_PPP_SES):
49434 ++ proto = nf_flow_pppoe_proto(skb);
49435 ++ break;
49436 ++ default:
49437 ++ proto = skb->protocol;
49438 ++ break;
49439 ++ }
49440 ++
49441 ++ switch (proto) {
49442 + case htons(ETH_P_IP):
49443 + return nf_flow_offload_ip_hook(priv, skb, state);
49444 + case htons(ETH_P_IPV6):
49445 +diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
49446 +index 889cf88d3dba6..6257d87c3a56d 100644
49447 +--- a/net/netfilter/nf_flow_table_ip.c
49448 ++++ b/net/netfilter/nf_flow_table_ip.c
49449 +@@ -8,8 +8,6 @@
49450 + #include <linux/ipv6.h>
49451 + #include <linux/netdevice.h>
49452 + #include <linux/if_ether.h>
49453 +-#include <linux/if_pppox.h>
49454 +-#include <linux/ppp_defs.h>
49455 + #include <net/ip.h>
49456 + #include <net/ipv6.h>
49457 + #include <net/ip6_route.h>
49458 +@@ -239,22 +237,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
49459 + return NF_STOLEN;
49460 + }
49461 +
49462 +-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
49463 +-{
49464 +- __be16 proto;
49465 +-
49466 +- proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
49467 +- sizeof(struct pppoe_hdr)));
49468 +- switch (proto) {
49469 +- case htons(PPP_IP):
49470 +- return htons(ETH_P_IP);
49471 +- case htons(PPP_IPV6):
49472 +- return htons(ETH_P_IPV6);
49473 +- }
49474 +-
49475 +- return 0;
49476 +-}
49477 +-
49478 + static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
49479 + u32 *offset)
49480 + {
49481 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
49482 +index 5adf8bb628a80..9c7472af9e4a1 100644
49483 +--- a/net/netfilter/nft_ct.c
49484 ++++ b/net/netfilter/nft_ct.c
49485 +@@ -1041,6 +1041,9 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
49486 + if (err < 0)
49487 + goto err_put_helper;
49488 +
49489 ++ /* Avoid the bogus warning, helper will be assigned after CT init */
49490 ++ nf_ct_set_auto_assign_helper_warned(ctx->net);
49491 ++
49492 + return 0;
49493 +
49494 + err_put_helper:
49495 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
49496 +index 7b344035bfe3f..47a876ccd2881 100644
49497 +--- a/net/netlink/af_netlink.c
49498 ++++ b/net/netlink/af_netlink.c
49499 +@@ -159,6 +159,8 @@ EXPORT_SYMBOL(do_trace_netlink_extack);
49500 +
49501 + static inline u32 netlink_group_mask(u32 group)
49502 + {
49503 ++ if (group > 32)
49504 ++ return 0;
49505 + return group ? 1 << (group - 1) : 0;
49506 + }
49507 +
49508 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
49509 +index c07afff57dd32..4a947c13c813a 100644
49510 +--- a/net/openvswitch/conntrack.c
49511 ++++ b/net/openvswitch/conntrack.c
49512 +@@ -734,6 +734,57 @@ static bool skb_nfct_cached(struct net *net,
49513 + }
49514 +
49515 + #if IS_ENABLED(CONFIG_NF_NAT)
49516 ++static void ovs_nat_update_key(struct sw_flow_key *key,
49517 ++ const struct sk_buff *skb,
49518 ++ enum nf_nat_manip_type maniptype)
49519 ++{
49520 ++ if (maniptype == NF_NAT_MANIP_SRC) {
49521 ++ __be16 src;
49522 ++
49523 ++ key->ct_state |= OVS_CS_F_SRC_NAT;
49524 ++ if (key->eth.type == htons(ETH_P_IP))
49525 ++ key->ipv4.addr.src = ip_hdr(skb)->saddr;
49526 ++ else if (key->eth.type == htons(ETH_P_IPV6))
49527 ++ memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
49528 ++ sizeof(key->ipv6.addr.src));
49529 ++ else
49530 ++ return;
49531 ++
49532 ++ if (key->ip.proto == IPPROTO_UDP)
49533 ++ src = udp_hdr(skb)->source;
49534 ++ else if (key->ip.proto == IPPROTO_TCP)
49535 ++ src = tcp_hdr(skb)->source;
49536 ++ else if (key->ip.proto == IPPROTO_SCTP)
49537 ++ src = sctp_hdr(skb)->source;
49538 ++ else
49539 ++ return;
49540 ++
49541 ++ key->tp.src = src;
49542 ++ } else {
49543 ++ __be16 dst;
49544 ++
49545 ++ key->ct_state |= OVS_CS_F_DST_NAT;
49546 ++ if (key->eth.type == htons(ETH_P_IP))
49547 ++ key->ipv4.addr.dst = ip_hdr(skb)->daddr;
49548 ++ else if (key->eth.type == htons(ETH_P_IPV6))
49549 ++ memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
49550 ++ sizeof(key->ipv6.addr.dst));
49551 ++ else
49552 ++ return;
49553 ++
49554 ++ if (key->ip.proto == IPPROTO_UDP)
49555 ++ dst = udp_hdr(skb)->dest;
49556 ++ else if (key->ip.proto == IPPROTO_TCP)
49557 ++ dst = tcp_hdr(skb)->dest;
49558 ++ else if (key->ip.proto == IPPROTO_SCTP)
49559 ++ dst = sctp_hdr(skb)->dest;
49560 ++ else
49561 ++ return;
49562 ++
49563 ++ key->tp.dst = dst;
49564 ++ }
49565 ++}
49566 ++
49567 + /* Modelled after nf_nat_ipv[46]_fn().
49568 + * range is only used for new, uninitialized NAT state.
49569 + * Returns either NF_ACCEPT or NF_DROP.
49570 +@@ -741,7 +792,7 @@ static bool skb_nfct_cached(struct net *net,
49571 + static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
49572 + enum ip_conntrack_info ctinfo,
49573 + const struct nf_nat_range2 *range,
49574 +- enum nf_nat_manip_type maniptype)
49575 ++ enum nf_nat_manip_type maniptype, struct sw_flow_key *key)
49576 + {
49577 + int hooknum, nh_off, err = NF_ACCEPT;
49578 +
49579 +@@ -813,58 +864,11 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
49580 + push:
49581 + skb_push_rcsum(skb, nh_off);
49582 +
49583 +- return err;
49584 +-}
49585 +-
49586 +-static void ovs_nat_update_key(struct sw_flow_key *key,
49587 +- const struct sk_buff *skb,
49588 +- enum nf_nat_manip_type maniptype)
49589 +-{
49590 +- if (maniptype == NF_NAT_MANIP_SRC) {
49591 +- __be16 src;
49592 +-
49593 +- key->ct_state |= OVS_CS_F_SRC_NAT;
49594 +- if (key->eth.type == htons(ETH_P_IP))
49595 +- key->ipv4.addr.src = ip_hdr(skb)->saddr;
49596 +- else if (key->eth.type == htons(ETH_P_IPV6))
49597 +- memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
49598 +- sizeof(key->ipv6.addr.src));
49599 +- else
49600 +- return;
49601 +-
49602 +- if (key->ip.proto == IPPROTO_UDP)
49603 +- src = udp_hdr(skb)->source;
49604 +- else if (key->ip.proto == IPPROTO_TCP)
49605 +- src = tcp_hdr(skb)->source;
49606 +- else if (key->ip.proto == IPPROTO_SCTP)
49607 +- src = sctp_hdr(skb)->source;
49608 +- else
49609 +- return;
49610 +-
49611 +- key->tp.src = src;
49612 +- } else {
49613 +- __be16 dst;
49614 +-
49615 +- key->ct_state |= OVS_CS_F_DST_NAT;
49616 +- if (key->eth.type == htons(ETH_P_IP))
49617 +- key->ipv4.addr.dst = ip_hdr(skb)->daddr;
49618 +- else if (key->eth.type == htons(ETH_P_IPV6))
49619 +- memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
49620 +- sizeof(key->ipv6.addr.dst));
49621 +- else
49622 +- return;
49623 +-
49624 +- if (key->ip.proto == IPPROTO_UDP)
49625 +- dst = udp_hdr(skb)->dest;
49626 +- else if (key->ip.proto == IPPROTO_TCP)
49627 +- dst = tcp_hdr(skb)->dest;
49628 +- else if (key->ip.proto == IPPROTO_SCTP)
49629 +- dst = sctp_hdr(skb)->dest;
49630 +- else
49631 +- return;
49632 ++ /* Update the flow key if NAT successful. */
49633 ++ if (err == NF_ACCEPT)
49634 ++ ovs_nat_update_key(key, skb, maniptype);
49635 +
49636 +- key->tp.dst = dst;
49637 +- }
49638 ++ return err;
49639 + }
49640 +
49641 + /* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
49642 +@@ -906,7 +910,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
49643 + } else {
49644 + return NF_ACCEPT; /* Connection is not NATed. */
49645 + }
49646 +- err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
49647 ++ err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype, key);
49648 +
49649 + if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
49650 + if (ct->status & IPS_SRC_NAT) {
49651 +@@ -916,17 +920,13 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
49652 + maniptype = NF_NAT_MANIP_SRC;
49653 +
49654 + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
49655 +- maniptype);
49656 ++ maniptype, key);
49657 + } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
49658 + err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
49659 +- NF_NAT_MANIP_SRC);
49660 ++ NF_NAT_MANIP_SRC, key);
49661 + }
49662 + }
49663 +
49664 +- /* Mark NAT done if successful and update the flow key. */
49665 +- if (err == NF_ACCEPT)
49666 +- ovs_nat_update_key(key, skb, maniptype);
49667 +-
49668 + return err;
49669 + }
49670 + #else /* !CONFIG_NF_NAT */
49671 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
49672 +index fd1f809e9bc1b..0d677c9c2c805 100644
49673 +--- a/net/openvswitch/flow_netlink.c
49674 ++++ b/net/openvswitch/flow_netlink.c
49675 +@@ -2201,8 +2201,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
49676 + icmpv6_key->icmpv6_type = ntohs(output->tp.src);
49677 + icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
49678 +
49679 +- if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
49680 +- icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
49681 ++ if (swkey->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
49682 ++ swkey->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
49683 + struct ovs_key_nd *nd_key;
49684 +
49685 + nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
49686 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
49687 +index 5b1927d66f0da..dac4fdc7488a3 100644
49688 +--- a/net/rfkill/core.c
49689 ++++ b/net/rfkill/core.c
49690 +@@ -78,6 +78,7 @@ struct rfkill_data {
49691 + struct mutex mtx;
49692 + wait_queue_head_t read_wait;
49693 + bool input_handler;
49694 ++ u8 max_size;
49695 + };
49696 +
49697 +
49698 +@@ -1153,6 +1154,8 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
49699 + if (!data)
49700 + return -ENOMEM;
49701 +
49702 ++ data->max_size = RFKILL_EVENT_SIZE_V1;
49703 ++
49704 + INIT_LIST_HEAD(&data->events);
49705 + mutex_init(&data->mtx);
49706 + init_waitqueue_head(&data->read_wait);
49707 +@@ -1235,6 +1238,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
49708 + list);
49709 +
49710 + sz = min_t(unsigned long, sizeof(ev->ev), count);
49711 ++ sz = min_t(unsigned long, sz, data->max_size);
49712 + ret = sz;
49713 + if (copy_to_user(buf, &ev->ev, sz))
49714 + ret = -EFAULT;
49715 +@@ -1249,6 +1253,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
49716 + static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
49717 + size_t count, loff_t *pos)
49718 + {
49719 ++ struct rfkill_data *data = file->private_data;
49720 + struct rfkill *rfkill;
49721 + struct rfkill_event_ext ev;
49722 + int ret;
49723 +@@ -1263,6 +1268,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
49724 + * our API version even in a write() call, if it cares.
49725 + */
49726 + count = min(count, sizeof(ev));
49727 ++ count = min_t(size_t, count, data->max_size);
49728 + if (copy_from_user(&ev, buf, count))
49729 + return -EFAULT;
49730 +
49731 +@@ -1322,31 +1328,47 @@ static int rfkill_fop_release(struct inode *inode, struct file *file)
49732 + return 0;
49733 + }
49734 +
49735 +-#ifdef CONFIG_RFKILL_INPUT
49736 + static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
49737 + unsigned long arg)
49738 + {
49739 + struct rfkill_data *data = file->private_data;
49740 ++ int ret = -ENOSYS;
49741 ++ u32 size;
49742 +
49743 + if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
49744 + return -ENOSYS;
49745 +
49746 +- if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
49747 +- return -ENOSYS;
49748 +-
49749 + mutex_lock(&data->mtx);
49750 +-
49751 +- if (!data->input_handler) {
49752 +- if (atomic_inc_return(&rfkill_input_disabled) == 1)
49753 +- printk(KERN_DEBUG "rfkill: input handler disabled\n");
49754 +- data->input_handler = true;
49755 ++ switch (_IOC_NR(cmd)) {
49756 ++#ifdef CONFIG_RFKILL_INPUT
49757 ++ case RFKILL_IOC_NOINPUT:
49758 ++ if (!data->input_handler) {
49759 ++ if (atomic_inc_return(&rfkill_input_disabled) == 1)
49760 ++ printk(KERN_DEBUG "rfkill: input handler disabled\n");
49761 ++ data->input_handler = true;
49762 ++ }
49763 ++ ret = 0;
49764 ++ break;
49765 ++#endif
49766 ++ case RFKILL_IOC_MAX_SIZE:
49767 ++ if (get_user(size, (__u32 __user *)arg)) {
49768 ++ ret = -EFAULT;
49769 ++ break;
49770 ++ }
49771 ++ if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
49772 ++ ret = -EINVAL;
49773 ++ break;
49774 ++ }
49775 ++ data->max_size = size;
49776 ++ ret = 0;
49777 ++ break;
49778 ++ default:
49779 ++ break;
49780 + }
49781 +-
49782 + mutex_unlock(&data->mtx);
49783 +
49784 +- return 0;
49785 ++ return ret;
49786 + }
49787 +-#endif
49788 +
49789 + static const struct file_operations rfkill_fops = {
49790 + .owner = THIS_MODULE,
49791 +@@ -1355,10 +1377,8 @@ static const struct file_operations rfkill_fops = {
49792 + .write = rfkill_fop_write,
49793 + .poll = rfkill_fop_poll,
49794 + .release = rfkill_fop_release,
49795 +-#ifdef CONFIG_RFKILL_INPUT
49796 + .unlocked_ioctl = rfkill_fop_ioctl,
49797 + .compat_ioctl = compat_ptr_ioctl,
49798 +-#endif
49799 + .llseek = no_llseek,
49800 + };
49801 +
49802 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
49803 +index 7bd6f8a66a3ef..969e532f77a90 100644
49804 +--- a/net/rxrpc/ar-internal.h
49805 ++++ b/net/rxrpc/ar-internal.h
49806 +@@ -777,14 +777,12 @@ void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
49807 + enum rxrpc_propose_ack_trace);
49808 + void rxrpc_process_call(struct work_struct *);
49809 +
49810 +-static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
49811 +- unsigned long expire_at,
49812 +- unsigned long now,
49813 +- enum rxrpc_timer_trace why)
49814 +-{
49815 +- trace_rxrpc_timer(call, why, now);
49816 +- timer_reduce(&call->timer, expire_at);
49817 +-}
49818 ++void rxrpc_reduce_call_timer(struct rxrpc_call *call,
49819 ++ unsigned long expire_at,
49820 ++ unsigned long now,
49821 ++ enum rxrpc_timer_trace why);
49822 ++
49823 ++void rxrpc_delete_call_timer(struct rxrpc_call *call);
49824 +
49825 + /*
49826 + * call_object.c
49827 +@@ -808,6 +806,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
49828 + bool __rxrpc_queue_call(struct rxrpc_call *);
49829 + bool rxrpc_queue_call(struct rxrpc_call *);
49830 + void rxrpc_see_call(struct rxrpc_call *);
49831 ++bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op);
49832 + void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
49833 + void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
49834 + void rxrpc_cleanup_call(struct rxrpc_call *);
49835 +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
49836 +index df864e6922679..22e05de5d1ca9 100644
49837 +--- a/net/rxrpc/call_event.c
49838 ++++ b/net/rxrpc/call_event.c
49839 +@@ -310,7 +310,7 @@ recheck_state:
49840 + }
49841 +
49842 + if (call->state == RXRPC_CALL_COMPLETE) {
49843 +- del_timer_sync(&call->timer);
49844 ++ rxrpc_delete_call_timer(call);
49845 + goto out_put;
49846 + }
49847 +
49848 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
49849 +index 4eb91d958a48d..043508fd8d8a5 100644
49850 +--- a/net/rxrpc/call_object.c
49851 ++++ b/net/rxrpc/call_object.c
49852 +@@ -53,10 +53,30 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
49853 +
49854 + if (call->state < RXRPC_CALL_COMPLETE) {
49855 + trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
49856 +- rxrpc_queue_call(call);
49857 ++ __rxrpc_queue_call(call);
49858 ++ } else {
49859 ++ rxrpc_put_call(call, rxrpc_call_put);
49860 ++ }
49861 ++}
49862 ++
49863 ++void rxrpc_reduce_call_timer(struct rxrpc_call *call,
49864 ++ unsigned long expire_at,
49865 ++ unsigned long now,
49866 ++ enum rxrpc_timer_trace why)
49867 ++{
49868 ++ if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
49869 ++ trace_rxrpc_timer(call, why, now);
49870 ++ if (timer_reduce(&call->timer, expire_at))
49871 ++ rxrpc_put_call(call, rxrpc_call_put_notimer);
49872 + }
49873 + }
49874 +
49875 ++void rxrpc_delete_call_timer(struct rxrpc_call *call)
49876 ++{
49877 ++ if (del_timer_sync(&call->timer))
49878 ++ rxrpc_put_call(call, rxrpc_call_put_timer);
49879 ++}
49880 ++
49881 + static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
49882 +
49883 + /*
49884 +@@ -463,6 +483,17 @@ void rxrpc_see_call(struct rxrpc_call *call)
49885 + }
49886 + }
49887 +
49888 ++bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
49889 ++{
49890 ++ const void *here = __builtin_return_address(0);
49891 ++ int n = atomic_fetch_add_unless(&call->usage, 1, 0);
49892 ++
49893 ++ if (n == 0)
49894 ++ return false;
49895 ++ trace_rxrpc_call(call->debug_id, op, n, here, NULL);
49896 ++ return true;
49897 ++}
49898 ++
49899 + /*
49900 + * Note the addition of a ref on a call.
49901 + */
49902 +@@ -510,8 +541,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
49903 + spin_unlock_bh(&call->lock);
49904 +
49905 + rxrpc_put_call_slot(call);
49906 +-
49907 +- del_timer_sync(&call->timer);
49908 ++ rxrpc_delete_call_timer(call);
49909 +
49910 + /* Make sure we don't get any more notifications */
49911 + write_lock_bh(&rx->recvmsg_lock);
49912 +@@ -618,6 +648,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
49913 + struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
49914 + struct rxrpc_net *rxnet = call->rxnet;
49915 +
49916 ++ rxrpc_delete_call_timer(call);
49917 ++
49918 + rxrpc_put_connection(call->conn);
49919 + rxrpc_put_peer(call->peer);
49920 + kfree(call->rxtx_buffer);
49921 +@@ -652,8 +684,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
49922 +
49923 + memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
49924 +
49925 +- del_timer_sync(&call->timer);
49926 +-
49927 + ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
49928 + ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
49929 +
49930 +diff --git a/net/rxrpc/server_key.c b/net/rxrpc/server_key.c
49931 +index ead3471307ee5..ee269e0e6ee87 100644
49932 +--- a/net/rxrpc/server_key.c
49933 ++++ b/net/rxrpc/server_key.c
49934 +@@ -84,6 +84,9 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
49935 +
49936 + prep->payload.data[1] = (struct rxrpc_security *)sec;
49937 +
49938 ++ if (!sec->preparse_server_key)
49939 ++ return -EINVAL;
49940 ++
49941 + return sec->preparse_server_key(prep);
49942 + }
49943 +
49944 +@@ -91,7 +94,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
49945 + {
49946 + const struct rxrpc_security *sec = prep->payload.data[1];
49947 +
49948 +- if (sec)
49949 ++ if (sec && sec->free_preparse_server_key)
49950 + sec->free_preparse_server_key(prep);
49951 + }
49952 +
49953 +@@ -99,7 +102,7 @@ static void rxrpc_destroy_s(struct key *key)
49954 + {
49955 + const struct rxrpc_security *sec = key->payload.data[1];
49956 +
49957 +- if (sec)
49958 ++ if (sec && sec->destroy_server_key)
49959 + sec->destroy_server_key(key);
49960 + }
49961 +
49962 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
49963 +index ec19f625863a0..25718acc0ff00 100644
49964 +--- a/net/sched/act_ct.c
49965 ++++ b/net/sched/act_ct.c
49966 +@@ -605,22 +605,25 @@ static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
49967 + if (!ct)
49968 + return false;
49969 + if (!net_eq(net, read_pnet(&ct->ct_net)))
49970 +- return false;
49971 ++ goto drop_ct;
49972 + if (nf_ct_zone(ct)->id != zone_id)
49973 +- return false;
49974 ++ goto drop_ct;
49975 +
49976 + /* Force conntrack entry direction. */
49977 + if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
49978 + if (nf_ct_is_confirmed(ct))
49979 + nf_ct_kill(ct);
49980 +
49981 +- nf_ct_put(ct);
49982 +- nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
49983 +-
49984 +- return false;
49985 ++ goto drop_ct;
49986 + }
49987 +
49988 + return true;
49989 ++
49990 ++drop_ct:
49991 ++ nf_ct_put(ct);
49992 ++ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
49993 ++
49994 ++ return false;
49995 + }
49996 +
49997 + /* Trim the skb to the length specified by the IP/IPv6 header,
49998 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
49999 +index cc544a97c4afd..7f342bc127358 100644
50000 +--- a/net/sctp/sm_statefuns.c
50001 ++++ b/net/sctp/sm_statefuns.c
50002 +@@ -930,6 +930,11 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
50003 + if (!sctp_vtag_verify(chunk, asoc))
50004 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
50005 +
50006 ++ /* Set peer label for connection. */
50007 ++ if (security_sctp_assoc_established((struct sctp_association *)asoc,
50008 ++ chunk->skb))
50009 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
50010 ++
50011 + /* Verify that the chunk length for the COOKIE-ACK is OK.
50012 + * If we don't do this, any bundled chunks may be junked.
50013 + */
50014 +@@ -945,9 +950,6 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
50015 + */
50016 + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
50017 +
50018 +- /* Set peer label for connection. */
50019 +- security_inet_conn_established(ep->base.sk, chunk->skb);
50020 +-
50021 + /* RFC 2960 5.1 Normal Establishment of an Association
50022 + *
50023 + * E) Upon reception of the COOKIE ACK, endpoint "A" will move
50024 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
50025 +index c83fe618767c4..b36d235d2d6d9 100644
50026 +--- a/net/sunrpc/clnt.c
50027 ++++ b/net/sunrpc/clnt.c
50028 +@@ -1065,7 +1065,9 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt)
50029 + static
50030 + void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
50031 + {
50032 +- if (task->tk_xprt)
50033 ++ if (task->tk_xprt &&
50034 ++ !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
50035 ++ (task->tk_flags & RPC_TASK_MOVEABLE)))
50036 + return;
50037 + if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
50038 + task->tk_xprt = rpc_task_get_first_xprt(clnt);
50039 +@@ -1085,8 +1087,6 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
50040 + task->tk_flags |= RPC_TASK_TIMEOUT;
50041 + if (clnt->cl_noretranstimeo)
50042 + task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
50043 +- if (atomic_read(&clnt->cl_swapper))
50044 +- task->tk_flags |= RPC_TASK_SWAPPER;
50045 + /* Add to the client's list of all tasks */
50046 + spin_lock(&clnt->cl_lock);
50047 + list_add_tail(&task->tk_task, &clnt->cl_tasks);
50048 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
50049 +index e2c835482791e..ae295844ac55a 100644
50050 +--- a/net/sunrpc/sched.c
50051 ++++ b/net/sunrpc/sched.c
50052 +@@ -876,6 +876,15 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
50053 + ops->rpc_release(calldata);
50054 + }
50055 +
50056 ++static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
50057 ++{
50058 ++ if (!xprt)
50059 ++ return false;
50060 ++ if (!atomic_read(&xprt->swapper))
50061 ++ return false;
50062 ++ return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
50063 ++}
50064 ++
50065 + /*
50066 + * This is the RPC `scheduler' (or rather, the finite state machine).
50067 + */
50068 +@@ -884,6 +893,7 @@ static void __rpc_execute(struct rpc_task *task)
50069 + struct rpc_wait_queue *queue;
50070 + int task_is_async = RPC_IS_ASYNC(task);
50071 + int status = 0;
50072 ++ unsigned long pflags = current->flags;
50073 +
50074 + WARN_ON_ONCE(RPC_IS_QUEUED(task));
50075 + if (RPC_IS_QUEUED(task))
50076 +@@ -906,6 +916,10 @@ static void __rpc_execute(struct rpc_task *task)
50077 + }
50078 + if (!do_action)
50079 + break;
50080 ++ if (RPC_IS_SWAPPER(task) ||
50081 ++ xprt_needs_memalloc(task->tk_xprt, task))
50082 ++ current->flags |= PF_MEMALLOC;
50083 ++
50084 + trace_rpc_task_run_action(task, do_action);
50085 + do_action(task);
50086 +
50087 +@@ -943,7 +957,7 @@ static void __rpc_execute(struct rpc_task *task)
50088 + rpc_clear_running(task);
50089 + spin_unlock(&queue->lock);
50090 + if (task_is_async)
50091 +- return;
50092 ++ goto out;
50093 +
50094 + /* sync task: sleep here */
50095 + trace_rpc_task_sync_sleep(task, task->tk_action);
50096 +@@ -967,6 +981,8 @@ static void __rpc_execute(struct rpc_task *task)
50097 +
50098 + /* Release all resources associated with the task */
50099 + rpc_release_task(task);
50100 ++out:
50101 ++ current_restore_flags(pflags, PF_MEMALLOC);
50102 + }
50103 +
50104 + /*
50105 +@@ -1023,8 +1039,8 @@ int rpc_malloc(struct rpc_task *task)
50106 + struct rpc_buffer *buf;
50107 + gfp_t gfp = GFP_NOFS;
50108 +
50109 +- if (RPC_IS_SWAPPER(task))
50110 +- gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
50111 ++ if (RPC_IS_ASYNC(task))
50112 ++ gfp = GFP_NOWAIT | __GFP_NOWARN;
50113 +
50114 + size += sizeof(struct rpc_buffer);
50115 + if (size <= RPC_BUFFER_MAXSIZE)
50116 +diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
50117 +index 05c758da6a92a..9d8a7d9f3e412 100644
50118 +--- a/net/sunrpc/sysfs.c
50119 ++++ b/net/sunrpc/sysfs.c
50120 +@@ -97,7 +97,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
50121 + return 0;
50122 + ret = sprintf(buf, "%s\n", xprt->address_strings[RPC_DISPLAY_ADDR]);
50123 + xprt_put(xprt);
50124 +- return ret + 1;
50125 ++ return ret;
50126 + }
50127 +
50128 + static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
50129 +@@ -105,33 +105,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
50130 + char *buf)
50131 + {
50132 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
50133 +- struct sockaddr_storage saddr;
50134 +- struct sock_xprt *sock;
50135 +- ssize_t ret = -1;
50136 ++ size_t buflen = PAGE_SIZE;
50137 ++ ssize_t ret = -ENOTSOCK;
50138 +
50139 + if (!xprt || !xprt_connected(xprt)) {
50140 +- xprt_put(xprt);
50141 +- return -ENOTCONN;
50142 ++ ret = -ENOTCONN;
50143 ++ } else if (xprt->ops->get_srcaddr) {
50144 ++ ret = xprt->ops->get_srcaddr(xprt, buf, buflen);
50145 ++ if (ret > 0) {
50146 ++ if (ret < buflen - 1) {
50147 ++ buf[ret] = '\n';
50148 ++ ret++;
50149 ++ buf[ret] = '\0';
50150 ++ }
50151 ++ }
50152 + }
50153 +-
50154 +- sock = container_of(xprt, struct sock_xprt, xprt);
50155 +- mutex_lock(&sock->recv_mutex);
50156 +- if (sock->sock == NULL ||
50157 +- kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
50158 +- goto out;
50159 +-
50160 +- ret = sprintf(buf, "%pISc\n", &saddr);
50161 +-out:
50162 +- mutex_unlock(&sock->recv_mutex);
50163 + xprt_put(xprt);
50164 +- return ret + 1;
50165 ++ return ret;
50166 + }
50167 +
50168 + static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
50169 +- struct kobj_attribute *attr,
50170 +- char *buf)
50171 ++ struct kobj_attribute *attr, char *buf)
50172 + {
50173 + struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
50174 ++ unsigned short srcport = 0;
50175 ++ size_t buflen = PAGE_SIZE;
50176 + ssize_t ret;
50177 +
50178 + if (!xprt || !xprt_connected(xprt)) {
50179 +@@ -139,7 +137,11 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
50180 + return -ENOTCONN;
50181 + }
50182 +
50183 +- ret = sprintf(buf, "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n"
50184 ++ if (xprt->ops->get_srcport)
50185 ++ srcport = xprt->ops->get_srcport(xprt);
50186 ++
50187 ++ ret = snprintf(buf, buflen,
50188 ++ "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n"
50189 + "max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n"
50190 + "binding_q_len=%u\nsending_q_len=%u\npending_q_len=%u\n"
50191 + "backlog_q_len=%u\nmain_xprt=%d\nsrc_port=%u\n"
50192 +@@ -147,14 +149,11 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
50193 + xprt->last_used, xprt->cong, xprt->cwnd, xprt->max_reqs,
50194 + xprt->min_reqs, xprt->num_reqs, xprt->binding.qlen,
50195 + xprt->sending.qlen, xprt->pending.qlen,
50196 +- xprt->backlog.qlen, xprt->main,
50197 +- (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ?
50198 +- get_srcport(xprt) : 0,
50199 ++ xprt->backlog.qlen, xprt->main, srcport,
50200 + atomic_long_read(&xprt->queuelen),
50201 +- (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ?
50202 +- xprt->address_strings[RPC_DISPLAY_PORT] : "0");
50203 ++ xprt->address_strings[RPC_DISPLAY_PORT]);
50204 + xprt_put(xprt);
50205 +- return ret + 1;
50206 ++ return ret;
50207 + }
50208 +
50209 + static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
50210 +@@ -201,7 +200,7 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
50211 + }
50212 +
50213 + xprt_put(xprt);
50214 +- return ret + 1;
50215 ++ return ret;
50216 + }
50217 +
50218 + static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
50219 +@@ -220,7 +219,7 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
50220 + xprt_switch->xps_nunique_destaddr_xprts,
50221 + atomic_long_read(&xprt_switch->xps_queuelen));
50222 + xprt_switch_put(xprt_switch);
50223 +- return ret + 1;
50224 ++ return ret;
50225 + }
50226 +
50227 + static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
50228 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
50229 +index a02de2bddb28b..396a74974f60f 100644
50230 +--- a/net/sunrpc/xprt.c
50231 ++++ b/net/sunrpc/xprt.c
50232 +@@ -1503,6 +1503,9 @@ bool xprt_prepare_transmit(struct rpc_task *task)
50233 + return false;
50234 +
50235 + }
50236 ++ if (atomic_read(&xprt->swapper))
50237 ++ /* This will be clear in __rpc_execute */
50238 ++ current->flags |= PF_MEMALLOC;
50239 + return true;
50240 + }
50241 +
50242 +@@ -2112,7 +2115,14 @@ static void xprt_destroy(struct rpc_xprt *xprt)
50243 + */
50244 + wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
50245 +
50246 ++ /*
50247 ++ * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
50248 ++ * is cleared. We use ->transport_lock to ensure the mod_timer()
50249 ++ * can only run *before* del_time_sync(), never after.
50250 ++ */
50251 ++ spin_lock(&xprt->transport_lock);
50252 + del_timer_sync(&xprt->timer);
50253 ++ spin_unlock(&xprt->transport_lock);
50254 +
50255 + /*
50256 + * Destroy sockets etc from the system workqueue so they can
50257 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
50258 +index 42e375dbdadb4..ff78a296fa810 100644
50259 +--- a/net/sunrpc/xprtrdma/transport.c
50260 ++++ b/net/sunrpc/xprtrdma/transport.c
50261 +@@ -235,8 +235,11 @@ xprt_rdma_connect_worker(struct work_struct *work)
50262 + struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
50263 + rx_connect_worker.work);
50264 + struct rpc_xprt *xprt = &r_xprt->rx_xprt;
50265 ++ unsigned int pflags = current->flags;
50266 + int rc;
50267 +
50268 ++ if (atomic_read(&xprt->swapper))
50269 ++ current->flags |= PF_MEMALLOC;
50270 + rc = rpcrdma_xprt_connect(r_xprt);
50271 + xprt_clear_connecting(xprt);
50272 + if (!rc) {
50273 +@@ -250,6 +253,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
50274 + rpcrdma_xprt_disconnect(r_xprt);
50275 + xprt_unlock_connect(xprt, r_xprt);
50276 + xprt_wake_pending_tasks(xprt, rc);
50277 ++ current_restore_flags(pflags, PF_MEMALLOC);
50278 + }
50279 +
50280 + /**
50281 +@@ -570,8 +574,8 @@ xprt_rdma_allocate(struct rpc_task *task)
50282 + gfp_t flags;
50283 +
50284 + flags = RPCRDMA_DEF_GFP;
50285 +- if (RPC_IS_SWAPPER(task))
50286 +- flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
50287 ++ if (RPC_IS_ASYNC(task))
50288 ++ flags = GFP_NOWAIT | __GFP_NOWARN;
50289 +
50290 + if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
50291 + flags))
50292 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
50293 +index 0f39e08ee580e..11eab0f0333b0 100644
50294 +--- a/net/sunrpc/xprtsock.c
50295 ++++ b/net/sunrpc/xprtsock.c
50296 +@@ -1638,7 +1638,7 @@ static int xs_get_srcport(struct sock_xprt *transport)
50297 + return port;
50298 + }
50299 +
50300 +-unsigned short get_srcport(struct rpc_xprt *xprt)
50301 ++static unsigned short xs_sock_srcport(struct rpc_xprt *xprt)
50302 + {
50303 + struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
50304 + unsigned short ret = 0;
50305 +@@ -1648,7 +1648,25 @@ unsigned short get_srcport(struct rpc_xprt *xprt)
50306 + mutex_unlock(&sock->recv_mutex);
50307 + return ret;
50308 + }
50309 +-EXPORT_SYMBOL(get_srcport);
50310 ++
50311 ++static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen)
50312 ++{
50313 ++ struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
50314 ++ union {
50315 ++ struct sockaddr sa;
50316 ++ struct sockaddr_storage st;
50317 ++ } saddr;
50318 ++ int ret = -ENOTCONN;
50319 ++
50320 ++ mutex_lock(&sock->recv_mutex);
50321 ++ if (sock->sock) {
50322 ++ ret = kernel_getsockname(sock->sock, &saddr.sa);
50323 ++ if (ret >= 0)
50324 ++ ret = snprintf(buf, buflen, "%pISc", &saddr.sa);
50325 ++ }
50326 ++ mutex_unlock(&sock->recv_mutex);
50327 ++ return ret;
50328 ++}
50329 +
50330 + static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
50331 + {
50332 +@@ -2052,7 +2070,10 @@ static void xs_udp_setup_socket(struct work_struct *work)
50333 + struct rpc_xprt *xprt = &transport->xprt;
50334 + struct socket *sock;
50335 + int status = -EIO;
50336 ++ unsigned int pflags = current->flags;
50337 +
50338 ++ if (atomic_read(&xprt->swapper))
50339 ++ current->flags |= PF_MEMALLOC;
50340 + sock = xs_create_sock(xprt, transport,
50341 + xs_addr(xprt)->sa_family, SOCK_DGRAM,
50342 + IPPROTO_UDP, false);
50343 +@@ -2072,6 +2093,7 @@ out:
50344 + xprt_clear_connecting(xprt);
50345 + xprt_unlock_connect(xprt, transport);
50346 + xprt_wake_pending_tasks(xprt, status);
50347 ++ current_restore_flags(pflags, PF_MEMALLOC);
50348 + }
50349 +
50350 + /**
50351 +@@ -2231,11 +2253,19 @@ static void xs_tcp_setup_socket(struct work_struct *work)
50352 + struct socket *sock = transport->sock;
50353 + struct rpc_xprt *xprt = &transport->xprt;
50354 + int status;
50355 ++ unsigned int pflags = current->flags;
50356 ++
50357 ++ if (atomic_read(&xprt->swapper))
50358 ++ current->flags |= PF_MEMALLOC;
50359 +
50360 +- if (!sock) {
50361 +- sock = xs_create_sock(xprt, transport,
50362 +- xs_addr(xprt)->sa_family, SOCK_STREAM,
50363 +- IPPROTO_TCP, true);
50364 ++ if (xprt_connected(xprt))
50365 ++ goto out;
50366 ++ if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT,
50367 ++ &transport->sock_state) ||
50368 ++ !sock) {
50369 ++ xs_reset_transport(transport);
50370 ++ sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family,
50371 ++ SOCK_STREAM, IPPROTO_TCP, true);
50372 + if (IS_ERR(sock)) {
50373 + xprt_wake_pending_tasks(xprt, PTR_ERR(sock));
50374 + goto out;
50375 +@@ -2259,6 +2289,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
50376 + fallthrough;
50377 + case -EINPROGRESS:
50378 + /* SYN_SENT! */
50379 ++ set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state);
50380 + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
50381 + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
50382 + fallthrough;
50383 +@@ -2296,6 +2327,7 @@ out:
50384 + xprt_clear_connecting(xprt);
50385 + out_unlock:
50386 + xprt_unlock_connect(xprt, transport);
50387 ++ current_restore_flags(pflags, PF_MEMALLOC);
50388 + }
50389 +
50390 + /**
50391 +@@ -2319,13 +2351,9 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
50392 +
50393 + WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
50394 +
50395 +- if (transport->sock != NULL && !xprt_connecting(xprt)) {
50396 ++ if (transport->sock != NULL) {
50397 + dprintk("RPC: xs_connect delayed xprt %p for %lu "
50398 +- "seconds\n",
50399 +- xprt, xprt->reestablish_timeout / HZ);
50400 +-
50401 +- /* Start by resetting any existing state */
50402 +- xs_reset_transport(transport);
50403 ++ "seconds\n", xprt, xprt->reestablish_timeout / HZ);
50404 +
50405 + delay = xprt_reconnect_delay(xprt);
50406 + xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
50407 +@@ -2621,6 +2649,8 @@ static const struct rpc_xprt_ops xs_udp_ops = {
50408 + .rpcbind = rpcb_getport_async,
50409 + .set_port = xs_set_port,
50410 + .connect = xs_connect,
50411 ++ .get_srcaddr = xs_sock_srcaddr,
50412 ++ .get_srcport = xs_sock_srcport,
50413 + .buf_alloc = rpc_malloc,
50414 + .buf_free = rpc_free,
50415 + .send_request = xs_udp_send_request,
50416 +@@ -2643,6 +2673,8 @@ static const struct rpc_xprt_ops xs_tcp_ops = {
50417 + .rpcbind = rpcb_getport_async,
50418 + .set_port = xs_set_port,
50419 + .connect = xs_connect,
50420 ++ .get_srcaddr = xs_sock_srcaddr,
50421 ++ .get_srcport = xs_sock_srcport,
50422 + .buf_alloc = rpc_malloc,
50423 + .buf_free = rpc_free,
50424 + .prepare_request = xs_stream_prepare_request,
50425 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
50426 +index 7545321c3440b..17f8c523e33b0 100644
50427 +--- a/net/tipc/socket.c
50428 ++++ b/net/tipc/socket.c
50429 +@@ -2852,7 +2852,8 @@ static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
50430 +
50431 + /* Try again later if dest link is congested */
50432 + if (tsk->cong_link_cnt) {
50433 +- sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
50434 ++ sk_reset_timer(sk, &sk->sk_timer,
50435 ++ jiffies + msecs_to_jiffies(100));
50436 + return;
50437 + }
50438 + /* Prepare SYN for retransmit */
50439 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
50440 +index c19569819866e..1e7ed5829ed51 100644
50441 +--- a/net/unix/af_unix.c
50442 ++++ b/net/unix/af_unix.c
50443 +@@ -2084,7 +2084,7 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
50444 + if (ousk->oob_skb)
50445 + consume_skb(ousk->oob_skb);
50446 +
50447 +- ousk->oob_skb = skb;
50448 ++ WRITE_ONCE(ousk->oob_skb, skb);
50449 +
50450 + scm_stat_add(other, skb);
50451 + skb_queue_tail(&other->sk_receive_queue, skb);
50452 +@@ -2602,9 +2602,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
50453 +
50454 + oob_skb = u->oob_skb;
50455 +
50456 +- if (!(state->flags & MSG_PEEK)) {
50457 +- u->oob_skb = NULL;
50458 +- }
50459 ++ if (!(state->flags & MSG_PEEK))
50460 ++ WRITE_ONCE(u->oob_skb, NULL);
50461 +
50462 + unix_state_unlock(sk);
50463 +
50464 +@@ -2639,7 +2638,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
50465 + skb = NULL;
50466 + } else if (sock_flag(sk, SOCK_URGINLINE)) {
50467 + if (!(flags & MSG_PEEK)) {
50468 +- u->oob_skb = NULL;
50469 ++ WRITE_ONCE(u->oob_skb, NULL);
50470 + consume_skb(skb);
50471 + }
50472 + } else if (!(flags & MSG_PEEK)) {
50473 +@@ -3094,11 +3093,10 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
50474 + case SIOCATMARK:
50475 + {
50476 + struct sk_buff *skb;
50477 +- struct unix_sock *u = unix_sk(sk);
50478 + int answ = 0;
50479 +
50480 + skb = skb_peek(&sk->sk_receive_queue);
50481 +- if (skb && skb == u->oob_skb)
50482 ++ if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
50483 + answ = 1;
50484 + err = put_user(answ, (int __user *)arg);
50485 + }
50486 +@@ -3139,6 +3137,10 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
50487 + mask |= EPOLLIN | EPOLLRDNORM;
50488 + if (sk_is_readable(sk))
50489 + mask |= EPOLLIN | EPOLLRDNORM;
50490 ++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
50491 ++ if (READ_ONCE(unix_sk(sk)->oob_skb))
50492 ++ mask |= EPOLLPRI;
50493 ++#endif
50494 +
50495 + /* Connection-based need to check for termination and startup */
50496 + if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
50497 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
50498 +index 5afc194a58bbd..ba1c8cc0c4671 100644
50499 +--- a/net/vmw_vsock/virtio_transport.c
50500 ++++ b/net/vmw_vsock/virtio_transport.c
50501 +@@ -622,6 +622,13 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
50502 + INIT_WORK(&vsock->event_work, virtio_transport_event_work);
50503 + INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
50504 +
50505 ++ if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
50506 ++ vsock->seqpacket_allow = true;
50507 ++
50508 ++ vdev->priv = vsock;
50509 ++
50510 ++ virtio_device_ready(vdev);
50511 ++
50512 + mutex_lock(&vsock->tx_lock);
50513 + vsock->tx_run = true;
50514 + mutex_unlock(&vsock->tx_lock);
50515 +@@ -636,10 +643,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
50516 + vsock->event_run = true;
50517 + mutex_unlock(&vsock->event_lock);
50518 +
50519 +- if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
50520 +- vsock->seqpacket_allow = true;
50521 +-
50522 +- vdev->priv = vsock;
50523 + rcu_assign_pointer(the_virtio_vsock, vsock);
50524 +
50525 + mutex_unlock(&the_virtio_vsock_mutex);
50526 +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
50527 +index 3583354a7d7fe..3a171828638b1 100644
50528 +--- a/net/x25/af_x25.c
50529 ++++ b/net/x25/af_x25.c
50530 +@@ -1765,10 +1765,15 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
50531 +
50532 + write_lock_bh(&x25_list_lock);
50533 +
50534 +- sk_for_each(s, &x25_list)
50535 +- if (x25_sk(s)->neighbour == nb)
50536 ++ sk_for_each(s, &x25_list) {
50537 ++ if (x25_sk(s)->neighbour == nb) {
50538 ++ write_unlock_bh(&x25_list_lock);
50539 ++ lock_sock(s);
50540 + x25_disconnect(s, ENETUNREACH, 0, 0);
50541 +-
50542 ++ release_sock(s);
50543 ++ write_lock_bh(&x25_list_lock);
50544 ++ }
50545 ++ }
50546 + write_unlock_bh(&x25_list_lock);
50547 +
50548 + /* Remove any related forwards */
50549 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
50550 +index 28ef3f4465ae9..ac343cd8ff3f6 100644
50551 +--- a/net/xdp/xsk.c
50552 ++++ b/net/xdp/xsk.c
50553 +@@ -403,18 +403,8 @@ EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
50554 + static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
50555 + {
50556 + struct net_device *dev = xs->dev;
50557 +- int err;
50558 +-
50559 +- rcu_read_lock();
50560 +- err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
50561 +- rcu_read_unlock();
50562 +-
50563 +- return err;
50564 +-}
50565 +
50566 +-static int xsk_zc_xmit(struct xdp_sock *xs)
50567 +-{
50568 +- return xsk_wakeup(xs, XDP_WAKEUP_TX);
50569 ++ return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
50570 + }
50571 +
50572 + static void xsk_destruct_skb(struct sk_buff *skb)
50573 +@@ -533,6 +523,12 @@ static int xsk_generic_xmit(struct sock *sk)
50574 +
50575 + mutex_lock(&xs->mutex);
50576 +
50577 ++ /* Since we dropped the RCU read lock, the socket state might have changed. */
50578 ++ if (unlikely(!xsk_is_bound(xs))) {
50579 ++ err = -ENXIO;
50580 ++ goto out;
50581 ++ }
50582 ++
50583 + if (xs->queue_id >= xs->dev->real_num_tx_queues)
50584 + goto out;
50585 +
50586 +@@ -596,16 +592,26 @@ out:
50587 + return err;
50588 + }
50589 +
50590 +-static int __xsk_sendmsg(struct sock *sk)
50591 ++static int xsk_xmit(struct sock *sk)
50592 + {
50593 + struct xdp_sock *xs = xdp_sk(sk);
50594 ++ int ret;
50595 +
50596 + if (unlikely(!(xs->dev->flags & IFF_UP)))
50597 + return -ENETDOWN;
50598 + if (unlikely(!xs->tx))
50599 + return -ENOBUFS;
50600 +
50601 +- return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
50602 ++ if (xs->zc)
50603 ++ return xsk_wakeup(xs, XDP_WAKEUP_TX);
50604 ++
50605 ++ /* Drop the RCU lock since the SKB path might sleep. */
50606 ++ rcu_read_unlock();
50607 ++ ret = xsk_generic_xmit(sk);
50608 ++ /* Reaquire RCU lock before going into common code. */
50609 ++ rcu_read_lock();
50610 ++
50611 ++ return ret;
50612 + }
50613 +
50614 + static bool xsk_no_wakeup(struct sock *sk)
50615 +@@ -619,7 +625,7 @@ static bool xsk_no_wakeup(struct sock *sk)
50616 + #endif
50617 + }
50618 +
50619 +-static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
50620 ++static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
50621 + {
50622 + bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
50623 + struct sock *sk = sock->sk;
50624 +@@ -639,11 +645,22 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
50625 +
50626 + pool = xs->pool;
50627 + if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
50628 +- return __xsk_sendmsg(sk);
50629 ++ return xsk_xmit(sk);
50630 + return 0;
50631 + }
50632 +
50633 +-static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
50634 ++static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
50635 ++{
50636 ++ int ret;
50637 ++
50638 ++ rcu_read_lock();
50639 ++ ret = __xsk_sendmsg(sock, m, total_len);
50640 ++ rcu_read_unlock();
50641 ++
50642 ++ return ret;
50643 ++}
50644 ++
50645 ++static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
50646 + {
50647 + bool need_wait = !(flags & MSG_DONTWAIT);
50648 + struct sock *sk = sock->sk;
50649 +@@ -669,6 +686,17 @@ static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int fl
50650 + return 0;
50651 + }
50652 +
50653 ++static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
50654 ++{
50655 ++ int ret;
50656 ++
50657 ++ rcu_read_lock();
50658 ++ ret = __xsk_recvmsg(sock, m, len, flags);
50659 ++ rcu_read_unlock();
50660 ++
50661 ++ return ret;
50662 ++}
50663 ++
50664 + static __poll_t xsk_poll(struct file *file, struct socket *sock,
50665 + struct poll_table_struct *wait)
50666 + {
50667 +@@ -679,8 +707,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
50668 +
50669 + sock_poll_wait(file, sock, wait);
50670 +
50671 +- if (unlikely(!xsk_is_bound(xs)))
50672 ++ rcu_read_lock();
50673 ++ if (unlikely(!xsk_is_bound(xs))) {
50674 ++ rcu_read_unlock();
50675 + return mask;
50676 ++ }
50677 +
50678 + pool = xs->pool;
50679 +
50680 +@@ -689,7 +720,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
50681 + xsk_wakeup(xs, pool->cached_need_wakeup);
50682 + else
50683 + /* Poll needs to drive Tx also in copy mode */
50684 +- __xsk_sendmsg(sk);
50685 ++ xsk_xmit(sk);
50686 + }
50687 +
50688 + if (xs->rx && !xskq_prod_is_empty(xs->rx))
50689 +@@ -697,6 +728,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
50690 + if (xs->tx && xsk_tx_writeable(xs))
50691 + mask |= EPOLLOUT | EPOLLWRNORM;
50692 +
50693 ++ rcu_read_unlock();
50694 + return mask;
50695 + }
50696 +
50697 +@@ -728,7 +760,6 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
50698 +
50699 + /* Wait for driver to stop using the xdp socket. */
50700 + xp_del_xsk(xs->pool, xs);
50701 +- xs->dev = NULL;
50702 + synchronize_net();
50703 + dev_put(dev);
50704 + }
50705 +diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
50706 +index fd39bb660ebcd..0202a90b65e3a 100644
50707 +--- a/net/xdp/xsk_buff_pool.c
50708 ++++ b/net/xdp/xsk_buff_pool.c
50709 +@@ -584,9 +584,13 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
50710 + u32 nb_entries1 = 0, nb_entries2;
50711 +
50712 + if (unlikely(pool->dma_need_sync)) {
50713 ++ struct xdp_buff *buff;
50714 ++
50715 + /* Slow path */
50716 +- *xdp = xp_alloc(pool);
50717 +- return !!*xdp;
50718 ++ buff = xp_alloc(pool);
50719 ++ if (buff)
50720 ++ *xdp = buff;
50721 ++ return !!buff;
50722 + }
50723 +
50724 + if (unlikely(pool->free_list_cnt)) {
50725 +diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
50726 +index aa50864e4415a..9f3446af50ce2 100644
50727 +--- a/samples/bpf/xdpsock_user.c
50728 ++++ b/samples/bpf/xdpsock_user.c
50729 +@@ -1984,15 +1984,15 @@ int main(int argc, char **argv)
50730 +
50731 + setlocale(LC_ALL, "");
50732 +
50733 ++ prev_time = get_nsecs();
50734 ++ start_time = prev_time;
50735 ++
50736 + if (!opt_quiet) {
50737 + ret = pthread_create(&pt, NULL, poller, NULL);
50738 + if (ret)
50739 + exit_with_error(ret);
50740 + }
50741 +
50742 +- prev_time = get_nsecs();
50743 +- start_time = prev_time;
50744 +-
50745 + /* Configure sched priority for better wake-up accuracy */
50746 + memset(&schparam, 0, sizeof(schparam));
50747 + schparam.sched_priority = opt_schprio;
50748 +diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
50749 +index 7a15910d21718..8859fc1935428 100644
50750 +--- a/samples/landlock/sandboxer.c
50751 ++++ b/samples/landlock/sandboxer.c
50752 +@@ -134,6 +134,7 @@ static int populate_ruleset(
50753 + ret = 0;
50754 +
50755 + out_free_name:
50756 ++ free(path_list);
50757 + free(env_path_name);
50758 + return ret;
50759 + }
50760 +diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
50761 +index 803ba75610766..a0ea1d26e6b2e 100755
50762 +--- a/scripts/atomic/fallbacks/read_acquire
50763 ++++ b/scripts/atomic/fallbacks/read_acquire
50764 +@@ -2,6 +2,15 @@ cat <<EOF
50765 + static __always_inline ${ret}
50766 + arch_${atomic}_read_acquire(const ${atomic}_t *v)
50767 + {
50768 +- return smp_load_acquire(&(v)->counter);
50769 ++ ${int} ret;
50770 ++
50771 ++ if (__native_word(${atomic}_t)) {
50772 ++ ret = smp_load_acquire(&(v)->counter);
50773 ++ } else {
50774 ++ ret = arch_${atomic}_read(v);
50775 ++ __atomic_acquire_fence();
50776 ++ }
50777 ++
50778 ++ return ret;
50779 + }
50780 + EOF
50781 +diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
50782 +index 86ede759f24ea..05cdb7f42477a 100755
50783 +--- a/scripts/atomic/fallbacks/set_release
50784 ++++ b/scripts/atomic/fallbacks/set_release
50785 +@@ -2,6 +2,11 @@ cat <<EOF
50786 + static __always_inline void
50787 + arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
50788 + {
50789 +- smp_store_release(&(v)->counter, i);
50790 ++ if (__native_word(${atomic}_t)) {
50791 ++ smp_store_release(&(v)->counter, i);
50792 ++ } else {
50793 ++ __atomic_release_fence();
50794 ++ arch_${atomic}_set(v, i);
50795 ++ }
50796 + }
50797 + EOF
50798 +diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
50799 +index 95aaf7431bffa..1cba78e1dce68 100644
50800 +--- a/scripts/dtc/Makefile
50801 ++++ b/scripts/dtc/Makefile
50802 +@@ -29,7 +29,7 @@ dtc-objs += yamltree.o
50803 + # To include <yaml.h> installed in a non-default path
50804 + HOSTCFLAGS_yamltree.o := $(shell pkg-config --cflags yaml-0.1)
50805 + # To link libyaml installed in a non-default path
50806 +-HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs)
50807 ++HOSTLDLIBS_dtc := $(shell pkg-config --libs yaml-0.1)
50808 + endif
50809 +
50810 + # Generated files need one more search path to include headers in source tree
50811 +diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
50812 +index e9db7dcb3e5f4..b04aa8e91a41f 100644
50813 +--- a/scripts/gcc-plugins/stackleak_plugin.c
50814 ++++ b/scripts/gcc-plugins/stackleak_plugin.c
50815 +@@ -429,6 +429,23 @@ static unsigned int stackleak_cleanup_execute(void)
50816 + return 0;
50817 + }
50818 +
50819 ++/*
50820 ++ * STRING_CST may or may not be NUL terminated:
50821 ++ * https://gcc.gnu.org/onlinedocs/gccint/Constant-expressions.html
50822 ++ */
50823 ++static inline bool string_equal(tree node, const char *string, int length)
50824 ++{
50825 ++ if (TREE_STRING_LENGTH(node) < length)
50826 ++ return false;
50827 ++ if (TREE_STRING_LENGTH(node) > length + 1)
50828 ++ return false;
50829 ++ if (TREE_STRING_LENGTH(node) == length + 1 &&
50830 ++ TREE_STRING_POINTER(node)[length] != '\0')
50831 ++ return false;
50832 ++ return !memcmp(TREE_STRING_POINTER(node), string, length);
50833 ++}
50834 ++#define STRING_EQUAL(node, str) string_equal(node, str, strlen(str))
50835 ++
50836 + static bool stackleak_gate(void)
50837 + {
50838 + tree section;
50839 +@@ -438,13 +455,13 @@ static bool stackleak_gate(void)
50840 + if (section && TREE_VALUE(section)) {
50841 + section = TREE_VALUE(TREE_VALUE(section));
50842 +
50843 +- if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10))
50844 ++ if (STRING_EQUAL(section, ".init.text"))
50845 + return false;
50846 +- if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13))
50847 ++ if (STRING_EQUAL(section, ".devinit.text"))
50848 + return false;
50849 +- if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13))
50850 ++ if (STRING_EQUAL(section, ".cpuinit.text"))
50851 + return false;
50852 +- if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13))
50853 ++ if (STRING_EQUAL(section, ".meminit.text"))
50854 + return false;
50855 + }
50856 +
50857 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
50858 +index 6bfa332179140..e04ae56931e2e 100644
50859 +--- a/scripts/mod/modpost.c
50860 ++++ b/scripts/mod/modpost.c
50861 +@@ -669,7 +669,7 @@ static void handle_modversion(const struct module *mod,
50862 + unsigned int crc;
50863 +
50864 + if (sym->st_shndx == SHN_UNDEF) {
50865 +- warn("EXPORT symbol \"%s\" [%s%s] version ...\n"
50866 ++ warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
50867 + "Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
50868 + symname, mod->name, mod->is_vmlinux ? "" : ".ko",
50869 + symname);
50870 +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
50871 +index 08f907382c618..7d87772f0ce68 100644
50872 +--- a/security/integrity/evm/evm_main.c
50873 ++++ b/security/integrity/evm/evm_main.c
50874 +@@ -86,7 +86,7 @@ static int __init evm_set_fixmode(char *str)
50875 + else
50876 + pr_err("invalid \"%s\" mode", str);
50877 +
50878 +- return 0;
50879 ++ return 1;
50880 + }
50881 + __setup("evm=", evm_set_fixmode);
50882 +
50883 +diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
50884 +index 5de0d599a2748..97bc27bbf0797 100644
50885 +--- a/security/keys/keyctl_pkey.c
50886 ++++ b/security/keys/keyctl_pkey.c
50887 +@@ -135,15 +135,23 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par
50888 +
50889 + switch (op) {
50890 + case KEYCTL_PKEY_ENCRYPT:
50891 ++ if (uparams.in_len > info.max_dec_size ||
50892 ++ uparams.out_len > info.max_enc_size)
50893 ++ return -EINVAL;
50894 ++ break;
50895 + case KEYCTL_PKEY_DECRYPT:
50896 + if (uparams.in_len > info.max_enc_size ||
50897 + uparams.out_len > info.max_dec_size)
50898 + return -EINVAL;
50899 + break;
50900 + case KEYCTL_PKEY_SIGN:
50901 ++ if (uparams.in_len > info.max_data_size ||
50902 ++ uparams.out_len > info.max_sig_size)
50903 ++ return -EINVAL;
50904 ++ break;
50905 + case KEYCTL_PKEY_VERIFY:
50906 +- if (uparams.in_len > info.max_sig_size ||
50907 +- uparams.out_len > info.max_data_size)
50908 ++ if (uparams.in_len > info.max_data_size ||
50909 ++ uparams.in2_len > info.max_sig_size)
50910 + return -EINVAL;
50911 + break;
50912 + default:
50913 +@@ -151,7 +159,7 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par
50914 + }
50915 +
50916 + params->in_len = uparams.in_len;
50917 +- params->out_len = uparams.out_len;
50918 ++ params->out_len = uparams.out_len; /* Note: same as in2_len */
50919 + return 0;
50920 + }
50921 +
50922 +diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
50923 +index d5c891d8d3534..9b9d3ef79cbe3 100644
50924 +--- a/security/keys/trusted-keys/trusted_core.c
50925 ++++ b/security/keys/trusted-keys/trusted_core.c
50926 +@@ -27,10 +27,10 @@ module_param_named(source, trusted_key_source, charp, 0);
50927 + MODULE_PARM_DESC(source, "Select trusted keys source (tpm or tee)");
50928 +
50929 + static const struct trusted_key_source trusted_key_sources[] = {
50930 +-#if defined(CONFIG_TCG_TPM)
50931 ++#if IS_REACHABLE(CONFIG_TCG_TPM)
50932 + { "tpm", &trusted_key_tpm_ops },
50933 + #endif
50934 +-#if defined(CONFIG_TEE)
50935 ++#if IS_REACHABLE(CONFIG_TEE)
50936 + { "tee", &trusted_key_tee_ops },
50937 + #endif
50938 + };
50939 +@@ -351,7 +351,7 @@ static int __init init_trusted(void)
50940 +
50941 + static void __exit cleanup_trusted(void)
50942 + {
50943 +- static_call(trusted_key_exit)();
50944 ++ static_call_cond(trusted_key_exit)();
50945 + }
50946 +
50947 + late_initcall(init_trusted);
50948 +diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
50949 +index 32396962f04d6..7e27ce394020d 100644
50950 +--- a/security/landlock/syscalls.c
50951 ++++ b/security/landlock/syscalls.c
50952 +@@ -192,7 +192,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
50953 + return PTR_ERR(ruleset);
50954 +
50955 + /* Creates anonymous FD referring to the ruleset. */
50956 +- ruleset_fd = anon_inode_getfd("landlock-ruleset", &ruleset_fops,
50957 ++ ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops,
50958 + ruleset, O_RDWR | O_CLOEXEC);
50959 + if (ruleset_fd < 0)
50960 + landlock_put_ruleset(ruleset);
50961 +diff --git a/security/security.c b/security/security.c
50962 +index 22261d79f3333..b7cf5cbfdc677 100644
50963 +--- a/security/security.c
50964 ++++ b/security/security.c
50965 +@@ -884,9 +884,22 @@ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
50966 + return call_int_hook(fs_context_dup, 0, fc, src_fc);
50967 + }
50968 +
50969 +-int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param)
50970 ++int security_fs_context_parse_param(struct fs_context *fc,
50971 ++ struct fs_parameter *param)
50972 + {
50973 +- return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param);
50974 ++ struct security_hook_list *hp;
50975 ++ int trc;
50976 ++ int rc = -ENOPARAM;
50977 ++
50978 ++ hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param,
50979 ++ list) {
50980 ++ trc = hp->hook.fs_context_parse_param(fc, param);
50981 ++ if (trc == 0)
50982 ++ rc = 0;
50983 ++ else if (trc != -ENOPARAM)
50984 ++ return trc;
50985 ++ }
50986 ++ return rc;
50987 + }
50988 +
50989 + int security_sb_alloc(struct super_block *sb)
50990 +@@ -2391,6 +2404,13 @@ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
50991 + }
50992 + EXPORT_SYMBOL(security_sctp_sk_clone);
50993 +
50994 ++int security_sctp_assoc_established(struct sctp_association *asoc,
50995 ++ struct sk_buff *skb)
50996 ++{
50997 ++ return call_int_hook(sctp_assoc_established, 0, asoc, skb);
50998 ++}
50999 ++EXPORT_SYMBOL(security_sctp_assoc_established);
51000 ++
51001 + #endif /* CONFIG_SECURITY_NETWORK */
51002 +
51003 + #ifdef CONFIG_SECURITY_INFINIBAND
51004 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
51005 +index 5b6895e4fc29e..ea725891e566e 100644
51006 +--- a/security/selinux/hooks.c
51007 ++++ b/security/selinux/hooks.c
51008 +@@ -342,6 +342,10 @@ static void inode_free_security(struct inode *inode)
51009 +
51010 + struct selinux_mnt_opts {
51011 + const char *fscontext, *context, *rootcontext, *defcontext;
51012 ++ u32 fscontext_sid;
51013 ++ u32 context_sid;
51014 ++ u32 rootcontext_sid;
51015 ++ u32 defcontext_sid;
51016 + };
51017 +
51018 + static void selinux_free_mnt_opts(void *mnt_opts)
51019 +@@ -479,7 +483,7 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
51020 +
51021 + static int sb_check_xattr_support(struct super_block *sb)
51022 + {
51023 +- struct superblock_security_struct *sbsec = sb->s_security;
51024 ++ struct superblock_security_struct *sbsec = selinux_superblock(sb);
51025 + struct dentry *root = sb->s_root;
51026 + struct inode *root_inode = d_backing_inode(root);
51027 + u32 sid;
51028 +@@ -598,15 +602,14 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
51029 + return 0;
51030 + }
51031 +
51032 +-static int parse_sid(struct super_block *sb, const char *s, u32 *sid,
51033 +- gfp_t gfp)
51034 ++static int parse_sid(struct super_block *sb, const char *s, u32 *sid)
51035 + {
51036 + int rc = security_context_str_to_sid(&selinux_state, s,
51037 +- sid, gfp);
51038 ++ sid, GFP_KERNEL);
51039 + if (rc)
51040 + pr_warn("SELinux: security_context_str_to_sid"
51041 + "(%s) failed for (dev %s, type %s) errno=%d\n",
51042 +- s, sb->s_id, sb->s_type->name, rc);
51043 ++ s, sb ? sb->s_id : "?", sb ? sb->s_type->name : "?", rc);
51044 + return rc;
51045 + }
51046 +
51047 +@@ -673,8 +676,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
51048 + */
51049 + if (opts) {
51050 + if (opts->fscontext) {
51051 +- rc = parse_sid(sb, opts->fscontext, &fscontext_sid,
51052 +- GFP_KERNEL);
51053 ++ rc = parse_sid(sb, opts->fscontext, &fscontext_sid);
51054 + if (rc)
51055 + goto out;
51056 + if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
51057 +@@ -683,8 +685,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
51058 + sbsec->flags |= FSCONTEXT_MNT;
51059 + }
51060 + if (opts->context) {
51061 +- rc = parse_sid(sb, opts->context, &context_sid,
51062 +- GFP_KERNEL);
51063 ++ rc = parse_sid(sb, opts->context, &context_sid);
51064 + if (rc)
51065 + goto out;
51066 + if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
51067 +@@ -693,8 +694,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
51068 + sbsec->flags |= CONTEXT_MNT;
51069 + }
51070 + if (opts->rootcontext) {
51071 +- rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid,
51072 +- GFP_KERNEL);
51073 ++ rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid);
51074 + if (rc)
51075 + goto out;
51076 + if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
51077 +@@ -703,8 +703,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
51078 + sbsec->flags |= ROOTCONTEXT_MNT;
51079 + }
51080 + if (opts->defcontext) {
51081 +- rc = parse_sid(sb, opts->defcontext, &defcontext_sid,
51082 +- GFP_KERNEL);
51083 ++ rc = parse_sid(sb, opts->defcontext, &defcontext_sid);
51084 + if (rc)
51085 + goto out;
51086 + if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
51087 +@@ -996,21 +995,29 @@ static int selinux_add_opt(int token, const char *s, void **mnt_opts)
51088 + if (opts->context || opts->defcontext)
51089 + goto err;
51090 + opts->context = s;
51091 ++ if (selinux_initialized(&selinux_state))
51092 ++ parse_sid(NULL, s, &opts->context_sid);
51093 + break;
51094 + case Opt_fscontext:
51095 + if (opts->fscontext)
51096 + goto err;
51097 + opts->fscontext = s;
51098 ++ if (selinux_initialized(&selinux_state))
51099 ++ parse_sid(NULL, s, &opts->fscontext_sid);
51100 + break;
51101 + case Opt_rootcontext:
51102 + if (opts->rootcontext)
51103 + goto err;
51104 + opts->rootcontext = s;
51105 ++ if (selinux_initialized(&selinux_state))
51106 ++ parse_sid(NULL, s, &opts->rootcontext_sid);
51107 + break;
51108 + case Opt_defcontext:
51109 + if (opts->context || opts->defcontext)
51110 + goto err;
51111 + opts->defcontext = s;
51112 ++ if (selinux_initialized(&selinux_state))
51113 ++ parse_sid(NULL, s, &opts->defcontext_sid);
51114 + break;
51115 + }
51116 +
51117 +@@ -2647,9 +2654,7 @@ free_opt:
51118 + static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
51119 + {
51120 + struct selinux_mnt_opts *opts = mnt_opts;
51121 +- struct superblock_security_struct *sbsec = sb->s_security;
51122 +- u32 sid;
51123 +- int rc;
51124 ++ struct superblock_security_struct *sbsec = selinux_superblock(sb);
51125 +
51126 + /*
51127 + * Superblock not initialized (i.e. no options) - reject if any
51128 +@@ -2666,34 +2671,36 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
51129 + return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
51130 +
51131 + if (opts->fscontext) {
51132 +- rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT);
51133 +- if (rc)
51134 ++ if (opts->fscontext_sid == SECSID_NULL)
51135 + return 1;
51136 +- if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
51137 ++ else if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
51138 ++ opts->fscontext_sid))
51139 + return 1;
51140 + }
51141 + if (opts->context) {
51142 +- rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT);
51143 +- if (rc)
51144 ++ if (opts->context_sid == SECSID_NULL)
51145 + return 1;
51146 +- if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
51147 ++ else if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
51148 ++ opts->context_sid))
51149 + return 1;
51150 + }
51151 + if (opts->rootcontext) {
51152 +- struct inode_security_struct *root_isec;
51153 +-
51154 +- root_isec = backing_inode_security(sb->s_root);
51155 +- rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT);
51156 +- if (rc)
51157 +- return 1;
51158 +- if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
51159 ++ if (opts->rootcontext_sid == SECSID_NULL)
51160 + return 1;
51161 ++ else {
51162 ++ struct inode_security_struct *root_isec;
51163 ++
51164 ++ root_isec = backing_inode_security(sb->s_root);
51165 ++ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
51166 ++ opts->rootcontext_sid))
51167 ++ return 1;
51168 ++ }
51169 + }
51170 + if (opts->defcontext) {
51171 +- rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT);
51172 +- if (rc)
51173 ++ if (opts->defcontext_sid == SECSID_NULL)
51174 + return 1;
51175 +- if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
51176 ++ else if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
51177 ++ opts->defcontext_sid))
51178 + return 1;
51179 + }
51180 + return 0;
51181 +@@ -2713,14 +2720,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
51182 + return 0;
51183 +
51184 + if (opts->fscontext) {
51185 +- rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL);
51186 ++ rc = parse_sid(sb, opts->fscontext, &sid);
51187 + if (rc)
51188 + return rc;
51189 + if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
51190 + goto out_bad_option;
51191 + }
51192 + if (opts->context) {
51193 +- rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL);
51194 ++ rc = parse_sid(sb, opts->context, &sid);
51195 + if (rc)
51196 + return rc;
51197 + if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
51198 +@@ -2729,14 +2736,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
51199 + if (opts->rootcontext) {
51200 + struct inode_security_struct *root_isec;
51201 + root_isec = backing_inode_security(sb->s_root);
51202 +- rc = parse_sid(sb, opts->rootcontext, &sid, GFP_KERNEL);
51203 ++ rc = parse_sid(sb, opts->rootcontext, &sid);
51204 + if (rc)
51205 + return rc;
51206 + if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
51207 + goto out_bad_option;
51208 + }
51209 + if (opts->defcontext) {
51210 +- rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL);
51211 ++ rc = parse_sid(sb, opts->defcontext, &sid);
51212 + if (rc)
51213 + return rc;
51214 + if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
51215 +@@ -2860,10 +2867,9 @@ static int selinux_fs_context_parse_param(struct fs_context *fc,
51216 + return opt;
51217 +
51218 + rc = selinux_add_opt(opt, param->string, &fc->security);
51219 +- if (!rc) {
51220 ++ if (!rc)
51221 + param->string = NULL;
51222 +- rc = 1;
51223 +- }
51224 ++
51225 + return rc;
51226 + }
51227 +
51228 +@@ -3745,6 +3751,12 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
51229 + CAP_OPT_NONE, true);
51230 + break;
51231 +
51232 ++ case FIOCLEX:
51233 ++ case FIONCLEX:
51234 ++ if (!selinux_policycap_ioctl_skip_cloexec())
51235 ++ error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
51236 ++ break;
51237 ++
51238 + /* default case assumes that the command will go
51239 + * to the file's ioctl() function.
51240 + */
51241 +@@ -5299,37 +5311,38 @@ static void selinux_sock_graft(struct sock *sk, struct socket *parent)
51242 + sksec->sclass = isec->sclass;
51243 + }
51244 +
51245 +-/* Called whenever SCTP receives an INIT chunk. This happens when an incoming
51246 +- * connect(2), sctp_connectx(3) or sctp_sendmsg(3) (with no association
51247 +- * already present).
51248 ++/*
51249 ++ * Determines peer_secid for the asoc and updates socket's peer label
51250 ++ * if it's the first association on the socket.
51251 + */
51252 +-static int selinux_sctp_assoc_request(struct sctp_association *asoc,
51253 +- struct sk_buff *skb)
51254 ++static int selinux_sctp_process_new_assoc(struct sctp_association *asoc,
51255 ++ struct sk_buff *skb)
51256 + {
51257 +- struct sk_security_struct *sksec = asoc->base.sk->sk_security;
51258 ++ struct sock *sk = asoc->base.sk;
51259 ++ u16 family = sk->sk_family;
51260 ++ struct sk_security_struct *sksec = sk->sk_security;
51261 + struct common_audit_data ad;
51262 + struct lsm_network_audit net = {0,};
51263 +- u8 peerlbl_active;
51264 +- u32 peer_sid = SECINITSID_UNLABELED;
51265 +- u32 conn_sid;
51266 +- int err = 0;
51267 ++ int err;
51268 +
51269 +- if (!selinux_policycap_extsockclass())
51270 +- return 0;
51271 ++ /* handle mapped IPv4 packets arriving via IPv6 sockets */
51272 ++ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
51273 ++ family = PF_INET;
51274 +
51275 +- peerlbl_active = selinux_peerlbl_enabled();
51276 ++ if (selinux_peerlbl_enabled()) {
51277 ++ asoc->peer_secid = SECSID_NULL;
51278 +
51279 +- if (peerlbl_active) {
51280 + /* This will return peer_sid = SECSID_NULL if there are
51281 + * no peer labels, see security_net_peersid_resolve().
51282 + */
51283 +- err = selinux_skb_peerlbl_sid(skb, asoc->base.sk->sk_family,
51284 +- &peer_sid);
51285 ++ err = selinux_skb_peerlbl_sid(skb, family, &asoc->peer_secid);
51286 + if (err)
51287 + return err;
51288 +
51289 +- if (peer_sid == SECSID_NULL)
51290 +- peer_sid = SECINITSID_UNLABELED;
51291 ++ if (asoc->peer_secid == SECSID_NULL)
51292 ++ asoc->peer_secid = SECINITSID_UNLABELED;
51293 ++ } else {
51294 ++ asoc->peer_secid = SECINITSID_UNLABELED;
51295 + }
51296 +
51297 + if (sksec->sctp_assoc_state == SCTP_ASSOC_UNSET) {
51298 +@@ -5340,8 +5353,8 @@ static int selinux_sctp_assoc_request(struct sctp_association *asoc,
51299 + * then it is approved by policy and used as the primary
51300 + * peer SID for getpeercon(3).
51301 + */
51302 +- sksec->peer_sid = peer_sid;
51303 +- } else if (sksec->peer_sid != peer_sid) {
51304 ++ sksec->peer_sid = asoc->peer_secid;
51305 ++ } else if (sksec->peer_sid != asoc->peer_secid) {
51306 + /* Other association peer SIDs are checked to enforce
51307 + * consistency among the peer SIDs.
51308 + */
51309 +@@ -5349,11 +5362,32 @@ static int selinux_sctp_assoc_request(struct sctp_association *asoc,
51310 + ad.u.net = &net;
51311 + ad.u.net->sk = asoc->base.sk;
51312 + err = avc_has_perm(&selinux_state,
51313 +- sksec->peer_sid, peer_sid, sksec->sclass,
51314 +- SCTP_SOCKET__ASSOCIATION, &ad);
51315 ++ sksec->peer_sid, asoc->peer_secid,
51316 ++ sksec->sclass, SCTP_SOCKET__ASSOCIATION,
51317 ++ &ad);
51318 + if (err)
51319 + return err;
51320 + }
51321 ++ return 0;
51322 ++}
51323 ++
51324 ++/* Called whenever SCTP receives an INIT or COOKIE ECHO chunk. This
51325 ++ * happens on an incoming connect(2), sctp_connectx(3) or
51326 ++ * sctp_sendmsg(3) (with no association already present).
51327 ++ */
51328 ++static int selinux_sctp_assoc_request(struct sctp_association *asoc,
51329 ++ struct sk_buff *skb)
51330 ++{
51331 ++ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
51332 ++ u32 conn_sid;
51333 ++ int err;
51334 ++
51335 ++ if (!selinux_policycap_extsockclass())
51336 ++ return 0;
51337 ++
51338 ++ err = selinux_sctp_process_new_assoc(asoc, skb);
51339 ++ if (err)
51340 ++ return err;
51341 +
51342 + /* Compute the MLS component for the connection and store
51343 + * the information in asoc. This will be used by SCTP TCP type
51344 +@@ -5361,17 +5395,36 @@ static int selinux_sctp_assoc_request(struct sctp_association *asoc,
51345 + * socket to be generated. selinux_sctp_sk_clone() will then
51346 + * plug this into the new socket.
51347 + */
51348 +- err = selinux_conn_sid(sksec->sid, peer_sid, &conn_sid);
51349 ++ err = selinux_conn_sid(sksec->sid, asoc->peer_secid, &conn_sid);
51350 + if (err)
51351 + return err;
51352 +
51353 + asoc->secid = conn_sid;
51354 +- asoc->peer_secid = peer_sid;
51355 +
51356 + /* Set any NetLabel labels including CIPSO/CALIPSO options. */
51357 + return selinux_netlbl_sctp_assoc_request(asoc, skb);
51358 + }
51359 +
51360 ++/* Called when SCTP receives a COOKIE ACK chunk as the final
51361 ++ * response to an association request (initited by us).
51362 ++ */
51363 ++static int selinux_sctp_assoc_established(struct sctp_association *asoc,
51364 ++ struct sk_buff *skb)
51365 ++{
51366 ++ struct sk_security_struct *sksec = asoc->base.sk->sk_security;
51367 ++
51368 ++ if (!selinux_policycap_extsockclass())
51369 ++ return 0;
51370 ++
51371 ++ /* Inherit secid from the parent socket - this will be picked up
51372 ++ * by selinux_sctp_sk_clone() if the association gets peeled off
51373 ++ * into a new socket.
51374 ++ */
51375 ++ asoc->secid = sksec->sid;
51376 ++
51377 ++ return selinux_sctp_process_new_assoc(asoc, skb);
51378 ++}
51379 ++
51380 + /* Check if sctp IPv4/IPv6 addresses are valid for binding or connecting
51381 + * based on their @optname.
51382 + */
51383 +@@ -7192,6 +7245,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
51384 + LSM_HOOK_INIT(sctp_assoc_request, selinux_sctp_assoc_request),
51385 + LSM_HOOK_INIT(sctp_sk_clone, selinux_sctp_sk_clone),
51386 + LSM_HOOK_INIT(sctp_bind_connect, selinux_sctp_bind_connect),
51387 ++ LSM_HOOK_INIT(sctp_assoc_established, selinux_sctp_assoc_established),
51388 + LSM_HOOK_INIT(inet_conn_request, selinux_inet_conn_request),
51389 + LSM_HOOK_INIT(inet_csk_clone, selinux_inet_csk_clone),
51390 + LSM_HOOK_INIT(inet_conn_established, selinux_inet_conn_established),
51391 +diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h
51392 +index 2ec038efbb03c..a9e572ca4fd96 100644
51393 +--- a/security/selinux/include/policycap.h
51394 ++++ b/security/selinux/include/policycap.h
51395 +@@ -11,6 +11,7 @@ enum {
51396 + POLICYDB_CAPABILITY_CGROUPSECLABEL,
51397 + POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION,
51398 + POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS,
51399 ++ POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC,
51400 + __POLICYDB_CAPABILITY_MAX
51401 + };
51402 + #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
51403 +diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h
51404 +index b89289f092c93..ebd64afe1defd 100644
51405 +--- a/security/selinux/include/policycap_names.h
51406 ++++ b/security/selinux/include/policycap_names.h
51407 +@@ -12,7 +12,8 @@ const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
51408 + "always_check_network",
51409 + "cgroup_seclabel",
51410 + "nnp_nosuid_transition",
51411 +- "genfs_seclabel_symlinks"
51412 ++ "genfs_seclabel_symlinks",
51413 ++ "ioctl_skip_cloexec"
51414 + };
51415 +
51416 + #endif /* _SELINUX_POLICYCAP_NAMES_H_ */
51417 +diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
51418 +index ac0ece01305a6..c0d966020ebdd 100644
51419 +--- a/security/selinux/include/security.h
51420 ++++ b/security/selinux/include/security.h
51421 +@@ -219,6 +219,13 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
51422 + return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
51423 + }
51424 +
51425 ++static inline bool selinux_policycap_ioctl_skip_cloexec(void)
51426 ++{
51427 ++ struct selinux_state *state = &selinux_state;
51428 ++
51429 ++ return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC]);
51430 ++}
51431 ++
51432 + struct selinux_policy_convert_data;
51433 +
51434 + struct selinux_load_state {
51435 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
51436 +index e4cd7cb856f37..f2f6203e0fff5 100644
51437 +--- a/security/selinux/selinuxfs.c
51438 ++++ b/security/selinux/selinuxfs.c
51439 +@@ -2127,6 +2127,8 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
51440 + }
51441 +
51442 + ret = sel_make_avc_files(dentry);
51443 ++ if (ret)
51444 ++ goto err;
51445 +
51446 + dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino);
51447 + if (IS_ERR(dentry)) {
51448 +diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
51449 +index 90697317895fb..c576832febc67 100644
51450 +--- a/security/selinux/xfrm.c
51451 ++++ b/security/selinux/xfrm.c
51452 +@@ -347,7 +347,7 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
51453 + int rc;
51454 + struct xfrm_sec_ctx *ctx;
51455 + char *ctx_str = NULL;
51456 +- int str_len;
51457 ++ u32 str_len;
51458 +
51459 + if (!polsec)
51460 + return 0;
51461 +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
51462 +index 14b279cc75c96..6207762dbdb13 100644
51463 +--- a/security/smack/smack_lsm.c
51464 ++++ b/security/smack/smack_lsm.c
51465 +@@ -2510,7 +2510,7 @@ static int smk_ipv6_check(struct smack_known *subject,
51466 + #ifdef CONFIG_AUDIT
51467 + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
51468 + ad.a.u.net->family = PF_INET6;
51469 +- ad.a.u.net->dport = ntohs(address->sin6_port);
51470 ++ ad.a.u.net->dport = address->sin6_port;
51471 + if (act == SMK_RECEIVING)
51472 + ad.a.u.net->v6info.saddr = address->sin6_addr;
51473 + else
51474 +diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
51475 +index 3445ae6fd4794..363b65be87ab7 100644
51476 +--- a/security/tomoyo/load_policy.c
51477 ++++ b/security/tomoyo/load_policy.c
51478 +@@ -24,7 +24,7 @@ static const char *tomoyo_loader;
51479 + static int __init tomoyo_loader_setup(char *str)
51480 + {
51481 + tomoyo_loader = str;
51482 +- return 0;
51483 ++ return 1;
51484 + }
51485 +
51486 + __setup("TOMOYO_loader=", tomoyo_loader_setup);
51487 +@@ -64,7 +64,7 @@ static const char *tomoyo_trigger;
51488 + static int __init tomoyo_trigger_setup(char *str)
51489 + {
51490 + tomoyo_trigger = str;
51491 +- return 0;
51492 ++ return 1;
51493 + }
51494 +
51495 + __setup("TOMOYO_trigger=", tomoyo_trigger_setup);
51496 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
51497 +index edd9849210f2d..977d54320a5ca 100644
51498 +--- a/sound/core/pcm.c
51499 ++++ b/sound/core/pcm.c
51500 +@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
51501 +
51502 + runtime->status->state = SNDRV_PCM_STATE_OPEN;
51503 + mutex_init(&runtime->buffer_mutex);
51504 ++ atomic_set(&runtime->buffer_accessing, 0);
51505 +
51506 + substream->runtime = runtime;
51507 + substream->private_data = pcm->private_data;
51508 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
51509 +index a40a35e51fad7..1fc7c50ffa625 100644
51510 +--- a/sound/core/pcm_lib.c
51511 ++++ b/sound/core/pcm_lib.c
51512 +@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
51513 + if (avail >= runtime->twake)
51514 + break;
51515 + snd_pcm_stream_unlock_irq(substream);
51516 +- mutex_unlock(&runtime->buffer_mutex);
51517 +
51518 + tout = schedule_timeout(wait_time);
51519 +
51520 +- mutex_lock(&runtime->buffer_mutex);
51521 + snd_pcm_stream_lock_irq(substream);
51522 + set_current_state(TASK_INTERRUPTIBLE);
51523 + switch (runtime->status->state) {
51524 +@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
51525 +
51526 + nonblock = !!(substream->f_flags & O_NONBLOCK);
51527 +
51528 +- mutex_lock(&runtime->buffer_mutex);
51529 + snd_pcm_stream_lock_irq(substream);
51530 + err = pcm_accessible_state(runtime);
51531 + if (err < 0)
51532 +@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
51533 + err = -EINVAL;
51534 + goto _end_unlock;
51535 + }
51536 ++ if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
51537 ++ err = -EBUSY;
51538 ++ goto _end_unlock;
51539 ++ }
51540 + snd_pcm_stream_unlock_irq(substream);
51541 + if (!is_playback)
51542 + snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
51543 +@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
51544 + if (is_playback)
51545 + snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
51546 + snd_pcm_stream_lock_irq(substream);
51547 ++ atomic_dec(&runtime->buffer_accessing);
51548 + if (err < 0)
51549 + goto _end_unlock;
51550 + err = pcm_accessible_state(runtime);
51551 +@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
51552 + if (xfer > 0 && err >= 0)
51553 + snd_pcm_update_state(substream, runtime);
51554 + snd_pcm_stream_unlock_irq(substream);
51555 +- mutex_unlock(&runtime->buffer_mutex);
51556 + return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
51557 + }
51558 + EXPORT_SYMBOL(__snd_pcm_lib_xfer);
51559 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
51560 +index 704fdc9ebf911..4adaee62ef333 100644
51561 +--- a/sound/core/pcm_native.c
51562 ++++ b/sound/core/pcm_native.c
51563 +@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
51564 + return 0;
51565 + }
51566 +
51567 ++/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
51568 ++ * block the further r/w operations
51569 ++ */
51570 ++static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
51571 ++{
51572 ++ if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
51573 ++ return -EBUSY;
51574 ++ mutex_lock(&runtime->buffer_mutex);
51575 ++ return 0; /* keep buffer_mutex, unlocked by below */
51576 ++}
51577 ++
51578 ++/* release buffer_mutex and clear r/w access flag */
51579 ++static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
51580 ++{
51581 ++ mutex_unlock(&runtime->buffer_mutex);
51582 ++ atomic_inc(&runtime->buffer_accessing);
51583 ++}
51584 ++
51585 + #if IS_ENABLED(CONFIG_SND_PCM_OSS)
51586 + #define is_oss_stream(substream) ((substream)->oss.oss)
51587 + #else
51588 +@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
51589 + struct snd_pcm_hw_params *params)
51590 + {
51591 + struct snd_pcm_runtime *runtime;
51592 +- int err = 0, usecs;
51593 ++ int err, usecs;
51594 + unsigned int bits;
51595 + snd_pcm_uframes_t frames;
51596 +
51597 + if (PCM_RUNTIME_CHECK(substream))
51598 + return -ENXIO;
51599 + runtime = substream->runtime;
51600 +- mutex_lock(&runtime->buffer_mutex);
51601 ++ err = snd_pcm_buffer_access_lock(runtime);
51602 ++ if (err < 0)
51603 ++ return err;
51604 + snd_pcm_stream_lock_irq(substream);
51605 + switch (runtime->status->state) {
51606 + case SNDRV_PCM_STATE_OPEN:
51607 +@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
51608 + snd_pcm_lib_free_pages(substream);
51609 + }
51610 + unlock:
51611 +- mutex_unlock(&runtime->buffer_mutex);
51612 ++ snd_pcm_buffer_access_unlock(runtime);
51613 + return err;
51614 + }
51615 +
51616 +@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
51617 + if (PCM_RUNTIME_CHECK(substream))
51618 + return -ENXIO;
51619 + runtime = substream->runtime;
51620 +- mutex_lock(&runtime->buffer_mutex);
51621 ++ result = snd_pcm_buffer_access_lock(runtime);
51622 ++ if (result < 0)
51623 ++ return result;
51624 + snd_pcm_stream_lock_irq(substream);
51625 + switch (runtime->status->state) {
51626 + case SNDRV_PCM_STATE_SETUP:
51627 +@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
51628 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
51629 + cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
51630 + unlock:
51631 +- mutex_unlock(&runtime->buffer_mutex);
51632 ++ snd_pcm_buffer_access_unlock(runtime);
51633 + return result;
51634 + }
51635 +
51636 +@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
51637 +
51638 + /* Guarantee the group members won't change during non-atomic action */
51639 + down_read(&snd_pcm_link_rwsem);
51640 +- mutex_lock(&substream->runtime->buffer_mutex);
51641 ++ res = snd_pcm_buffer_access_lock(substream->runtime);
51642 ++ if (res < 0)
51643 ++ goto unlock;
51644 + if (snd_pcm_stream_linked(substream))
51645 + res = snd_pcm_action_group(ops, substream, state, false);
51646 + else
51647 + res = snd_pcm_action_single(ops, substream, state);
51648 +- mutex_unlock(&substream->runtime->buffer_mutex);
51649 ++ snd_pcm_buffer_access_unlock(substream->runtime);
51650 ++ unlock:
51651 + up_read(&snd_pcm_link_rwsem);
51652 + return res;
51653 + }
51654 +diff --git a/sound/firewire/fcp.c b/sound/firewire/fcp.c
51655 +index bbfbebf4affbc..df44dd5dc4b22 100644
51656 +--- a/sound/firewire/fcp.c
51657 ++++ b/sound/firewire/fcp.c
51658 +@@ -240,9 +240,7 @@ int fcp_avc_transaction(struct fw_unit *unit,
51659 + t.response_match_bytes = response_match_bytes;
51660 + t.state = STATE_PENDING;
51661 + init_waitqueue_head(&t.wait);
51662 +-
51663 +- if (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03)
51664 +- t.deferrable = true;
51665 ++ t.deferrable = (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03);
51666 +
51667 + spin_lock_irq(&transactions_lock);
51668 + list_add_tail(&t.list, &transactions);
51669 +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
51670 +index 4fb90ceb4053b..70fd8b13938ed 100644
51671 +--- a/sound/hda/intel-dsp-config.c
51672 ++++ b/sound/hda/intel-dsp-config.c
51673 +@@ -11,6 +11,7 @@
51674 + #include <sound/core.h>
51675 + #include <sound/intel-dsp-config.h>
51676 + #include <sound/intel-nhlt.h>
51677 ++#include <sound/soc-acpi.h>
51678 +
51679 + static int dsp_driver;
51680 +
51681 +@@ -31,7 +32,12 @@ struct config_entry {
51682 + u16 device;
51683 + u8 acpi_hid[ACPI_ID_LEN];
51684 + const struct dmi_system_id *dmi_table;
51685 +- u8 codec_hid[ACPI_ID_LEN];
51686 ++ const struct snd_soc_acpi_codecs *codec_hid;
51687 ++};
51688 ++
51689 ++static const struct snd_soc_acpi_codecs __maybe_unused essx_83x6 = {
51690 ++ .num_codecs = 3,
51691 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
51692 + };
51693 +
51694 + /*
51695 +@@ -77,7 +83,7 @@ static const struct config_entry config_table[] = {
51696 + {
51697 + .flags = FLAG_SOF,
51698 + .device = 0x5a98,
51699 +- .codec_hid = "ESSX8336",
51700 ++ .codec_hid = &essx_83x6,
51701 + },
51702 + #endif
51703 + #if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
51704 +@@ -163,7 +169,7 @@ static const struct config_entry config_table[] = {
51705 + {
51706 + .flags = FLAG_SOF,
51707 + .device = 0x3198,
51708 +- .codec_hid = "ESSX8336",
51709 ++ .codec_hid = &essx_83x6,
51710 + },
51711 + #endif
51712 +
51713 +@@ -193,6 +199,11 @@ static const struct config_entry config_table[] = {
51714 + {}
51715 + }
51716 + },
51717 ++ {
51718 ++ .flags = FLAG_SOF,
51719 ++ .device = 0x09dc8,
51720 ++ .codec_hid = &essx_83x6,
51721 ++ },
51722 + {
51723 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
51724 + .device = 0x9dc8,
51725 +@@ -251,7 +262,7 @@ static const struct config_entry config_table[] = {
51726 + {
51727 + .flags = FLAG_SOF,
51728 + .device = 0x02c8,
51729 +- .codec_hid = "ESSX8336",
51730 ++ .codec_hid = &essx_83x6,
51731 + },
51732 + {
51733 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
51734 +@@ -280,7 +291,7 @@ static const struct config_entry config_table[] = {
51735 + {
51736 + .flags = FLAG_SOF,
51737 + .device = 0x06c8,
51738 +- .codec_hid = "ESSX8336",
51739 ++ .codec_hid = &essx_83x6,
51740 + },
51741 + {
51742 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
51743 +@@ -327,7 +338,7 @@ static const struct config_entry config_table[] = {
51744 + {
51745 + .flags = FLAG_SOF,
51746 + .device = 0x4dc8,
51747 +- .codec_hid = "ESSX8336",
51748 ++ .codec_hid = &essx_83x6,
51749 + },
51750 + {
51751 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
51752 +@@ -353,7 +364,7 @@ static const struct config_entry config_table[] = {
51753 + {
51754 + .flags = FLAG_SOF,
51755 + .device = 0xa0c8,
51756 +- .codec_hid = "ESSX8336",
51757 ++ .codec_hid = &essx_83x6,
51758 + },
51759 + {
51760 + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
51761 +@@ -414,8 +425,15 @@ static const struct config_entry *snd_intel_dsp_find_config
51762 + continue;
51763 + if (table->dmi_table && !dmi_check_system(table->dmi_table))
51764 + continue;
51765 +- if (table->codec_hid[0] && !acpi_dev_present(table->codec_hid, NULL, -1))
51766 +- continue;
51767 ++ if (table->codec_hid) {
51768 ++ int i;
51769 ++
51770 ++ for (i = 0; i < table->codec_hid->num_codecs; i++)
51771 ++ if (acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
51772 ++ break;
51773 ++ if (i == table->codec_hid->num_codecs)
51774 ++ continue;
51775 ++ }
51776 + return table;
51777 + }
51778 + return NULL;
51779 +diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
51780 +index 128476aa7c61d..4063da3782833 100644
51781 +--- a/sound/hda/intel-nhlt.c
51782 ++++ b/sound/hda/intel-nhlt.c
51783 +@@ -130,6 +130,28 @@ bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type)
51784 + }
51785 + EXPORT_SYMBOL(intel_nhlt_has_endpoint_type);
51786 +
51787 ++int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type)
51788 ++{
51789 ++ struct nhlt_endpoint *epnt;
51790 ++ int ssp_mask = 0;
51791 ++ int i;
51792 ++
51793 ++ if (!nhlt || (device_type != NHLT_DEVICE_BT && device_type != NHLT_DEVICE_I2S))
51794 ++ return 0;
51795 ++
51796 ++ epnt = (struct nhlt_endpoint *)nhlt->desc;
51797 ++ for (i = 0; i < nhlt->endpoint_count; i++) {
51798 ++ if (epnt->linktype == NHLT_LINK_SSP && epnt->device_type == device_type) {
51799 ++ /* for SSP the virtual bus id is the SSP port */
51800 ++ ssp_mask |= BIT(epnt->virtual_bus_id);
51801 ++ }
51802 ++ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
51803 ++ }
51804 ++
51805 ++ return ssp_mask;
51806 ++}
51807 ++EXPORT_SYMBOL(intel_nhlt_ssp_endpoint_mask);
51808 ++
51809 + static struct nhlt_specific_cfg *
51810 + nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
51811 + u32 rate, u8 vbps, u8 bps)
51812 +diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
51813 +index b6bdebd9ef275..10112e1bb25dc 100644
51814 +--- a/sound/isa/cs423x/cs4236.c
51815 ++++ b/sound/isa/cs423x/cs4236.c
51816 +@@ -494,7 +494,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
51817 + static int dev;
51818 + int err;
51819 + struct snd_card *card;
51820 +- struct pnp_dev *cdev;
51821 ++ struct pnp_dev *cdev, *iter;
51822 + char cid[PNP_ID_LEN];
51823 +
51824 + if (pnp_device_is_isapnp(pdev))
51825 +@@ -510,9 +510,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
51826 + strcpy(cid, pdev->id[0].id);
51827 + cid[5] = '1';
51828 + cdev = NULL;
51829 +- list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) {
51830 +- if (!strcmp(cdev->id[0].id, cid))
51831 ++ list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
51832 ++ if (!strcmp(iter->id[0].id, cid)) {
51833 ++ cdev = iter;
51834 + break;
51835 ++ }
51836 + }
51837 + err = snd_cs423x_card_new(&pdev->dev, dev, &card);
51838 + if (err < 0)
51839 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
51840 +index 572ff0d1fafee..8eff25d2d9e67 100644
51841 +--- a/sound/pci/hda/hda_intel.c
51842 ++++ b/sound/pci/hda/hda_intel.c
51843 +@@ -2066,14 +2066,16 @@ static const struct hda_controller_ops pci_hda_ops = {
51844 + .position_check = azx_position_check,
51845 + };
51846 +
51847 ++static DECLARE_BITMAP(probed_devs, SNDRV_CARDS);
51848 ++
51849 + static int azx_probe(struct pci_dev *pci,
51850 + const struct pci_device_id *pci_id)
51851 + {
51852 +- static int dev;
51853 + struct snd_card *card;
51854 + struct hda_intel *hda;
51855 + struct azx *chip;
51856 + bool schedule_probe;
51857 ++ int dev;
51858 + int err;
51859 +
51860 + if (pci_match_id(driver_denylist, pci)) {
51861 +@@ -2081,10 +2083,11 @@ static int azx_probe(struct pci_dev *pci,
51862 + return -ENODEV;
51863 + }
51864 +
51865 ++ dev = find_first_zero_bit(probed_devs, SNDRV_CARDS);
51866 + if (dev >= SNDRV_CARDS)
51867 + return -ENODEV;
51868 + if (!enable[dev]) {
51869 +- dev++;
51870 ++ set_bit(dev, probed_devs);
51871 + return -ENOENT;
51872 + }
51873 +
51874 +@@ -2151,7 +2154,7 @@ static int azx_probe(struct pci_dev *pci,
51875 + if (schedule_probe)
51876 + schedule_delayed_work(&hda->probe_work, 0);
51877 +
51878 +- dev++;
51879 ++ set_bit(dev, probed_devs);
51880 + if (chip->disabled)
51881 + complete_all(&hda->probe_wait);
51882 + return 0;
51883 +@@ -2374,6 +2377,7 @@ static void azx_remove(struct pci_dev *pci)
51884 + cancel_delayed_work_sync(&hda->probe_work);
51885 + device_lock(&pci->dev);
51886 +
51887 ++ clear_bit(chip->dev_index, probed_devs);
51888 + pci_set_drvdata(pci, NULL);
51889 + snd_card_free(card);
51890 + }
51891 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
51892 +index 92df4f243ec65..cf4f277dccdda 100644
51893 +--- a/sound/pci/hda/patch_hdmi.c
51894 ++++ b/sound/pci/hda/patch_hdmi.c
51895 +@@ -1617,6 +1617,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
51896 + struct hda_codec *codec = per_pin->codec;
51897 + struct hdmi_spec *spec = codec->spec;
51898 + struct hdmi_eld *eld = &spec->temp_eld;
51899 ++ struct device *dev = hda_codec_dev(codec);
51900 + hda_nid_t pin_nid = per_pin->pin_nid;
51901 + int dev_id = per_pin->dev_id;
51902 + /*
51903 +@@ -1630,8 +1631,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
51904 + int present;
51905 + int ret;
51906 +
51907 ++#ifdef CONFIG_PM
51908 ++ if (dev->power.runtime_status == RPM_SUSPENDING)
51909 ++ return;
51910 ++#endif
51911 ++
51912 + ret = snd_hda_power_up_pm(codec);
51913 +- if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
51914 ++ if (ret < 0 && pm_runtime_suspended(dev))
51915 + goto out;
51916 +
51917 + present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
51918 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
51919 +index 75ff7e8498b83..16e90524a4977 100644
51920 +--- a/sound/pci/hda/patch_realtek.c
51921 ++++ b/sound/pci/hda/patch_realtek.c
51922 +@@ -3617,8 +3617,8 @@ static void alc256_shutup(struct hda_codec *codec)
51923 + /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
51924 + * when booting with headset plugged. So skip setting it for the codec alc257
51925 + */
51926 +- if (spec->codec_variant != ALC269_TYPE_ALC257 &&
51927 +- spec->codec_variant != ALC269_TYPE_ALC256)
51928 ++ if (codec->core.vendor_id != 0x10ec0236 &&
51929 ++ codec->core.vendor_id != 0x10ec0257)
51930 + alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
51931 +
51932 + if (!spec->no_shutup_pins)
51933 +@@ -6948,6 +6948,7 @@ enum {
51934 + ALC236_FIXUP_HP_MUTE_LED,
51935 + ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
51936 + ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
51937 ++ ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
51938 + ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
51939 + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
51940 + ALC269VC_FIXUP_ACER_HEADSET_MIC,
51941 +@@ -8273,6 +8274,14 @@ static const struct hda_fixup alc269_fixups[] = {
51942 + { }
51943 + },
51944 + },
51945 ++ [ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
51946 ++ .type = HDA_FIXUP_VERBS,
51947 ++ .v.verbs = (const struct hda_verb[]) {
51948 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x08},
51949 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2fcf},
51950 ++ { }
51951 ++ },
51952 ++ },
51953 + [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = {
51954 + .type = HDA_FIXUP_PINS,
51955 + .v.pins = (const struct hda_pintbl[]) {
51956 +@@ -9054,6 +9063,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
51957 + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
51958 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
51959 + SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
51960 ++ SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
51961 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
51962 + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
51963 + SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
51964 +@@ -9400,6 +9410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
51965 + {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
51966 + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
51967 + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
51968 ++ {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"},
51969 + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
51970 + {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
51971 + {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
51972 +diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
51973 +index cd05ee2802c9e..5247015e8b316 100644
51974 +--- a/sound/soc/amd/acp/acp-mach-common.c
51975 ++++ b/sound/soc/amd/acp/acp-mach-common.c
51976 +@@ -556,6 +556,8 @@ int acp_legacy_dai_links_create(struct snd_soc_card *card)
51977 + num_links++;
51978 +
51979 + links = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link) * num_links, GFP_KERNEL);
51980 ++ if (!links)
51981 ++ return -ENOMEM;
51982 +
51983 + if (drv_data->hs_cpu_id == I2S_SP) {
51984 + links[i].name = "acp-headset-codec";
51985 +diff --git a/sound/soc/amd/vangogh/acp5x-mach.c b/sound/soc/amd/vangogh/acp5x-mach.c
51986 +index 14cf325e4b237..5d7a17755fa7f 100644
51987 +--- a/sound/soc/amd/vangogh/acp5x-mach.c
51988 ++++ b/sound/soc/amd/vangogh/acp5x-mach.c
51989 +@@ -165,6 +165,7 @@ static int acp5x_cs35l41_hw_params(struct snd_pcm_substream *substream,
51990 + unsigned int num_codecs = rtd->num_codecs;
51991 + unsigned int bclk_val;
51992 +
51993 ++ ret = 0;
51994 + for (i = 0; i < num_codecs; i++) {
51995 + codec_dai = asoc_rtd_to_codec(rtd, i);
51996 + if ((strcmp(codec_dai->name, "spi-VLV1776:00") == 0) ||
51997 +diff --git a/sound/soc/amd/vangogh/acp5x-pcm-dma.c b/sound/soc/amd/vangogh/acp5x-pcm-dma.c
51998 +index f10de38976cb5..bfca4cf423cf1 100644
51999 +--- a/sound/soc/amd/vangogh/acp5x-pcm-dma.c
52000 ++++ b/sound/soc/amd/vangogh/acp5x-pcm-dma.c
52001 +@@ -281,7 +281,7 @@ static int acp5x_dma_hw_params(struct snd_soc_component *component,
52002 + return -EINVAL;
52003 + }
52004 + size = params_buffer_bytes(params);
52005 +- rtd->dma_addr = substream->dma_buffer.addr;
52006 ++ rtd->dma_addr = substream->runtime->dma_addr;
52007 + rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
52008 + config_acp5x_dma(rtd, substream->stream);
52009 + return 0;
52010 +@@ -426,51 +426,51 @@ static int acp5x_audio_remove(struct platform_device *pdev)
52011 + static int __maybe_unused acp5x_pcm_resume(struct device *dev)
52012 + {
52013 + struct i2s_dev_data *adata;
52014 +- u32 val, reg_val, frmt_val;
52015 ++ struct i2s_stream_instance *rtd;
52016 ++ u32 val;
52017 +
52018 +- reg_val = 0;
52019 +- frmt_val = 0;
52020 + adata = dev_get_drvdata(dev);
52021 +
52022 + if (adata->play_stream && adata->play_stream->runtime) {
52023 +- struct i2s_stream_instance *rtd =
52024 +- adata->play_stream->runtime->private_data;
52025 ++ rtd = adata->play_stream->runtime->private_data;
52026 + config_acp5x_dma(rtd, SNDRV_PCM_STREAM_PLAYBACK);
52027 +- switch (rtd->i2s_instance) {
52028 +- case I2S_HS_INSTANCE:
52029 +- reg_val = ACP_HSTDM_ITER;
52030 +- frmt_val = ACP_HSTDM_TXFRMT;
52031 +- break;
52032 +- case I2S_SP_INSTANCE:
52033 +- default:
52034 +- reg_val = ACP_I2STDM_ITER;
52035 +- frmt_val = ACP_I2STDM_TXFRMT;
52036 ++ acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_HSTDM_ITER);
52037 ++ if (adata->tdm_mode == TDM_ENABLE) {
52038 ++ acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_HSTDM_TXFRMT);
52039 ++ val = acp_readl(adata->acp5x_base + ACP_HSTDM_ITER);
52040 ++ acp_writel(val | 0x2, adata->acp5x_base + ACP_HSTDM_ITER);
52041 ++ }
52042 ++ }
52043 ++ if (adata->i2ssp_play_stream && adata->i2ssp_play_stream->runtime) {
52044 ++ rtd = adata->i2ssp_play_stream->runtime->private_data;
52045 ++ config_acp5x_dma(rtd, SNDRV_PCM_STREAM_PLAYBACK);
52046 ++ acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_I2STDM_ITER);
52047 ++ if (adata->tdm_mode == TDM_ENABLE) {
52048 ++ acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_I2STDM_TXFRMT);
52049 ++ val = acp_readl(adata->acp5x_base + ACP_I2STDM_ITER);
52050 ++ acp_writel(val | 0x2, adata->acp5x_base + ACP_I2STDM_ITER);
52051 + }
52052 +- acp_writel((rtd->xfer_resolution << 3),
52053 +- rtd->acp5x_base + reg_val);
52054 + }
52055 +
52056 + if (adata->capture_stream && adata->capture_stream->runtime) {
52057 +- struct i2s_stream_instance *rtd =
52058 +- adata->capture_stream->runtime->private_data;
52059 ++ rtd = adata->capture_stream->runtime->private_data;
52060 + config_acp5x_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
52061 +- switch (rtd->i2s_instance) {
52062 +- case I2S_HS_INSTANCE:
52063 +- reg_val = ACP_HSTDM_IRER;
52064 +- frmt_val = ACP_HSTDM_RXFRMT;
52065 +- break;
52066 +- case I2S_SP_INSTANCE:
52067 +- default:
52068 +- reg_val = ACP_I2STDM_IRER;
52069 +- frmt_val = ACP_I2STDM_RXFRMT;
52070 ++ acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_HSTDM_IRER);
52071 ++ if (adata->tdm_mode == TDM_ENABLE) {
52072 ++ acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_HSTDM_RXFRMT);
52073 ++ val = acp_readl(adata->acp5x_base + ACP_HSTDM_IRER);
52074 ++ acp_writel(val | 0x2, adata->acp5x_base + ACP_HSTDM_IRER);
52075 + }
52076 +- acp_writel((rtd->xfer_resolution << 3),
52077 +- rtd->acp5x_base + reg_val);
52078 + }
52079 +- if (adata->tdm_mode == TDM_ENABLE) {
52080 +- acp_writel(adata->tdm_fmt, adata->acp5x_base + frmt_val);
52081 +- val = acp_readl(adata->acp5x_base + reg_val);
52082 +- acp_writel(val | 0x2, adata->acp5x_base + reg_val);
52083 ++ if (adata->i2ssp_capture_stream && adata->i2ssp_capture_stream->runtime) {
52084 ++ rtd = adata->i2ssp_capture_stream->runtime->private_data;
52085 ++ config_acp5x_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
52086 ++ acp_writel((rtd->xfer_resolution << 3), rtd->acp5x_base + ACP_I2STDM_IRER);
52087 ++ if (adata->tdm_mode == TDM_ENABLE) {
52088 ++ acp_writel(adata->tdm_fmt, adata->acp5x_base + ACP_I2STDM_RXFRMT);
52089 ++ val = acp_readl(adata->acp5x_base + ACP_I2STDM_IRER);
52090 ++ acp_writel(val | 0x2, adata->acp5x_base + ACP_I2STDM_IRER);
52091 ++ }
52092 + }
52093 + acp_writel(1, adata->acp5x_base + ACP_EXTERNAL_INTR_ENB);
52094 + return 0;
52095 +diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
52096 +index 26e2bc690d86e..c1dea8d624164 100644
52097 +--- a/sound/soc/atmel/atmel_ssc_dai.c
52098 ++++ b/sound/soc/atmel/atmel_ssc_dai.c
52099 +@@ -280,7 +280,10 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
52100 +
52101 + /* Enable PMC peripheral clock for this SSC */
52102 + pr_debug("atmel_ssc_dai: Starting clock\n");
52103 +- clk_enable(ssc_p->ssc->clk);
52104 ++ ret = clk_enable(ssc_p->ssc->clk);
52105 ++ if (ret)
52106 ++ return ret;
52107 ++
52108 + ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
52109 +
52110 + /* Reset the SSC unless initialized to keep it in a clean state */
52111 +diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
52112 +index 627564c18c270..ce46d8a0b7e43 100644
52113 +--- a/sound/soc/atmel/mikroe-proto.c
52114 ++++ b/sound/soc/atmel/mikroe-proto.c
52115 +@@ -115,7 +115,8 @@ static int snd_proto_probe(struct platform_device *pdev)
52116 + cpu_np = of_parse_phandle(np, "i2s-controller", 0);
52117 + if (!cpu_np) {
52118 + dev_err(&pdev->dev, "i2s-controller missing\n");
52119 +- return -EINVAL;
52120 ++ ret = -EINVAL;
52121 ++ goto put_codec_node;
52122 + }
52123 + dai->cpus->of_node = cpu_np;
52124 + dai->platforms->of_node = cpu_np;
52125 +@@ -125,7 +126,8 @@ static int snd_proto_probe(struct platform_device *pdev)
52126 + &bitclkmaster, &framemaster);
52127 + if (bitclkmaster != framemaster) {
52128 + dev_err(&pdev->dev, "Must be the same bitclock and frame master\n");
52129 +- return -EINVAL;
52130 ++ ret = -EINVAL;
52131 ++ goto put_cpu_node;
52132 + }
52133 + if (bitclkmaster) {
52134 + if (codec_np == bitclkmaster)
52135 +@@ -136,18 +138,20 @@ static int snd_proto_probe(struct platform_device *pdev)
52136 + dai_fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, NULL);
52137 + }
52138 +
52139 +- of_node_put(bitclkmaster);
52140 +- of_node_put(framemaster);
52141 +- dai->dai_fmt = dai_fmt;
52142 +-
52143 +- of_node_put(codec_np);
52144 +- of_node_put(cpu_np);
52145 +
52146 ++ dai->dai_fmt = dai_fmt;
52147 + ret = snd_soc_register_card(&snd_proto);
52148 + if (ret)
52149 + dev_err_probe(&pdev->dev, ret,
52150 + "snd_soc_register_card() failed\n");
52151 +
52152 ++
52153 ++put_cpu_node:
52154 ++ of_node_put(bitclkmaster);
52155 ++ of_node_put(framemaster);
52156 ++ of_node_put(cpu_np);
52157 ++put_codec_node:
52158 ++ of_node_put(codec_np);
52159 + return ret;
52160 + }
52161 +
52162 +diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
52163 +index 915da92e1ec82..33e43013ff770 100644
52164 +--- a/sound/soc/atmel/sam9g20_wm8731.c
52165 ++++ b/sound/soc/atmel/sam9g20_wm8731.c
52166 +@@ -214,6 +214,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
52167 + cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
52168 + if (!cpu_np) {
52169 + dev_err(&pdev->dev, "dai and pcm info missing\n");
52170 ++ of_node_put(codec_np);
52171 + return -EINVAL;
52172 + }
52173 + at91sam9g20ek_dai.cpus->of_node = cpu_np;
52174 +diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
52175 +index 7c45dc4f8c1bb..99310e40e7a62 100644
52176 +--- a/sound/soc/atmel/sam9x5_wm8731.c
52177 ++++ b/sound/soc/atmel/sam9x5_wm8731.c
52178 +@@ -142,7 +142,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
52179 + if (!cpu_np) {
52180 + dev_err(&pdev->dev, "atmel,ssc-controller node missing\n");
52181 + ret = -EINVAL;
52182 +- goto out;
52183 ++ goto out_put_codec_np;
52184 + }
52185 + dai->cpus->of_node = cpu_np;
52186 + dai->platforms->of_node = cpu_np;
52187 +@@ -153,12 +153,9 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
52188 + if (ret != 0) {
52189 + dev_err(&pdev->dev, "Failed to set SSC %d for audio: %d\n",
52190 + ret, priv->ssc_id);
52191 +- goto out;
52192 ++ goto out_put_cpu_np;
52193 + }
52194 +
52195 +- of_node_put(codec_np);
52196 +- of_node_put(cpu_np);
52197 +-
52198 + ret = devm_snd_soc_register_card(&pdev->dev, card);
52199 + if (ret) {
52200 + dev_err(&pdev->dev, "Platform device allocation failed\n");
52201 +@@ -167,10 +164,14 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
52202 +
52203 + dev_dbg(&pdev->dev, "%s ok\n", __func__);
52204 +
52205 +- return ret;
52206 ++ goto out_put_cpu_np;
52207 +
52208 + out_put_audio:
52209 + atmel_ssc_put_audio(priv->ssc_id);
52210 ++out_put_cpu_np:
52211 ++ of_node_put(cpu_np);
52212 ++out_put_codec_np:
52213 ++ of_node_put(codec_np);
52214 + out:
52215 + return ret;
52216 + }
52217 +diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
52218 +index d3e5ae8310ef2..30c00380499cd 100644
52219 +--- a/sound/soc/codecs/Kconfig
52220 ++++ b/sound/soc/codecs/Kconfig
52221 +@@ -733,6 +733,7 @@ config SND_SOC_CS4349
52222 +
52223 + config SND_SOC_CS47L15
52224 + tristate
52225 ++ depends on MFD_CS47L15
52226 +
52227 + config SND_SOC_CS47L24
52228 + tristate
52229 +@@ -740,15 +741,19 @@ config SND_SOC_CS47L24
52230 +
52231 + config SND_SOC_CS47L35
52232 + tristate
52233 ++ depends on MFD_CS47L35
52234 +
52235 + config SND_SOC_CS47L85
52236 + tristate
52237 ++ depends on MFD_CS47L85
52238 +
52239 + config SND_SOC_CS47L90
52240 + tristate
52241 ++ depends on MFD_CS47L90
52242 +
52243 + config SND_SOC_CS47L92
52244 + tristate
52245 ++ depends on MFD_CS47L92
52246 +
52247 + # Cirrus Logic Quad-Channel ADC
52248 + config SND_SOC_CS53L30
52249 +diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
52250 +index 77a0176946459..f3787d77f892b 100644
52251 +--- a/sound/soc/codecs/cs35l41.c
52252 ++++ b/sound/soc/codecs/cs35l41.c
52253 +@@ -1035,8 +1035,8 @@ static int cs35l41_irq_gpio_config(struct cs35l41_private *cs35l41)
52254 +
52255 + regmap_update_bits(cs35l41->regmap, CS35L41_GPIO2_CTRL1,
52256 + CS35L41_GPIO_POL_MASK | CS35L41_GPIO_DIR_MASK,
52257 +- irq_gpio_cfg1->irq_pol_inv << CS35L41_GPIO_POL_SHIFT |
52258 +- !irq_gpio_cfg1->irq_out_en << CS35L41_GPIO_DIR_SHIFT);
52259 ++ irq_gpio_cfg2->irq_pol_inv << CS35L41_GPIO_POL_SHIFT |
52260 ++ !irq_gpio_cfg2->irq_out_en << CS35L41_GPIO_DIR_SHIFT);
52261 +
52262 + regmap_update_bits(cs35l41->regmap, CS35L41_GPIO_PAD_CONTROL,
52263 + CS35L41_GPIO1_CTRL_MASK | CS35L41_GPIO2_CTRL_MASK,
52264 +@@ -1091,7 +1091,7 @@ static struct snd_soc_dai_driver cs35l41_dai[] = {
52265 + .capture = {
52266 + .stream_name = "AMP Capture",
52267 + .channels_min = 1,
52268 +- .channels_max = 8,
52269 ++ .channels_max = 4,
52270 + .rates = SNDRV_PCM_RATE_KNOT,
52271 + .formats = CS35L41_TX_FORMATS,
52272 + },
52273 +diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
52274 +index 43d98bdb5b5b0..2c294868008ed 100644
52275 +--- a/sound/soc/codecs/cs42l42.c
52276 ++++ b/sound/soc/codecs/cs42l42.c
52277 +@@ -1637,7 +1637,11 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
52278 +
52279 + mutex_lock(&cs42l42->jack_detect_mutex);
52280 +
52281 +- /* Check auto-detect status */
52282 ++ /*
52283 ++ * Check auto-detect status. Don't assume a previous unplug event has
52284 ++ * cleared the flags. If the jack is unplugged and plugged during
52285 ++ * system suspend there won't have been an unplug event.
52286 ++ */
52287 + if ((~masks[5]) & irq_params_table[5].mask) {
52288 + if (stickies[5] & CS42L42_HSDET_AUTO_DONE_MASK) {
52289 + cs42l42_process_hs_type_detect(cs42l42);
52290 +@@ -1645,11 +1649,15 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
52291 + case CS42L42_PLUG_CTIA:
52292 + case CS42L42_PLUG_OMTP:
52293 + snd_soc_jack_report(cs42l42->jack, SND_JACK_HEADSET,
52294 +- SND_JACK_HEADSET);
52295 ++ SND_JACK_HEADSET |
52296 ++ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
52297 ++ SND_JACK_BTN_2 | SND_JACK_BTN_3);
52298 + break;
52299 + case CS42L42_PLUG_HEADPHONE:
52300 + snd_soc_jack_report(cs42l42->jack, SND_JACK_HEADPHONE,
52301 +- SND_JACK_HEADPHONE);
52302 ++ SND_JACK_HEADSET |
52303 ++ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
52304 ++ SND_JACK_BTN_2 | SND_JACK_BTN_3);
52305 + break;
52306 + default:
52307 + break;
52308 +diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
52309 +index 6ffe88345de5f..3a3dc0539d921 100644
52310 +--- a/sound/soc/codecs/lpass-rx-macro.c
52311 ++++ b/sound/soc/codecs/lpass-rx-macro.c
52312 +@@ -2039,6 +2039,10 @@ static int rx_macro_load_compander_coeff(struct snd_soc_component *component,
52313 + int i;
52314 + int hph_pwr_mode;
52315 +
52316 ++ /* AUX does not have compander */
52317 ++ if (comp == INTERP_AUX)
52318 ++ return 0;
52319 ++
52320 + if (!rx->comp_enabled[comp])
52321 + return 0;
52322 +
52323 +@@ -2268,7 +2272,7 @@ static int rx_macro_mux_get(struct snd_kcontrol *kcontrol,
52324 + struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
52325 + struct rx_macro *rx = snd_soc_component_get_drvdata(component);
52326 +
52327 +- ucontrol->value.integer.value[0] =
52328 ++ ucontrol->value.enumerated.item[0] =
52329 + rx->rx_port_value[widget->shift];
52330 + return 0;
52331 + }
52332 +@@ -2280,7 +2284,7 @@ static int rx_macro_mux_put(struct snd_kcontrol *kcontrol,
52333 + struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
52334 + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
52335 + struct snd_soc_dapm_update *update = NULL;
52336 +- u32 rx_port_value = ucontrol->value.integer.value[0];
52337 ++ u32 rx_port_value = ucontrol->value.enumerated.item[0];
52338 + u32 aif_rst;
52339 + struct rx_macro *rx = snd_soc_component_get_drvdata(component);
52340 +
52341 +@@ -2392,7 +2396,7 @@ static int rx_macro_get_hph_pwr_mode(struct snd_kcontrol *kcontrol,
52342 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
52343 + struct rx_macro *rx = snd_soc_component_get_drvdata(component);
52344 +
52345 +- ucontrol->value.integer.value[0] = rx->hph_pwr_mode;
52346 ++ ucontrol->value.enumerated.item[0] = rx->hph_pwr_mode;
52347 + return 0;
52348 + }
52349 +
52350 +@@ -2402,7 +2406,7 @@ static int rx_macro_put_hph_pwr_mode(struct snd_kcontrol *kcontrol,
52351 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
52352 + struct rx_macro *rx = snd_soc_component_get_drvdata(component);
52353 +
52354 +- rx->hph_pwr_mode = ucontrol->value.integer.value[0];
52355 ++ rx->hph_pwr_mode = ucontrol->value.enumerated.item[0];
52356 + return 0;
52357 + }
52358 +
52359 +@@ -3542,6 +3546,8 @@ static int rx_macro_probe(struct platform_device *pdev)
52360 + return PTR_ERR(base);
52361 +
52362 + rx->regmap = devm_regmap_init_mmio(dev, base, &rx_regmap_config);
52363 ++ if (IS_ERR(rx->regmap))
52364 ++ return PTR_ERR(rx->regmap);
52365 +
52366 + dev_set_drvdata(dev, rx);
52367 +
52368 +diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
52369 +index a4c0a155af565..9c96ab1bf84f9 100644
52370 +--- a/sound/soc/codecs/lpass-tx-macro.c
52371 ++++ b/sound/soc/codecs/lpass-tx-macro.c
52372 +@@ -1821,6 +1821,8 @@ static int tx_macro_probe(struct platform_device *pdev)
52373 + }
52374 +
52375 + tx->regmap = devm_regmap_init_mmio(dev, base, &tx_regmap_config);
52376 ++ if (IS_ERR(tx->regmap))
52377 ++ return PTR_ERR(tx->regmap);
52378 +
52379 + dev_set_drvdata(dev, tx);
52380 +
52381 +diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
52382 +index 11147e35689b2..e14c277e6a8b6 100644
52383 +--- a/sound/soc/codecs/lpass-va-macro.c
52384 ++++ b/sound/soc/codecs/lpass-va-macro.c
52385 +@@ -780,7 +780,7 @@ static int va_macro_dec_mode_get(struct snd_kcontrol *kcontrol,
52386 + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
52387 + int path = e->shift_l;
52388 +
52389 +- ucontrol->value.integer.value[0] = va->dec_mode[path];
52390 ++ ucontrol->value.enumerated.item[0] = va->dec_mode[path];
52391 +
52392 + return 0;
52393 + }
52394 +@@ -789,7 +789,7 @@ static int va_macro_dec_mode_put(struct snd_kcontrol *kcontrol,
52395 + struct snd_ctl_elem_value *ucontrol)
52396 + {
52397 + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
52398 +- int value = ucontrol->value.integer.value[0];
52399 ++ int value = ucontrol->value.enumerated.item[0];
52400 + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
52401 + int path = e->shift_l;
52402 + struct va_macro *va = snd_soc_component_get_drvdata(comp);
52403 +diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
52404 +index 75baf8eb70299..69d2915f40d88 100644
52405 +--- a/sound/soc/codecs/lpass-wsa-macro.c
52406 ++++ b/sound/soc/codecs/lpass-wsa-macro.c
52407 +@@ -2405,6 +2405,8 @@ static int wsa_macro_probe(struct platform_device *pdev)
52408 + return PTR_ERR(base);
52409 +
52410 + wsa->regmap = devm_regmap_init_mmio(dev, base, &wsa_regmap_config);
52411 ++ if (IS_ERR(wsa->regmap))
52412 ++ return PTR_ERR(wsa->regmap);
52413 +
52414 + dev_set_drvdata(dev, wsa);
52415 +
52416 +diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
52417 +index 5ba5f876eab87..fd84780bf689f 100644
52418 +--- a/sound/soc/codecs/max98927.c
52419 ++++ b/sound/soc/codecs/max98927.c
52420 +@@ -16,6 +16,7 @@
52421 + #include <sound/pcm_params.h>
52422 + #include <sound/soc.h>
52423 + #include <linux/gpio.h>
52424 ++#include <linux/gpio/consumer.h>
52425 + #include <linux/of_gpio.h>
52426 + #include <sound/tlv.h>
52427 + #include "max98927.h"
52428 +diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
52429 +index 485cda46dbb9b..e52a559c52d68 100644
52430 +--- a/sound/soc/codecs/msm8916-wcd-analog.c
52431 ++++ b/sound/soc/codecs/msm8916-wcd-analog.c
52432 +@@ -1222,8 +1222,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
52433 + }
52434 +
52435 + irq = platform_get_irq_byname(pdev, "mbhc_switch_int");
52436 +- if (irq < 0)
52437 +- return irq;
52438 ++ if (irq < 0) {
52439 ++ ret = irq;
52440 ++ goto err_disable_clk;
52441 ++ }
52442 +
52443 + ret = devm_request_threaded_irq(dev, irq, NULL,
52444 + pm8916_mbhc_switch_irq_handler,
52445 +@@ -1235,8 +1237,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
52446 +
52447 + if (priv->mbhc_btn_enabled) {
52448 + irq = platform_get_irq_byname(pdev, "mbhc_but_press_det");
52449 +- if (irq < 0)
52450 +- return irq;
52451 ++ if (irq < 0) {
52452 ++ ret = irq;
52453 ++ goto err_disable_clk;
52454 ++ }
52455 +
52456 + ret = devm_request_threaded_irq(dev, irq, NULL,
52457 + mbhc_btn_press_irq_handler,
52458 +@@ -1247,8 +1251,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
52459 + dev_err(dev, "cannot request mbhc button press irq\n");
52460 +
52461 + irq = platform_get_irq_byname(pdev, "mbhc_but_rel_det");
52462 +- if (irq < 0)
52463 +- return irq;
52464 ++ if (irq < 0) {
52465 ++ ret = irq;
52466 ++ goto err_disable_clk;
52467 ++ }
52468 +
52469 + ret = devm_request_threaded_irq(dev, irq, NULL,
52470 + mbhc_btn_release_irq_handler,
52471 +@@ -1265,6 +1271,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
52472 + return devm_snd_soc_register_component(dev, &pm8916_wcd_analog,
52473 + pm8916_wcd_analog_dai,
52474 + ARRAY_SIZE(pm8916_wcd_analog_dai));
52475 ++
52476 ++err_disable_clk:
52477 ++ clk_disable_unprepare(priv->mclk);
52478 ++ return ret;
52479 + }
52480 +
52481 + static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev)
52482 +diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
52483 +index fcc10c8bc6259..9ad7fc0baf072 100644
52484 +--- a/sound/soc/codecs/msm8916-wcd-digital.c
52485 ++++ b/sound/soc/codecs/msm8916-wcd-digital.c
52486 +@@ -1201,7 +1201,7 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
52487 + ret = clk_prepare_enable(priv->mclk);
52488 + if (ret < 0) {
52489 + dev_err(dev, "failed to enable mclk %d\n", ret);
52490 +- return ret;
52491 ++ goto err_clk;
52492 + }
52493 +
52494 + dev_set_drvdata(dev, priv);
52495 +@@ -1209,6 +1209,9 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
52496 + return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
52497 + msm8916_wcd_digital_dai,
52498 + ARRAY_SIZE(msm8916_wcd_digital_dai));
52499 ++err_clk:
52500 ++ clk_disable_unprepare(priv->ahbclk);
52501 ++ return ret;
52502 + }
52503 +
52504 + static int msm8916_wcd_digital_remove(struct platform_device *pdev)
52505 +diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
52506 +index 9b263a9a669dc..4c7b5d940799b 100644
52507 +--- a/sound/soc/codecs/mt6358.c
52508 ++++ b/sound/soc/codecs/mt6358.c
52509 +@@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
52510 + priv->mtkaif_protocol = mtkaif_protocol;
52511 + return 0;
52512 + }
52513 ++EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol);
52514 +
52515 + static void playback_gpio_set(struct mt6358_priv *priv)
52516 + {
52517 +@@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
52518 + 1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
52519 + return 0;
52520 + }
52521 ++EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable);
52522 +
52523 + int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
52524 + {
52525 +@@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
52526 + capture_gpio_reset(priv);
52527 + return 0;
52528 + }
52529 ++EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable);
52530 +
52531 + int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
52532 + int phase_1, int phase_2)
52533 +@@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
52534 + phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
52535 + return 0;
52536 + }
52537 ++EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase);
52538 +
52539 + /* dl pga gain */
52540 + enum {
52541 +diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
52542 +index 03f24edfe4f64..8fffe378618d0 100644
52543 +--- a/sound/soc/codecs/rk817_codec.c
52544 ++++ b/sound/soc/codecs/rk817_codec.c
52545 +@@ -508,12 +508,14 @@ static int rk817_platform_probe(struct platform_device *pdev)
52546 + if (ret < 0) {
52547 + dev_err(&pdev->dev, "%s() register codec error %d\n",
52548 + __func__, ret);
52549 +- goto err_;
52550 ++ goto err_clk;
52551 + }
52552 +
52553 + return 0;
52554 +-err_:
52555 +
52556 ++err_clk:
52557 ++ clk_disable_unprepare(rk817_codec_data->mclk);
52558 ++err_:
52559 + return ret;
52560 + }
52561 +
52562 +diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
52563 +index 2138f62e6af5d..3a8fba101b20f 100644
52564 +--- a/sound/soc/codecs/rt5663.c
52565 ++++ b/sound/soc/codecs/rt5663.c
52566 +@@ -3478,6 +3478,8 @@ static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev)
52567 + table_size = sizeof(struct impedance_mapping_table) *
52568 + rt5663->pdata.impedance_sensing_num;
52569 + rt5663->imp_table = devm_kzalloc(dev, table_size, GFP_KERNEL);
52570 ++ if (!rt5663->imp_table)
52571 ++ return -ENOMEM;
52572 + ret = device_property_read_u32_array(dev,
52573 + "realtek,impedance_sensing_table",
52574 + (u32 *)rt5663->imp_table, table_size);
52575 +diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
52576 +index 1e662d1be2b3e..92b8753f1267b 100644
52577 +--- a/sound/soc/codecs/rt5682s.c
52578 ++++ b/sound/soc/codecs/rt5682s.c
52579 +@@ -822,6 +822,7 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
52580 + {
52581 + struct rt5682s_priv *rt5682s =
52582 + container_of(work, struct rt5682s_priv, jack_detect_work.work);
52583 ++ struct snd_soc_dapm_context *dapm;
52584 + int val, btn_type;
52585 +
52586 + if (!rt5682s->component || !rt5682s->component->card ||
52587 +@@ -832,7 +833,9 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
52588 + return;
52589 + }
52590 +
52591 +- mutex_lock(&rt5682s->jdet_mutex);
52592 ++ dapm = snd_soc_component_get_dapm(rt5682s->component);
52593 ++
52594 ++ snd_soc_dapm_mutex_lock(dapm);
52595 + mutex_lock(&rt5682s->calibrate_mutex);
52596 +
52597 + val = snd_soc_component_read(rt5682s->component, RT5682S_AJD1_CTRL)
52598 +@@ -889,6 +892,9 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
52599 + rt5682s->irq_work_delay_time = 50;
52600 + }
52601 +
52602 ++ mutex_unlock(&rt5682s->calibrate_mutex);
52603 ++ snd_soc_dapm_mutex_unlock(dapm);
52604 ++
52605 + snd_soc_jack_report(rt5682s->hs_jack, rt5682s->jack_type,
52606 + SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
52607 + SND_JACK_BTN_2 | SND_JACK_BTN_3);
52608 +@@ -898,9 +904,6 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
52609 + schedule_delayed_work(&rt5682s->jd_check_work, 0);
52610 + else
52611 + cancel_delayed_work_sync(&rt5682s->jd_check_work);
52612 +-
52613 +- mutex_unlock(&rt5682s->calibrate_mutex);
52614 +- mutex_unlock(&rt5682s->jdet_mutex);
52615 + }
52616 +
52617 + static void rt5682s_jd_check_handler(struct work_struct *work)
52618 +@@ -908,14 +911,9 @@ static void rt5682s_jd_check_handler(struct work_struct *work)
52619 + struct rt5682s_priv *rt5682s =
52620 + container_of(work, struct rt5682s_priv, jd_check_work.work);
52621 +
52622 +- if (snd_soc_component_read(rt5682s->component, RT5682S_AJD1_CTRL)
52623 +- & RT5682S_JDH_RS_MASK) {
52624 ++ if (snd_soc_component_read(rt5682s->component, RT5682S_AJD1_CTRL) & RT5682S_JDH_RS_MASK) {
52625 + /* jack out */
52626 +- rt5682s->jack_type = rt5682s_headset_detect(rt5682s->component, 0);
52627 +-
52628 +- snd_soc_jack_report(rt5682s->hs_jack, rt5682s->jack_type,
52629 +- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
52630 +- SND_JACK_BTN_2 | SND_JACK_BTN_3);
52631 ++ schedule_delayed_work(&rt5682s->jack_detect_work, 0);
52632 + } else {
52633 + schedule_delayed_work(&rt5682s->jd_check_work, 500);
52634 + }
52635 +@@ -1323,7 +1321,6 @@ static int rt5682s_hp_amp_event(struct snd_soc_dapm_widget *w,
52636 + struct snd_kcontrol *kcontrol, int event)
52637 + {
52638 + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
52639 +- struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
52640 +
52641 + switch (event) {
52642 + case SND_SOC_DAPM_POST_PMU:
52643 +@@ -1339,8 +1336,6 @@ static int rt5682s_hp_amp_event(struct snd_soc_dapm_widget *w,
52644 + snd_soc_component_write(component, RT5682S_BIAS_CUR_CTRL_11, 0x6666);
52645 + snd_soc_component_write(component, RT5682S_BIAS_CUR_CTRL_12, 0xa82a);
52646 +
52647 +- mutex_lock(&rt5682s->jdet_mutex);
52648 +-
52649 + snd_soc_component_update_bits(component, RT5682S_HP_CTRL_2,
52650 + RT5682S_HPO_L_PATH_MASK | RT5682S_HPO_R_PATH_MASK |
52651 + RT5682S_HPO_SEL_IP_EN_SW, RT5682S_HPO_L_PATH_EN |
52652 +@@ -1348,8 +1343,6 @@ static int rt5682s_hp_amp_event(struct snd_soc_dapm_widget *w,
52653 + usleep_range(5000, 10000);
52654 + snd_soc_component_update_bits(component, RT5682S_HP_AMP_DET_CTL_1,
52655 + RT5682S_CP_SW_SIZE_MASK, RT5682S_CP_SW_SIZE_L | RT5682S_CP_SW_SIZE_S);
52656 +-
52657 +- mutex_unlock(&rt5682s->jdet_mutex);
52658 + break;
52659 +
52660 + case SND_SOC_DAPM_POST_PMD:
52661 +@@ -3103,7 +3096,6 @@ static int rt5682s_i2c_probe(struct i2c_client *i2c,
52662 +
52663 + mutex_init(&rt5682s->calibrate_mutex);
52664 + mutex_init(&rt5682s->sar_mutex);
52665 +- mutex_init(&rt5682s->jdet_mutex);
52666 + rt5682s_calibrate(rt5682s);
52667 +
52668 + regmap_update_bits(rt5682s->regmap, RT5682S_MICBIAS_2,
52669 +diff --git a/sound/soc/codecs/rt5682s.h b/sound/soc/codecs/rt5682s.h
52670 +index 1bf2ef7ce5784..397a2531b6f68 100644
52671 +--- a/sound/soc/codecs/rt5682s.h
52672 ++++ b/sound/soc/codecs/rt5682s.h
52673 +@@ -1446,7 +1446,6 @@ struct rt5682s_priv {
52674 + struct delayed_work jd_check_work;
52675 + struct mutex calibrate_mutex;
52676 + struct mutex sar_mutex;
52677 +- struct mutex jdet_mutex;
52678 +
52679 + #ifdef CONFIG_COMMON_CLK
52680 + struct clk_hw dai_clks_hw[RT5682S_DAI_NUM_CLKS];
52681 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
52682 +index 6c468527fec61..1e75e93cf28f2 100644
52683 +--- a/sound/soc/codecs/wcd934x.c
52684 ++++ b/sound/soc/codecs/wcd934x.c
52685 +@@ -3023,14 +3023,14 @@ static int wcd934x_hph_impedance_get(struct snd_kcontrol *kcontrol,
52686 + return 0;
52687 + }
52688 + static const struct snd_kcontrol_new hph_type_detect_controls[] = {
52689 +- SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
52690 ++ SOC_SINGLE_EXT("HPH Type", 0, 0, WCD_MBHC_HPH_STEREO, 0,
52691 + wcd934x_get_hph_type, NULL),
52692 + };
52693 +
52694 + static const struct snd_kcontrol_new impedance_detect_controls[] = {
52695 +- SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
52696 ++ SOC_SINGLE_EXT("HPHL Impedance", 0, 0, INT_MAX, 0,
52697 + wcd934x_hph_impedance_get, NULL),
52698 +- SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
52699 ++ SOC_SINGLE_EXT("HPHR Impedance", 0, 1, INT_MAX, 0,
52700 + wcd934x_hph_impedance_get, NULL),
52701 + };
52702 +
52703 +@@ -3308,13 +3308,16 @@ static int wcd934x_rx_hph_mode_put(struct snd_kcontrol *kc,
52704 +
52705 + mode_val = ucontrol->value.enumerated.item[0];
52706 +
52707 ++ if (mode_val == wcd->hph_mode)
52708 ++ return 0;
52709 ++
52710 + if (mode_val == 0) {
52711 + dev_err(wcd->dev, "Invalid HPH Mode, default to ClSH HiFi\n");
52712 + mode_val = CLS_H_LOHIFI;
52713 + }
52714 + wcd->hph_mode = mode_val;
52715 +
52716 +- return 0;
52717 ++ return 1;
52718 + }
52719 +
52720 + static int slim_rx_mux_get(struct snd_kcontrol *kc,
52721 +@@ -5883,6 +5886,7 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
52722 + }
52723 +
52724 + wcd->sidev = of_slim_get_device(wcd->sdev->ctrl, ifc_dev_np);
52725 ++ of_node_put(ifc_dev_np);
52726 + if (!wcd->sidev) {
52727 + dev_err(dev, "Unable to get SLIM Interface device\n");
52728 + return -EINVAL;
52729 +diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
52730 +index 36cbc66914f90..9ae65cbabb1aa 100644
52731 +--- a/sound/soc/codecs/wcd938x.c
52732 ++++ b/sound/soc/codecs/wcd938x.c
52733 +@@ -2504,7 +2504,7 @@ static int wcd938x_tx_mode_get(struct snd_kcontrol *kcontrol,
52734 + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
52735 + int path = e->shift_l;
52736 +
52737 +- ucontrol->value.integer.value[0] = wcd938x->tx_mode[path];
52738 ++ ucontrol->value.enumerated.item[0] = wcd938x->tx_mode[path];
52739 +
52740 + return 0;
52741 + }
52742 +@@ -2528,7 +2528,7 @@ static int wcd938x_rx_hph_mode_get(struct snd_kcontrol *kcontrol,
52743 + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
52744 + struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
52745 +
52746 +- ucontrol->value.integer.value[0] = wcd938x->hph_mode;
52747 ++ ucontrol->value.enumerated.item[0] = wcd938x->hph_mode;
52748 +
52749 + return 0;
52750 + }
52751 +@@ -3575,14 +3575,14 @@ static int wcd938x_hph_impedance_get(struct snd_kcontrol *kcontrol,
52752 + }
52753 +
52754 + static const struct snd_kcontrol_new hph_type_detect_controls[] = {
52755 +- SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
52756 ++ SOC_SINGLE_EXT("HPH Type", 0, 0, WCD_MBHC_HPH_STEREO, 0,
52757 + wcd938x_get_hph_type, NULL),
52758 + };
52759 +
52760 + static const struct snd_kcontrol_new impedance_detect_controls[] = {
52761 +- SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
52762 ++ SOC_SINGLE_EXT("HPHL Impedance", 0, 0, INT_MAX, 0,
52763 + wcd938x_hph_impedance_get, NULL),
52764 +- SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
52765 ++ SOC_SINGLE_EXT("HPHR Impedance", 0, 1, INT_MAX, 0,
52766 + wcd938x_hph_impedance_get, NULL),
52767 + };
52768 +
52769 +diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
52770 +index 15d42ce3b21d6..41504ce2a682f 100644
52771 +--- a/sound/soc/codecs/wm8350.c
52772 ++++ b/sound/soc/codecs/wm8350.c
52773 +@@ -1537,18 +1537,38 @@ static int wm8350_component_probe(struct snd_soc_component *component)
52774 + wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
52775 + WM8350_JDL_ENA | WM8350_JDR_ENA);
52776 +
52777 +- wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L,
52778 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L,
52779 + wm8350_hpl_jack_handler, 0, "Left jack detect",
52780 + priv);
52781 +- wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R,
52782 ++ if (ret != 0)
52783 ++ goto err;
52784 ++
52785 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R,
52786 + wm8350_hpr_jack_handler, 0, "Right jack detect",
52787 + priv);
52788 +- wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD,
52789 ++ if (ret != 0)
52790 ++ goto free_jck_det_l;
52791 ++
52792 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD,
52793 + wm8350_mic_handler, 0, "Microphone short", priv);
52794 +- wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD,
52795 ++ if (ret != 0)
52796 ++ goto free_jck_det_r;
52797 ++
52798 ++ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD,
52799 + wm8350_mic_handler, 0, "Microphone detect", priv);
52800 ++ if (ret != 0)
52801 ++ goto free_micscd;
52802 +
52803 + return 0;
52804 ++
52805 ++free_micscd:
52806 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, priv);
52807 ++free_jck_det_r:
52808 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, priv);
52809 ++free_jck_det_l:
52810 ++ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, priv);
52811 ++err:
52812 ++ return ret;
52813 + }
52814 +
52815 + static void wm8350_component_remove(struct snd_soc_component *component)
52816 +diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
52817 +index 5cb58929090d4..1edac3e10f345 100644
52818 +--- a/sound/soc/dwc/dwc-i2s.c
52819 ++++ b/sound/soc/dwc/dwc-i2s.c
52820 +@@ -403,9 +403,13 @@ static int dw_i2s_runtime_suspend(struct device *dev)
52821 + static int dw_i2s_runtime_resume(struct device *dev)
52822 + {
52823 + struct dw_i2s_dev *dw_dev = dev_get_drvdata(dev);
52824 ++ int ret;
52825 +
52826 +- if (dw_dev->capability & DW_I2S_MASTER)
52827 +- clk_enable(dw_dev->clk);
52828 ++ if (dw_dev->capability & DW_I2S_MASTER) {
52829 ++ ret = clk_enable(dw_dev->clk);
52830 ++ if (ret)
52831 ++ return ret;
52832 ++ }
52833 + return 0;
52834 + }
52835 +
52836 +@@ -422,10 +426,13 @@ static int dw_i2s_resume(struct snd_soc_component *component)
52837 + {
52838 + struct dw_i2s_dev *dev = snd_soc_component_get_drvdata(component);
52839 + struct snd_soc_dai *dai;
52840 +- int stream;
52841 ++ int stream, ret;
52842 +
52843 +- if (dev->capability & DW_I2S_MASTER)
52844 +- clk_enable(dev->clk);
52845 ++ if (dev->capability & DW_I2S_MASTER) {
52846 ++ ret = clk_enable(dev->clk);
52847 ++ if (ret)
52848 ++ return ret;
52849 ++ }
52850 +
52851 + for_each_component_dais(component, dai) {
52852 + for_each_pcm_streams(stream)
52853 +diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
52854 +index d178b479c8bd4..06d4a014f296d 100644
52855 +--- a/sound/soc/fsl/fsl_spdif.c
52856 ++++ b/sound/soc/fsl/fsl_spdif.c
52857 +@@ -610,6 +610,8 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream,
52858 + mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
52859 + SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
52860 + SCR_TXFIFO_FSEL_MASK;
52861 ++ /* Disable TX clock */
52862 ++ regmap_update_bits(regmap, REG_SPDIF_STC, STC_TXCLK_ALL_EN_MASK, 0);
52863 + } else {
52864 + scr = SCR_RXFIFO_OFF | SCR_RXFIFO_CTL_ZERO;
52865 + mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
52866 +diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
52867 +index 09c674ee79f1a..168973035e35f 100644
52868 +--- a/sound/soc/fsl/imx-es8328.c
52869 ++++ b/sound/soc/fsl/imx-es8328.c
52870 +@@ -87,6 +87,7 @@ static int imx_es8328_probe(struct platform_device *pdev)
52871 + if (int_port > MUX_PORT_MAX || int_port == 0) {
52872 + dev_err(dev, "mux-int-port: hardware only has %d mux ports\n",
52873 + MUX_PORT_MAX);
52874 ++ ret = -EINVAL;
52875 + goto fail;
52876 + }
52877 +
52878 +diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
52879 +index a81323d1691d0..9736102e68088 100644
52880 +--- a/sound/soc/generic/simple-card-utils.c
52881 ++++ b/sound/soc/generic/simple-card-utils.c
52882 +@@ -275,6 +275,7 @@ int asoc_simple_hw_params(struct snd_pcm_substream *substream,
52883 + mclk_fs = props->mclk_fs;
52884 +
52885 + if (mclk_fs) {
52886 ++ struct snd_soc_component *component;
52887 + mclk = params_rate(params) * mclk_fs;
52888 +
52889 + for_each_prop_dai_codec(props, i, pdai) {
52890 +@@ -282,16 +283,30 @@ int asoc_simple_hw_params(struct snd_pcm_substream *substream,
52891 + if (ret < 0)
52892 + return ret;
52893 + }
52894 ++
52895 + for_each_prop_dai_cpu(props, i, pdai) {
52896 + ret = asoc_simple_set_clk_rate(pdai, mclk);
52897 + if (ret < 0)
52898 + return ret;
52899 + }
52900 ++
52901 ++ /* Ensure sysclk is set on all components in case any
52902 ++ * (such as platform components) are missed by calls to
52903 ++ * snd_soc_dai_set_sysclk.
52904 ++ */
52905 ++ for_each_rtd_components(rtd, i, component) {
52906 ++ ret = snd_soc_component_set_sysclk(component, 0, 0,
52907 ++ mclk, SND_SOC_CLOCK_IN);
52908 ++ if (ret && ret != -ENOTSUPP)
52909 ++ return ret;
52910 ++ }
52911 ++
52912 + for_each_rtd_codec_dais(rtd, i, sdai) {
52913 + ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_IN);
52914 + if (ret && ret != -ENOTSUPP)
52915 + return ret;
52916 + }
52917 ++
52918 + for_each_rtd_cpu_dais(rtd, i, sdai) {
52919 + ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_OUT);
52920 + if (ret && ret != -ENOTSUPP)
52921 +diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
52922 +index 20d577eaab6d7..28d7670b8f8f8 100644
52923 +--- a/sound/soc/intel/boards/sof_es8336.c
52924 ++++ b/sound/soc/intel/boards/sof_es8336.c
52925 +@@ -63,7 +63,12 @@ static const struct acpi_gpio_mapping *gpio_mapping = acpi_es8336_gpios;
52926 +
52927 + static void log_quirks(struct device *dev)
52928 + {
52929 +- dev_info(dev, "quirk SSP%ld", SOF_ES8336_SSP_CODEC(quirk));
52930 ++ dev_info(dev, "quirk mask %#lx\n", quirk);
52931 ++ dev_info(dev, "quirk SSP%ld\n", SOF_ES8336_SSP_CODEC(quirk));
52932 ++ if (quirk & SOF_ES8336_ENABLE_DMIC)
52933 ++ dev_info(dev, "quirk DMIC enabled\n");
52934 ++ if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
52935 ++ dev_info(dev, "quirk TGL GPIO enabled\n");
52936 + }
52937 +
52938 + static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
52939 +diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
52940 +index da515eb1ddbe7..1f00679b42409 100644
52941 +--- a/sound/soc/intel/boards/sof_sdw.c
52942 ++++ b/sound/soc/intel/boards/sof_sdw.c
52943 +@@ -185,7 +185,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
52944 + .callback = sof_sdw_quirk_cb,
52945 + .matches = {
52946 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
52947 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
52948 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Conv"),
52949 + },
52950 + .driver_data = (void *)(SOF_SDW_TGL_HDMI |
52951 + SOF_SDW_PCH_DMIC |
52952 +diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
52953 +index 342d340522045..04a92e74d99bc 100644
52954 +--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
52955 ++++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
52956 +@@ -41,6 +41,11 @@ static struct snd_soc_acpi_mach *apl_quirk(void *arg)
52957 + return mach;
52958 + }
52959 +
52960 ++static const struct snd_soc_acpi_codecs essx_83x6 = {
52961 ++ .num_codecs = 3,
52962 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
52963 ++};
52964 ++
52965 + static const struct snd_soc_acpi_codecs bxt_codecs = {
52966 + .num_codecs = 1,
52967 + .codecs = {"MX98357A"}
52968 +@@ -83,7 +88,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
52969 + .sof_tplg_filename = "sof-apl-tdf8532.tplg",
52970 + },
52971 + {
52972 +- .id = "ESSX8336",
52973 ++ .comp_ids = &essx_83x6,
52974 + .drv_name = "sof-essx8336",
52975 + .sof_fw_filename = "sof-apl.ri",
52976 + .sof_tplg_filename = "sof-apl-es8336.tplg",
52977 +diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
52978 +index 4eebc79d4b486..14395833d89e8 100644
52979 +--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
52980 ++++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
52981 +@@ -9,6 +9,11 @@
52982 + #include <sound/soc-acpi.h>
52983 + #include <sound/soc-acpi-intel-match.h>
52984 +
52985 ++static const struct snd_soc_acpi_codecs essx_83x6 = {
52986 ++ .num_codecs = 3,
52987 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
52988 ++};
52989 ++
52990 + static const struct snd_soc_acpi_codecs rt1011_spk_codecs = {
52991 + .num_codecs = 1,
52992 + .codecs = {"10EC1011"}
52993 +@@ -82,7 +87,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
52994 + .sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
52995 + },
52996 + {
52997 +- .id = "ESSX8336",
52998 ++ .comp_ids = &essx_83x6,
52999 + .drv_name = "sof-essx8336",
53000 + .sof_fw_filename = "sof-cml.ri",
53001 + .sof_tplg_filename = "sof-cml-es8336.tplg",
53002 +diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
53003 +index 8492b7e2a9450..7aa6a870d5a5c 100644
53004 +--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
53005 ++++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
53006 +@@ -9,6 +9,11 @@
53007 + #include <sound/soc-acpi.h>
53008 + #include <sound/soc-acpi-intel-match.h>
53009 +
53010 ++static const struct snd_soc_acpi_codecs essx_83x6 = {
53011 ++ .num_codecs = 3,
53012 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
53013 ++};
53014 ++
53015 + static const struct snd_soc_acpi_codecs glk_codecs = {
53016 + .num_codecs = 1,
53017 + .codecs = {"MX98357A"}
53018 +@@ -58,7 +63,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
53019 + .sof_tplg_filename = "sof-glk-cs42l42.tplg",
53020 + },
53021 + {
53022 +- .id = "ESSX8336",
53023 ++ .comp_ids = &essx_83x6,
53024 + .drv_name = "sof-essx8336",
53025 + .sof_fw_filename = "sof-glk.ri",
53026 + .sof_tplg_filename = "sof-glk-es8336.tplg",
53027 +diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
53028 +index 278ec196da7bf..9d0d0e1437a4b 100644
53029 +--- a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
53030 ++++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
53031 +@@ -9,6 +9,11 @@
53032 + #include <sound/soc-acpi.h>
53033 + #include <sound/soc-acpi-intel-match.h>
53034 +
53035 ++static const struct snd_soc_acpi_codecs essx_83x6 = {
53036 ++ .num_codecs = 3,
53037 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
53038 ++};
53039 ++
53040 + static const struct snd_soc_acpi_codecs jsl_7219_98373_codecs = {
53041 + .num_codecs = 1,
53042 + .codecs = {"MX98373"}
53043 +@@ -87,7 +92,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
53044 + .sof_tplg_filename = "sof-jsl-cs42l42-mx98360a.tplg",
53045 + },
53046 + {
53047 +- .id = "ESSX8336",
53048 ++ .comp_ids = &essx_83x6,
53049 + .drv_name = "sof-essx8336",
53050 + .sof_fw_filename = "sof-jsl.ri",
53051 + .sof_tplg_filename = "sof-jsl-es8336.tplg",
53052 +diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
53053 +index da31bb3cca17c..e2658bca69318 100644
53054 +--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
53055 ++++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
53056 +@@ -10,6 +10,11 @@
53057 + #include <sound/soc-acpi-intel-match.h>
53058 + #include "soc-acpi-intel-sdw-mockup-match.h"
53059 +
53060 ++static const struct snd_soc_acpi_codecs essx_83x6 = {
53061 ++ .num_codecs = 3,
53062 ++ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
53063 ++};
53064 ++
53065 + static const struct snd_soc_acpi_codecs tgl_codecs = {
53066 + .num_codecs = 1,
53067 + .codecs = {"MX98357A"}
53068 +@@ -389,7 +394,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
53069 + .sof_tplg_filename = "sof-tgl-rt1011-rt5682.tplg",
53070 + },
53071 + {
53072 +- .id = "ESSX8336",
53073 ++ .comp_ids = &essx_83x6,
53074 + .drv_name = "sof-essx8336",
53075 + .sof_fw_filename = "sof-tgl.ri",
53076 + .sof_tplg_filename = "sof-tgl-es8336.tplg",
53077 +diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
53078 +index 718505c754188..f090dee0c7a4f 100644
53079 +--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
53080 ++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
53081 +@@ -695,8 +695,11 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
53082 + }
53083 +
53084 + card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
53085 +- if (!card)
53086 +- return -EINVAL;
53087 ++ if (!card) {
53088 ++ ret = -EINVAL;
53089 ++ goto put_platform_node;
53090 ++ }
53091 ++
53092 + card->dev = &pdev->dev;
53093 +
53094 + hdmi_codec = of_parse_phandle(pdev->dev.of_node,
53095 +@@ -761,12 +764,15 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
53096 + if (!mt8183_da7219_max98357_headset_dev.dlc.of_node) {
53097 + dev_err(&pdev->dev,
53098 + "Property 'mediatek,headset-codec' missing/invalid\n");
53099 +- return -EINVAL;
53100 ++ ret = -EINVAL;
53101 ++ goto put_hdmi_codec;
53102 + }
53103 +
53104 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
53105 +- if (!priv)
53106 +- return -ENOMEM;
53107 ++ if (!priv) {
53108 ++ ret = -ENOMEM;
53109 ++ goto put_hdmi_codec;
53110 ++ }
53111 +
53112 + snd_soc_card_set_drvdata(card, priv);
53113 +
53114 +@@ -775,13 +781,16 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
53115 + ret = PTR_ERR(pinctrl);
53116 + dev_err(&pdev->dev, "%s failed to select default state %d\n",
53117 + __func__, ret);
53118 +- return ret;
53119 ++ goto put_hdmi_codec;
53120 + }
53121 +
53122 + ret = devm_snd_soc_register_card(&pdev->dev, card);
53123 +
53124 +- of_node_put(platform_node);
53125 ++
53126 ++put_hdmi_codec:
53127 + of_node_put(hdmi_codec);
53128 ++put_platform_node:
53129 ++ of_node_put(platform_node);
53130 + return ret;
53131 + }
53132 +
53133 +diff --git a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
53134 +index f7daad1bfe1ed..ee91569c09117 100644
53135 +--- a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
53136 ++++ b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
53137 +@@ -1116,8 +1116,10 @@ static int mt8192_mt6359_dev_probe(struct platform_device *pdev)
53138 + }
53139 +
53140 + card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
53141 +- if (!card)
53142 +- return -EINVAL;
53143 ++ if (!card) {
53144 ++ ret = -EINVAL;
53145 ++ goto put_platform_node;
53146 ++ }
53147 + card->dev = &pdev->dev;
53148 +
53149 + hdmi_codec = of_parse_phandle(pdev->dev.of_node,
53150 +@@ -1159,20 +1161,24 @@ static int mt8192_mt6359_dev_probe(struct platform_device *pdev)
53151 + }
53152 +
53153 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
53154 +- if (!priv)
53155 +- return -ENOMEM;
53156 ++ if (!priv) {
53157 ++ ret = -ENOMEM;
53158 ++ goto put_hdmi_codec;
53159 ++ }
53160 + snd_soc_card_set_drvdata(card, priv);
53161 +
53162 + ret = mt8192_afe_gpio_init(&pdev->dev);
53163 + if (ret) {
53164 + dev_err(&pdev->dev, "init gpio error %d\n", ret);
53165 +- return ret;
53166 ++ goto put_hdmi_codec;
53167 + }
53168 +
53169 + ret = devm_snd_soc_register_card(&pdev->dev, card);
53170 +
53171 +- of_node_put(platform_node);
53172 ++put_hdmi_codec:
53173 + of_node_put(hdmi_codec);
53174 ++put_platform_node:
53175 ++ of_node_put(platform_node);
53176 + return ret;
53177 + }
53178 +
53179 +diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
53180 +index 29c2d3407cc7c..e3146311722f8 100644
53181 +--- a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
53182 ++++ b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
53183 +@@ -1342,7 +1342,8 @@ static int mt8195_mt6359_rt1019_rt5682_dev_probe(struct platform_device *pdev)
53184 + "mediatek,dai-link");
53185 + if (ret) {
53186 + dev_dbg(&pdev->dev, "Parse dai-link fail\n");
53187 +- return -EINVAL;
53188 ++ ret = -EINVAL;
53189 ++ goto put_node;
53190 + }
53191 + } else {
53192 + if (!sof_on)
53193 +@@ -1398,6 +1399,7 @@ static int mt8195_mt6359_rt1019_rt5682_dev_probe(struct platform_device *pdev)
53194 +
53195 + ret = devm_snd_soc_register_card(&pdev->dev, card);
53196 +
53197 ++put_node:
53198 + of_node_put(platform_node);
53199 + of_node_put(adsp_node);
53200 + of_node_put(dp_node);
53201 +diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
53202 +index 6a2d24d489647..879c1221a809b 100644
53203 +--- a/sound/soc/mxs/mxs-saif.c
53204 ++++ b/sound/soc/mxs/mxs-saif.c
53205 +@@ -455,7 +455,10 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
53206 + * basic clock which should be fast enough for the internal
53207 + * logic.
53208 + */
53209 +- clk_enable(saif->clk);
53210 ++ ret = clk_enable(saif->clk);
53211 ++ if (ret)
53212 ++ return ret;
53213 ++
53214 + ret = clk_set_rate(saif->clk, 24000000);
53215 + clk_disable(saif->clk);
53216 + if (ret)
53217 +diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
53218 +index 2412dc7e65d44..746f409386751 100644
53219 +--- a/sound/soc/mxs/mxs-sgtl5000.c
53220 ++++ b/sound/soc/mxs/mxs-sgtl5000.c
53221 +@@ -118,6 +118,9 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
53222 + codec_np = of_parse_phandle(np, "audio-codec", 0);
53223 + if (!saif_np[0] || !saif_np[1] || !codec_np) {
53224 + dev_err(&pdev->dev, "phandle missing or invalid\n");
53225 ++ of_node_put(codec_np);
53226 ++ of_node_put(saif_np[0]);
53227 ++ of_node_put(saif_np[1]);
53228 + return -EINVAL;
53229 + }
53230 +
53231 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
53232 +index a6d7656c206e5..4ce5d25793875 100644
53233 +--- a/sound/soc/rockchip/rockchip_i2s.c
53234 ++++ b/sound/soc/rockchip/rockchip_i2s.c
53235 +@@ -716,19 +716,23 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
53236 + i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk");
53237 + if (IS_ERR(i2s->mclk)) {
53238 + dev_err(&pdev->dev, "Can't retrieve i2s master clock\n");
53239 +- return PTR_ERR(i2s->mclk);
53240 ++ ret = PTR_ERR(i2s->mclk);
53241 ++ goto err_clk;
53242 + }
53243 +
53244 + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
53245 +- if (IS_ERR(regs))
53246 +- return PTR_ERR(regs);
53247 ++ if (IS_ERR(regs)) {
53248 ++ ret = PTR_ERR(regs);
53249 ++ goto err_clk;
53250 ++ }
53251 +
53252 + i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
53253 + &rockchip_i2s_regmap_config);
53254 + if (IS_ERR(i2s->regmap)) {
53255 + dev_err(&pdev->dev,
53256 + "Failed to initialise managed register map\n");
53257 +- return PTR_ERR(i2s->regmap);
53258 ++ ret = PTR_ERR(i2s->regmap);
53259 ++ goto err_clk;
53260 + }
53261 +
53262 + i2s->bclk_ratio = 64;
53263 +@@ -768,7 +772,8 @@ err_suspend:
53264 + i2s_runtime_suspend(&pdev->dev);
53265 + err_pm_disable:
53266 + pm_runtime_disable(&pdev->dev);
53267 +-
53268 ++err_clk:
53269 ++ clk_disable_unprepare(i2s->hclk);
53270 + return ret;
53271 + }
53272 +
53273 +diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
53274 +index 5f9cb5c4c7f09..98700e75b82a1 100644
53275 +--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
53276 ++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
53277 +@@ -469,14 +469,14 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
53278 + txcr_val = I2S_TXCR_IBM_NORMAL;
53279 + rxcr_val = I2S_RXCR_IBM_NORMAL;
53280 + break;
53281 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
53282 +- txcr_val = I2S_TXCR_TFS_PCM;
53283 +- rxcr_val = I2S_RXCR_TFS_PCM;
53284 +- break;
53285 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
53286 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 mode */
53287 + txcr_val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
53288 + rxcr_val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
53289 + break;
53290 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
53291 ++ txcr_val = I2S_TXCR_TFS_PCM;
53292 ++ rxcr_val = I2S_RXCR_TFS_PCM;
53293 ++ break;
53294 + default:
53295 + ret = -EINVAL;
53296 + goto err_pm_put;
53297 +@@ -1738,7 +1738,7 @@ static int __maybe_unused rockchip_i2s_tdm_resume(struct device *dev)
53298 + struct rk_i2s_tdm_dev *i2s_tdm = dev_get_drvdata(dev);
53299 + int ret;
53300 +
53301 +- ret = pm_runtime_get_sync(dev);
53302 ++ ret = pm_runtime_resume_and_get(dev);
53303 + if (ret < 0)
53304 + return ret;
53305 + ret = regcache_sync(i2s_tdm->regmap);
53306 +diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
53307 +index cdf3b7f69ba70..e9a1eb6bdf66a 100644
53308 +--- a/sound/soc/sh/fsi.c
53309 ++++ b/sound/soc/sh/fsi.c
53310 +@@ -816,14 +816,27 @@ static int fsi_clk_enable(struct device *dev,
53311 + return ret;
53312 + }
53313 +
53314 +- clk_enable(clock->xck);
53315 +- clk_enable(clock->ick);
53316 +- clk_enable(clock->div);
53317 ++ ret = clk_enable(clock->xck);
53318 ++ if (ret)
53319 ++ goto err;
53320 ++ ret = clk_enable(clock->ick);
53321 ++ if (ret)
53322 ++ goto disable_xck;
53323 ++ ret = clk_enable(clock->div);
53324 ++ if (ret)
53325 ++ goto disable_ick;
53326 +
53327 + clock->count++;
53328 + }
53329 +
53330 + return ret;
53331 ++
53332 ++disable_ick:
53333 ++ clk_disable(clock->ick);
53334 ++disable_xck:
53335 ++ clk_disable(clock->xck);
53336 ++err:
53337 ++ return ret;
53338 + }
53339 +
53340 + static int fsi_clk_disable(struct device *dev,
53341 +diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
53342 +index e8d98b362f9db..7379b1489e358 100644
53343 +--- a/sound/soc/sh/rz-ssi.c
53344 ++++ b/sound/soc/sh/rz-ssi.c
53345 +@@ -411,54 +411,56 @@ static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
53346 + {
53347 + struct snd_pcm_substream *substream = strm->substream;
53348 + struct snd_pcm_runtime *runtime;
53349 ++ bool done = false;
53350 + u16 *buf;
53351 + int fifo_samples;
53352 + int frames_left;
53353 +- int samples = 0;
53354 ++ int samples;
53355 + int i;
53356 +
53357 + if (!rz_ssi_stream_is_valid(ssi, strm))
53358 + return -EINVAL;
53359 +
53360 + runtime = substream->runtime;
53361 +- /* frames left in this period */
53362 +- frames_left = runtime->period_size - (strm->buffer_pos %
53363 +- runtime->period_size);
53364 +- if (frames_left == 0)
53365 +- frames_left = runtime->period_size;
53366 +
53367 +- /* Samples in RX FIFO */
53368 +- fifo_samples = (rz_ssi_reg_readl(ssi, SSIFSR) >>
53369 +- SSIFSR_RDC_SHIFT) & SSIFSR_RDC_MASK;
53370 ++ while (!done) {
53371 ++ /* frames left in this period */
53372 ++ frames_left = runtime->period_size -
53373 ++ (strm->buffer_pos % runtime->period_size);
53374 ++ if (!frames_left)
53375 ++ frames_left = runtime->period_size;
53376 ++
53377 ++ /* Samples in RX FIFO */
53378 ++ fifo_samples = (rz_ssi_reg_readl(ssi, SSIFSR) >>
53379 ++ SSIFSR_RDC_SHIFT) & SSIFSR_RDC_MASK;
53380 ++
53381 ++ /* Only read full frames at a time */
53382 ++ samples = 0;
53383 ++ while (frames_left && (fifo_samples >= runtime->channels)) {
53384 ++ samples += runtime->channels;
53385 ++ fifo_samples -= runtime->channels;
53386 ++ frames_left--;
53387 ++ }
53388 +
53389 +- /* Only read full frames at a time */
53390 +- while (frames_left && (fifo_samples >= runtime->channels)) {
53391 +- samples += runtime->channels;
53392 +- fifo_samples -= runtime->channels;
53393 +- frames_left--;
53394 +- }
53395 ++ /* not enough samples yet */
53396 ++ if (!samples)
53397 ++ break;
53398 +
53399 +- /* not enough samples yet */
53400 +- if (samples == 0)
53401 +- return 0;
53402 ++ /* calculate new buffer index */
53403 ++ buf = (u16 *)(runtime->dma_area);
53404 ++ buf += strm->buffer_pos * runtime->channels;
53405 +
53406 +- /* calculate new buffer index */
53407 +- buf = (u16 *)(runtime->dma_area);
53408 +- buf += strm->buffer_pos * runtime->channels;
53409 +-
53410 +- /* Note, only supports 16-bit samples */
53411 +- for (i = 0; i < samples; i++)
53412 +- *buf++ = (u16)(rz_ssi_reg_readl(ssi, SSIFRDR) >> 16);
53413 ++ /* Note, only supports 16-bit samples */
53414 ++ for (i = 0; i < samples; i++)
53415 ++ *buf++ = (u16)(rz_ssi_reg_readl(ssi, SSIFRDR) >> 16);
53416 +
53417 +- rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_RDF, 0);
53418 +- rz_ssi_pointer_update(strm, samples / runtime->channels);
53419 ++ rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_RDF, 0);
53420 ++ rz_ssi_pointer_update(strm, samples / runtime->channels);
53421 +
53422 +- /*
53423 +- * If we finished this period, but there are more samples in
53424 +- * the RX FIFO, call this function again
53425 +- */
53426 +- if (frames_left == 0 && fifo_samples >= runtime->channels)
53427 +- rz_ssi_pio_recv(ssi, strm);
53428 ++ /* check if there are no more samples in the RX FIFO */
53429 ++ if (!(!frames_left && fifo_samples >= runtime->channels))
53430 ++ done = true;
53431 ++ }
53432 +
53433 + return 0;
53434 + }
53435 +@@ -975,6 +977,9 @@ static int rz_ssi_probe(struct platform_device *pdev)
53436 + ssi->playback.priv = ssi;
53437 + ssi->capture.priv = ssi;
53438 +
53439 ++ spin_lock_init(&ssi->lock);
53440 ++ dev_set_drvdata(&pdev->dev, ssi);
53441 ++
53442 + /* Error Interrupt */
53443 + ssi->irq_int = platform_get_irq_byname(pdev, "int_req");
53444 + if (ssi->irq_int < 0)
53445 +@@ -1027,8 +1032,6 @@ static int rz_ssi_probe(struct platform_device *pdev)
53446 + return dev_err_probe(ssi->dev, ret, "pm_runtime_resume_and_get failed\n");
53447 + }
53448 +
53449 +- spin_lock_init(&ssi->lock);
53450 +- dev_set_drvdata(&pdev->dev, ssi);
53451 + ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
53452 + rz_ssi_soc_dai,
53453 + ARRAY_SIZE(rz_ssi_soc_dai));
53454 +diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
53455 +index 8e2494a9f3a7f..e9dd25894dc0f 100644
53456 +--- a/sound/soc/soc-compress.c
53457 ++++ b/sound/soc/soc-compress.c
53458 +@@ -567,6 +567,11 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
53459 + return -EINVAL;
53460 + }
53461 +
53462 ++ if (!codec_dai) {
53463 ++ dev_err(rtd->card->dev, "Missing codec\n");
53464 ++ return -EINVAL;
53465 ++ }
53466 ++
53467 + /* check client and interface hw capabilities */
53468 + if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
53469 + snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK))
53470 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
53471 +index 434e61b46983c..a088bc9f7dd7c 100644
53472 +--- a/sound/soc/soc-core.c
53473 ++++ b/sound/soc/soc-core.c
53474 +@@ -3233,7 +3233,7 @@ int snd_soc_get_dai_name(const struct of_phandle_args *args,
53475 + for_each_component(pos) {
53476 + struct device_node *component_of_node = soc_component_to_node(pos);
53477 +
53478 +- if (component_of_node != args->np)
53479 ++ if (component_of_node != args->np || !pos->num_dai)
53480 + continue;
53481 +
53482 + ret = snd_soc_component_of_xlate_dai_name(pos, args, dai_name);
53483 +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
53484 +index c54c8ca8d7156..359987bf76d1b 100644
53485 +--- a/sound/soc/soc-generic-dmaengine-pcm.c
53486 ++++ b/sound/soc/soc-generic-dmaengine-pcm.c
53487 +@@ -86,10 +86,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
53488 +
53489 + memset(&slave_config, 0, sizeof(slave_config));
53490 +
53491 +- if (!pcm->config)
53492 +- prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
53493 +- else
53494 ++ if (pcm->config && pcm->config->prepare_slave_config)
53495 + prepare_slave_config = pcm->config->prepare_slave_config;
53496 ++ else
53497 ++ prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
53498 +
53499 + if (prepare_slave_config) {
53500 + int ret = prepare_slave_config(substream, params, &slave_config);
53501 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
53502 +index 2630df024dff3..cb24805668bd8 100644
53503 +--- a/sound/soc/soc-topology.c
53504 ++++ b/sound/soc/soc-topology.c
53505 +@@ -512,7 +512,8 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
53506 +
53507 + if (le32_to_cpu(hdr->ops.info) == SND_SOC_TPLG_CTL_BYTES
53508 + && k->iface & SNDRV_CTL_ELEM_IFACE_MIXER
53509 +- && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE
53510 ++ && (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ
53511 ++ || k->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
53512 + && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
53513 + struct soc_bytes_ext *sbe;
53514 + struct snd_soc_tplg_bytes_control *be;
53515 +diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
53516 +index 6d6757075f7c3..e755c0c5f86c0 100644
53517 +--- a/sound/soc/sof/debug.c
53518 ++++ b/sound/soc/sof/debug.c
53519 +@@ -960,7 +960,7 @@ static void snd_sof_dbg_print_fw_state(struct snd_sof_dev *sdev, const char *lev
53520 +
53521 + void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags)
53522 + {
53523 +- char *level = flags & SOF_DBG_DUMP_OPTIONAL ? KERN_DEBUG : KERN_ERR;
53524 ++ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
53525 + bool print_all = sof_debug_check_flag(SOF_DBG_PRINT_ALL_DUMPS);
53526 +
53527 + if (flags & SOF_DBG_DUMP_OPTIONAL && !print_all)
53528 +diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
53529 +index 788e77bcb6038..60251486b24b2 100644
53530 +--- a/sound/soc/sof/imx/imx8m.c
53531 ++++ b/sound/soc/sof/imx/imx8m.c
53532 +@@ -224,6 +224,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev)
53533 + }
53534 +
53535 + ret = of_address_to_resource(res_node, 0, &res);
53536 ++ of_node_put(res_node);
53537 + if (ret) {
53538 + dev_err(&pdev->dev, "failed to get reserved region address\n");
53539 + goto exit_pdev_unregister;
53540 +diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
53541 +index 88b6176af021c..d83e1a36707af 100644
53542 +--- a/sound/soc/sof/intel/Kconfig
53543 ++++ b/sound/soc/sof/intel/Kconfig
53544 +@@ -84,6 +84,7 @@ if SND_SOC_SOF_PCI
53545 + config SND_SOC_SOF_MERRIFIELD
53546 + tristate "SOF support for Tangier/Merrifield"
53547 + default SND_SOC_SOF_PCI
53548 ++ select SND_SOC_SOF_PCI_DEV
53549 + select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
53550 + help
53551 + This adds support for Sound Open Firmware for Intel(R) platforms
53552 +diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
53553 +index cd12589355eff..28a54145c1506 100644
53554 +--- a/sound/soc/sof/intel/hda-dai.c
53555 ++++ b/sound/soc/sof/intel/hda-dai.c
53556 +@@ -59,6 +59,8 @@ static struct hdac_ext_stream *
53557 + {
53558 + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
53559 + struct sof_intel_hda_stream *hda_stream;
53560 ++ const struct sof_intel_dsp_desc *chip;
53561 ++ struct snd_sof_dev *sdev;
53562 + struct hdac_ext_stream *res = NULL;
53563 + struct hdac_stream *stream = NULL;
53564 +
53565 +@@ -77,9 +79,20 @@ static struct hdac_ext_stream *
53566 + continue;
53567 +
53568 + hda_stream = hstream_to_sof_hda_stream(hstream);
53569 ++ sdev = hda_stream->sdev;
53570 ++ chip = get_chip_info(sdev->pdata);
53571 +
53572 + /* check if link is available */
53573 + if (!hstream->link_locked) {
53574 ++ /*
53575 ++ * choose the first available link for platforms that do not have the
53576 ++ * PROCEN_FMT_QUIRK set.
53577 ++ */
53578 ++ if (!(chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) {
53579 ++ res = hstream;
53580 ++ break;
53581 ++ }
53582 ++
53583 + if (stream->opened) {
53584 + /*
53585 + * check if the stream tag matches the stream
53586 +diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
53587 +index 33306d2023a78..9bbfdab8009de 100644
53588 +--- a/sound/soc/sof/intel/hda-loader.c
53589 ++++ b/sound/soc/sof/intel/hda-loader.c
53590 +@@ -47,7 +47,7 @@ static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsig
53591 + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
53592 + if (ret < 0) {
53593 + dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
53594 +- goto error;
53595 ++ goto out_put;
53596 + }
53597 +
53598 + hstream->period_bytes = 0;/* initialize period_bytes */
53599 +@@ -58,22 +58,23 @@ static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsig
53600 + ret = hda_dsp_iccmax_stream_hw_params(sdev, dsp_stream, dmab, NULL);
53601 + if (ret < 0) {
53602 + dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
53603 +- goto error;
53604 ++ goto out_free;
53605 + }
53606 + } else {
53607 + ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL);
53608 + if (ret < 0) {
53609 + dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
53610 +- goto error;
53611 ++ goto out_free;
53612 + }
53613 + hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size);
53614 + }
53615 +
53616 + return dsp_stream;
53617 +
53618 +-error:
53619 +- hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
53620 ++out_free:
53621 + snd_dma_free_pages(dmab);
53622 ++out_put:
53623 ++ hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
53624 + return ERR_PTR(ret);
53625 + }
53626 +
53627 +diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
53628 +index d78aa5d8552d5..8aeb00eacd219 100644
53629 +--- a/sound/soc/sof/intel/hda-pcm.c
53630 ++++ b/sound/soc/sof/intel/hda-pcm.c
53631 +@@ -315,6 +315,7 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
53632 + runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
53633 +
53634 + if (hda_always_enable_dmi_l1 ||
53635 ++ direction == SNDRV_PCM_STREAM_PLAYBACK ||
53636 + spcm->stream[substream->stream].d0i3_compatible)
53637 + flags |= SOF_HDA_STREAM_DMI_L1_COMPATIBLE;
53638 +
53639 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
53640 +index 1385695d77458..028751549f6da 100644
53641 +--- a/sound/soc/sof/intel/hda.c
53642 ++++ b/sound/soc/sof/intel/hda.c
53643 +@@ -432,11 +432,9 @@ static char *hda_model;
53644 + module_param(hda_model, charp, 0444);
53645 + MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
53646 +
53647 +-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
53648 +-static int hda_dmic_num = -1;
53649 +-module_param_named(dmic_num, hda_dmic_num, int, 0444);
53650 ++static int dmic_num_override = -1;
53651 ++module_param_named(dmic_num, dmic_num_override, int, 0444);
53652 + MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
53653 +-#endif
53654 +
53655 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
53656 + static bool hda_codec_use_common_hdmi = IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI);
53657 +@@ -644,24 +642,35 @@ static int hda_init(struct snd_sof_dev *sdev)
53658 + return ret;
53659 + }
53660 +
53661 +-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
53662 +-
53663 +-static int check_nhlt_dmic(struct snd_sof_dev *sdev)
53664 ++static int check_dmic_num(struct snd_sof_dev *sdev)
53665 + {
53666 + struct nhlt_acpi_table *nhlt;
53667 +- int dmic_num;
53668 ++ int dmic_num = 0;
53669 +
53670 + nhlt = intel_nhlt_init(sdev->dev);
53671 + if (nhlt) {
53672 + dmic_num = intel_nhlt_get_dmic_geo(sdev->dev, nhlt);
53673 + intel_nhlt_free(nhlt);
53674 +- if (dmic_num >= 1 && dmic_num <= 4)
53675 +- return dmic_num;
53676 + }
53677 +
53678 +- return 0;
53679 ++ /* allow for module parameter override */
53680 ++ if (dmic_num_override != -1) {
53681 ++ dev_dbg(sdev->dev,
53682 ++ "overriding DMICs detected in NHLT tables %d by kernel param %d\n",
53683 ++ dmic_num, dmic_num_override);
53684 ++ dmic_num = dmic_num_override;
53685 ++ }
53686 ++
53687 ++ if (dmic_num < 0 || dmic_num > 4) {
53688 ++ dev_dbg(sdev->dev, "invalid dmic_number %d\n", dmic_num);
53689 ++ dmic_num = 0;
53690 ++ }
53691 ++
53692 ++ return dmic_num;
53693 + }
53694 +
53695 ++#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
53696 ++
53697 + static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
53698 + const char *sof_tplg_filename,
53699 + const char *idisp_str,
53700 +@@ -697,16 +706,8 @@ static int dmic_topology_fixup(struct snd_sof_dev *sdev,
53701 + const char *dmic_str;
53702 + int dmic_num;
53703 +
53704 +- /* first check NHLT for DMICs */
53705 +- dmic_num = check_nhlt_dmic(sdev);
53706 +-
53707 +- /* allow for module parameter override */
53708 +- if (hda_dmic_num != -1) {
53709 +- dev_dbg(sdev->dev,
53710 +- "overriding DMICs detected in NHLT tables %d by kernel param %d\n",
53711 +- dmic_num, hda_dmic_num);
53712 +- dmic_num = hda_dmic_num;
53713 +- }
53714 ++ /* first check for DMICs (using NHLT or module parameter) */
53715 ++ dmic_num = check_dmic_num(sdev);
53716 +
53717 + switch (dmic_num) {
53718 + case 1:
53719 +@@ -1188,7 +1189,7 @@ static bool link_slaves_found(struct snd_sof_dev *sdev,
53720 + struct hdac_bus *bus = sof_to_bus(sdev);
53721 + struct sdw_intel_slave_id *ids = sdw->ids;
53722 + int num_slaves = sdw->num_slaves;
53723 +- unsigned int part_id, link_id, unique_id, mfg_id;
53724 ++ unsigned int part_id, link_id, unique_id, mfg_id, version;
53725 + int i, j, k;
53726 +
53727 + for (i = 0; i < link->num_adr; i++) {
53728 +@@ -1198,12 +1199,14 @@ static bool link_slaves_found(struct snd_sof_dev *sdev,
53729 + mfg_id = SDW_MFG_ID(adr);
53730 + part_id = SDW_PART_ID(adr);
53731 + link_id = SDW_DISCO_LINK_ID(adr);
53732 ++ version = SDW_VERSION(adr);
53733 +
53734 + for (j = 0; j < num_slaves; j++) {
53735 + /* find out how many identical parts were reported on that link */
53736 + if (ids[j].link_id == link_id &&
53737 + ids[j].id.part_id == part_id &&
53738 +- ids[j].id.mfg_id == mfg_id)
53739 ++ ids[j].id.mfg_id == mfg_id &&
53740 ++ ids[j].id.sdw_version == version)
53741 + reported_part_count++;
53742 + }
53743 +
53744 +@@ -1212,21 +1215,24 @@ static bool link_slaves_found(struct snd_sof_dev *sdev,
53745 +
53746 + if (ids[j].link_id != link_id ||
53747 + ids[j].id.part_id != part_id ||
53748 +- ids[j].id.mfg_id != mfg_id)
53749 ++ ids[j].id.mfg_id != mfg_id ||
53750 ++ ids[j].id.sdw_version != version)
53751 + continue;
53752 +
53753 + /* find out how many identical parts are expected */
53754 + for (k = 0; k < link->num_adr; k++) {
53755 + u64 adr2 = link->adr_d[k].adr;
53756 +- unsigned int part_id2, link_id2, mfg_id2;
53757 ++ unsigned int part_id2, link_id2, mfg_id2, version2;
53758 +
53759 + mfg_id2 = SDW_MFG_ID(adr2);
53760 + part_id2 = SDW_PART_ID(adr2);
53761 + link_id2 = SDW_DISCO_LINK_ID(adr2);
53762 ++ version2 = SDW_VERSION(adr2);
53763 +
53764 + if (link_id2 == link_id &&
53765 + part_id2 == part_id &&
53766 +- mfg_id2 == mfg_id)
53767 ++ mfg_id2 == mfg_id &&
53768 ++ version2 == version)
53769 + expected_part_count++;
53770 + }
53771 +
53772 +@@ -1387,6 +1393,9 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
53773 + if (!sof_pdata->tplg_filename)
53774 + sof_pdata->tplg_filename = mach->sof_tplg_filename;
53775 +
53776 ++ /* report to machine driver if any DMICs are found */
53777 ++ mach->mach_params.dmic_num = check_dmic_num(sdev);
53778 ++
53779 + if (mach->link_mask) {
53780 + mach->mach_params.links = mach->links;
53781 + mach->mach_params.link_mask = mach->link_mask;
53782 +diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c
53783 +index 6dca51862dd76..0363a088d2e00 100644
53784 +--- a/sound/soc/ti/davinci-i2s.c
53785 ++++ b/sound/soc/ti/davinci-i2s.c
53786 +@@ -708,7 +708,9 @@ static int davinci_i2s_probe(struct platform_device *pdev)
53787 + dev->clk = clk_get(&pdev->dev, NULL);
53788 + if (IS_ERR(dev->clk))
53789 + return -ENODEV;
53790 +- clk_enable(dev->clk);
53791 ++ ret = clk_enable(dev->clk);
53792 ++ if (ret)
53793 ++ goto err_put_clk;
53794 +
53795 + dev->dev = &pdev->dev;
53796 + dev_set_drvdata(&pdev->dev, dev);
53797 +@@ -730,6 +732,7 @@ err_unregister_component:
53798 + snd_soc_unregister_component(&pdev->dev);
53799 + err_release_clk:
53800 + clk_disable(dev->clk);
53801 ++err_put_clk:
53802 + clk_put(dev->clk);
53803 + return ret;
53804 + }
53805 +diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
53806 +index ce19a6058b279..5c4158069a5a8 100644
53807 +--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
53808 ++++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
53809 +@@ -84,6 +84,7 @@ struct xlnx_pcm_drv_data {
53810 + struct snd_pcm_substream *play_stream;
53811 + struct snd_pcm_substream *capture_stream;
53812 + struct clk *axi_clk;
53813 ++ unsigned int sysclk;
53814 + };
53815 +
53816 + /*
53817 +@@ -314,6 +315,15 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg)
53818 + return IRQ_NONE;
53819 + }
53820 +
53821 ++static int xlnx_formatter_set_sysclk(struct snd_soc_component *component,
53822 ++ int clk_id, int source, unsigned int freq, int dir)
53823 ++{
53824 ++ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
53825 ++
53826 ++ adata->sysclk = freq;
53827 ++ return 0;
53828 ++}
53829 ++
53830 + static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
53831 + struct snd_pcm_substream *substream)
53832 + {
53833 +@@ -450,11 +460,25 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
53834 + u64 size;
53835 + struct snd_pcm_runtime *runtime = substream->runtime;
53836 + struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
53837 ++ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
53838 +
53839 + active_ch = params_channels(params);
53840 + if (active_ch > stream_data->ch_limit)
53841 + return -EINVAL;
53842 +
53843 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
53844 ++ adata->sysclk) {
53845 ++ unsigned int mclk_fs = adata->sysclk / params_rate(params);
53846 ++
53847 ++ if (adata->sysclk % params_rate(params) != 0) {
53848 ++ dev_warn(component->dev, "sysclk %u not divisible by rate %u\n",
53849 ++ adata->sysclk, params_rate(params));
53850 ++ return -EINVAL;
53851 ++ }
53852 ++
53853 ++ writel(mclk_fs, stream_data->mmio + XLNX_AUD_FS_MULTIPLIER);
53854 ++ }
53855 ++
53856 + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
53857 + stream_data->xfer_mode == AES_TO_PCM) {
53858 + val = readl(stream_data->mmio + XLNX_AUD_STS);
53859 +@@ -552,6 +576,7 @@ static int xlnx_formatter_pcm_new(struct snd_soc_component *component,
53860 +
53861 + static const struct snd_soc_component_driver xlnx_asoc_component = {
53862 + .name = DRV_NAME,
53863 ++ .set_sysclk = xlnx_formatter_set_sysclk,
53864 + .open = xlnx_formatter_pcm_open,
53865 + .close = xlnx_formatter_pcm_close,
53866 + .hw_params = xlnx_formatter_pcm_hw_params,
53867 +diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c
53868 +index 76c0e37a838cf..8a2da6b1012eb 100644
53869 +--- a/sound/spi/at73c213.c
53870 ++++ b/sound/spi/at73c213.c
53871 +@@ -218,7 +218,9 @@ static int snd_at73c213_pcm_open(struct snd_pcm_substream *substream)
53872 + runtime->hw = snd_at73c213_playback_hw;
53873 + chip->substream = substream;
53874 +
53875 +- clk_enable(chip->ssc->clk);
53876 ++ err = clk_enable(chip->ssc->clk);
53877 ++ if (err)
53878 ++ return err;
53879 +
53880 + return 0;
53881 + }
53882 +@@ -776,7 +778,9 @@ static int snd_at73c213_chip_init(struct snd_at73c213 *chip)
53883 + goto out;
53884 +
53885 + /* Enable DAC master clock. */
53886 +- clk_enable(chip->board->dac_clk);
53887 ++ retval = clk_enable(chip->board->dac_clk);
53888 ++ if (retval)
53889 ++ goto out;
53890 +
53891 + /* Initialize at73c213 on SPI bus. */
53892 + retval = snd_at73c213_write_reg(chip, DAC_RST, 0x04);
53893 +@@ -889,7 +893,9 @@ static int snd_at73c213_dev_init(struct snd_card *card,
53894 + chip->card = card;
53895 + chip->irq = -1;
53896 +
53897 +- clk_enable(chip->ssc->clk);
53898 ++ retval = clk_enable(chip->ssc->clk);
53899 ++ if (retval)
53900 ++ return retval;
53901 +
53902 + retval = request_irq(irq, snd_at73c213_interrupt, 0, "at73c213", chip);
53903 + if (retval) {
53904 +@@ -1008,7 +1014,9 @@ static int snd_at73c213_remove(struct spi_device *spi)
53905 + int retval;
53906 +
53907 + /* Stop playback. */
53908 +- clk_enable(chip->ssc->clk);
53909 ++ retval = clk_enable(chip->ssc->clk);
53910 ++ if (retval)
53911 ++ goto out;
53912 + ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXDIS));
53913 + clk_disable(chip->ssc->clk);
53914 +
53915 +@@ -1088,9 +1096,16 @@ static int snd_at73c213_resume(struct device *dev)
53916 + {
53917 + struct snd_card *card = dev_get_drvdata(dev);
53918 + struct snd_at73c213 *chip = card->private_data;
53919 ++ int retval;
53920 +
53921 +- clk_enable(chip->board->dac_clk);
53922 +- clk_enable(chip->ssc->clk);
53923 ++ retval = clk_enable(chip->board->dac_clk);
53924 ++ if (retval)
53925 ++ return retval;
53926 ++ retval = clk_enable(chip->ssc->clk);
53927 ++ if (retval) {
53928 ++ clk_disable(chip->board->dac_clk);
53929 ++ return retval;
53930 ++ }
53931 + ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXEN));
53932 +
53933 + return 0;
53934 +diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
53935 +index 59833125ac0a1..a2c665beda87c 100644
53936 +--- a/tools/bpf/bpftool/btf.c
53937 ++++ b/tools/bpf/bpftool/btf.c
53938 +@@ -902,7 +902,7 @@ static int do_show(int argc, char **argv)
53939 + equal_fn_for_key_as_id, NULL);
53940 + btf_map_table = hashmap__new(hash_fn_for_key_as_id,
53941 + equal_fn_for_key_as_id, NULL);
53942 +- if (!btf_prog_table || !btf_map_table) {
53943 ++ if (IS_ERR(btf_prog_table) || IS_ERR(btf_map_table)) {
53944 + hashmap__free(btf_prog_table);
53945 + hashmap__free(btf_map_table);
53946 + if (fd >= 0)
53947 +diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
53948 +index b4695df2ea3d7..a7387c265e3cf 100644
53949 +--- a/tools/bpf/bpftool/gen.c
53950 ++++ b/tools/bpf/bpftool/gen.c
53951 +@@ -927,7 +927,6 @@ static int do_skeleton(int argc, char **argv)
53952 + s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
53953 + if (!s) \n\
53954 + goto err; \n\
53955 +- obj->skeleton = s; \n\
53956 + \n\
53957 + s->sz = sizeof(*s); \n\
53958 + s->name = \"%1$s\"; \n\
53959 +@@ -1000,6 +999,7 @@ static int do_skeleton(int argc, char **argv)
53960 + \n\
53961 + s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
53962 + \n\
53963 ++ obj->skeleton = s; \n\
53964 + return 0; \n\
53965 + err: \n\
53966 + bpf_object__destroy_skeleton(s); \n\
53967 +diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
53968 +index 2c258db0d3521..97dec81950e5d 100644
53969 +--- a/tools/bpf/bpftool/link.c
53970 ++++ b/tools/bpf/bpftool/link.c
53971 +@@ -2,6 +2,7 @@
53972 + /* Copyright (C) 2020 Facebook */
53973 +
53974 + #include <errno.h>
53975 ++#include <linux/err.h>
53976 + #include <net/if.h>
53977 + #include <stdio.h>
53978 + #include <unistd.h>
53979 +@@ -306,7 +307,7 @@ static int do_show(int argc, char **argv)
53980 + if (show_pinned) {
53981 + link_table = hashmap__new(hash_fn_for_key_as_id,
53982 + equal_fn_for_key_as_id, NULL);
53983 +- if (!link_table) {
53984 ++ if (IS_ERR(link_table)) {
53985 + p_err("failed to create hashmap for pinned paths");
53986 + return -1;
53987 + }
53988 +diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
53989 +index cc530a2298124..0bba33729c7f0 100644
53990 +--- a/tools/bpf/bpftool/map.c
53991 ++++ b/tools/bpf/bpftool/map.c
53992 +@@ -620,17 +620,14 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
53993 + u32_as_hash_field(info->id))
53994 + printf("\n\tpinned %s", (char *)entry->value);
53995 + }
53996 +- printf("\n");
53997 +
53998 + if (frozen_str) {
53999 + frozen = atoi(frozen_str);
54000 + free(frozen_str);
54001 + }
54002 +
54003 +- if (!info->btf_id && !frozen)
54004 +- return 0;
54005 +-
54006 +- printf("\t");
54007 ++ if (info->btf_id || frozen)
54008 ++ printf("\n\t");
54009 +
54010 + if (info->btf_id)
54011 + printf("btf_id %d", info->btf_id);
54012 +@@ -699,7 +696,7 @@ static int do_show(int argc, char **argv)
54013 + if (show_pinned) {
54014 + map_table = hashmap__new(hash_fn_for_key_as_id,
54015 + equal_fn_for_key_as_id, NULL);
54016 +- if (!map_table) {
54017 ++ if (IS_ERR(map_table)) {
54018 + p_err("failed to create hashmap for pinned paths");
54019 + return -1;
54020 + }
54021 +@@ -805,29 +802,30 @@ static int maps_have_btf(int *fds, int nb_fds)
54022 +
54023 + static struct btf *btf_vmlinux;
54024 +
54025 +-static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
54026 ++static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
54027 + {
54028 +- struct btf *btf = NULL;
54029 ++ int err = 0;
54030 +
54031 + if (info->btf_vmlinux_value_type_id) {
54032 + if (!btf_vmlinux) {
54033 + btf_vmlinux = libbpf_find_kernel_btf();
54034 +- if (libbpf_get_error(btf_vmlinux))
54035 ++ err = libbpf_get_error(btf_vmlinux);
54036 ++ if (err) {
54037 + p_err("failed to get kernel btf");
54038 ++ return err;
54039 ++ }
54040 + }
54041 +- return btf_vmlinux;
54042 ++ *btf = btf_vmlinux;
54043 + } else if (info->btf_value_type_id) {
54044 +- int err;
54045 +-
54046 +- btf = btf__load_from_kernel_by_id(info->btf_id);
54047 +- err = libbpf_get_error(btf);
54048 +- if (err) {
54049 ++ *btf = btf__load_from_kernel_by_id(info->btf_id);
54050 ++ err = libbpf_get_error(*btf);
54051 ++ if (err)
54052 + p_err("failed to get btf");
54053 +- btf = ERR_PTR(err);
54054 +- }
54055 ++ } else {
54056 ++ *btf = NULL;
54057 + }
54058 +
54059 +- return btf;
54060 ++ return err;
54061 + }
54062 +
54063 + static void free_map_kv_btf(struct btf *btf)
54064 +@@ -862,8 +860,7 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
54065 + prev_key = NULL;
54066 +
54067 + if (wtr) {
54068 +- btf = get_map_kv_btf(info);
54069 +- err = libbpf_get_error(btf);
54070 ++ err = get_map_kv_btf(info, &btf);
54071 + if (err) {
54072 + goto exit_free;
54073 + }
54074 +@@ -1054,11 +1051,8 @@ static void print_key_value(struct bpf_map_info *info, void *key,
54075 + json_writer_t *btf_wtr;
54076 + struct btf *btf;
54077 +
54078 +- btf = btf__load_from_kernel_by_id(info->btf_id);
54079 +- if (libbpf_get_error(btf)) {
54080 +- p_err("failed to get btf");
54081 ++ if (get_map_kv_btf(info, &btf))
54082 + return;
54083 +- }
54084 +
54085 + if (json_output) {
54086 + print_entry_json(info, key, value, btf);
54087 +diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
54088 +index 56b598eee043a..7c384d10e95f8 100644
54089 +--- a/tools/bpf/bpftool/pids.c
54090 ++++ b/tools/bpf/bpftool/pids.c
54091 +@@ -1,6 +1,7 @@
54092 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
54093 + /* Copyright (C) 2020 Facebook */
54094 + #include <errno.h>
54095 ++#include <linux/err.h>
54096 + #include <stdbool.h>
54097 + #include <stdio.h>
54098 + #include <stdlib.h>
54099 +@@ -101,7 +102,7 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
54100 + libbpf_print_fn_t default_print;
54101 +
54102 + *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
54103 +- if (!*map) {
54104 ++ if (IS_ERR(*map)) {
54105 + p_err("failed to create hashmap for PID references");
54106 + return -1;
54107 + }
54108 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
54109 +index 2a21d50516bc4..33ca834d5f510 100644
54110 +--- a/tools/bpf/bpftool/prog.c
54111 ++++ b/tools/bpf/bpftool/prog.c
54112 +@@ -641,7 +641,7 @@ static int do_show(int argc, char **argv)
54113 + if (show_pinned) {
54114 + prog_table = hashmap__new(hash_fn_for_key_as_id,
54115 + equal_fn_for_key_as_id, NULL);
54116 +- if (!prog_table) {
54117 ++ if (IS_ERR(prog_table)) {
54118 + p_err("failed to create hashmap for pinned paths");
54119 + return -1;
54120 + }
54121 +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
54122 +index b0383d371b9af..49340175feb94 100644
54123 +--- a/tools/include/uapi/linux/bpf.h
54124 ++++ b/tools/include/uapi/linux/bpf.h
54125 +@@ -2286,8 +2286,8 @@ union bpf_attr {
54126 + * Return
54127 + * The return value depends on the result of the test, and can be:
54128 + *
54129 +- * * 0, if current task belongs to the cgroup2.
54130 +- * * 1, if current task does not belong to the cgroup2.
54131 ++ * * 1, if current task belongs to the cgroup2.
54132 ++ * * 0, if current task does not belong to the cgroup2.
54133 + * * A negative error code, if an error occurred.
54134 + *
54135 + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
54136 +diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
54137 +index 90f56b0f585f0..e1b5056068828 100644
54138 +--- a/tools/lib/bpf/bpf_tracing.h
54139 ++++ b/tools/lib/bpf/bpf_tracing.h
54140 +@@ -206,10 +206,10 @@
54141 + #define __PT_PARM4_REG a3
54142 + #define __PT_PARM5_REG a4
54143 + #define __PT_RET_REG ra
54144 +-#define __PT_FP_REG fp
54145 ++#define __PT_FP_REG s0
54146 + #define __PT_RC_REG a5
54147 + #define __PT_SP_REG sp
54148 +-#define __PT_IP_REG epc
54149 ++#define __PT_IP_REG pc
54150 +
54151 + #endif
54152 +
54153 +diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
54154 +index 061839f045255..51862fdee850b 100644
54155 +--- a/tools/lib/bpf/btf.h
54156 ++++ b/tools/lib/bpf/btf.h
54157 +@@ -375,8 +375,28 @@ btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
54158 + const struct btf_dump_type_data_opts *opts);
54159 +
54160 + /*
54161 +- * A set of helpers for easier BTF types handling
54162 ++ * A set of helpers for easier BTF types handling.
54163 ++ *
54164 ++ * The inline functions below rely on constants from the kernel headers which
54165 ++ * may not be available for applications including this header file. To avoid
54166 ++ * compilation errors, we define all the constants here that were added after
54167 ++ * the initial introduction of the BTF_KIND* constants.
54168 + */
54169 ++#ifndef BTF_KIND_FUNC
54170 ++#define BTF_KIND_FUNC 12 /* Function */
54171 ++#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
54172 ++#endif
54173 ++#ifndef BTF_KIND_VAR
54174 ++#define BTF_KIND_VAR 14 /* Variable */
54175 ++#define BTF_KIND_DATASEC 15 /* Section */
54176 ++#endif
54177 ++#ifndef BTF_KIND_FLOAT
54178 ++#define BTF_KIND_FLOAT 16 /* Floating point */
54179 ++#endif
54180 ++/* The kernel header switched to enums, so these two were never #defined */
54181 ++#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
54182 ++#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
54183 ++
54184 + static inline __u16 btf_kind(const struct btf_type *t)
54185 + {
54186 + return BTF_INFO_KIND(t->info);
54187 +diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
54188 +index b9a3260c83cbd..6b1bc1f43728c 100644
54189 +--- a/tools/lib/bpf/btf_dump.c
54190 ++++ b/tools/lib/bpf/btf_dump.c
54191 +@@ -1505,6 +1505,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
54192 + if (s->name_resolved)
54193 + return *cached_name ? *cached_name : orig_name;
54194 +
54195 ++ if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
54196 ++ s->name_resolved = 1;
54197 ++ return orig_name;
54198 ++ }
54199 ++
54200 + dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
54201 + if (dup_cnt > 1) {
54202 + const size_t max_len = 256;
54203 +@@ -1861,14 +1866,16 @@ static int btf_dump_array_data(struct btf_dump *d,
54204 + {
54205 + const struct btf_array *array = btf_array(t);
54206 + const struct btf_type *elem_type;
54207 +- __u32 i, elem_size = 0, elem_type_id;
54208 ++ __u32 i, elem_type_id;
54209 ++ __s64 elem_size;
54210 + bool is_array_member;
54211 +
54212 + elem_type_id = array->type;
54213 + elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
54214 + elem_size = btf__resolve_size(d->btf, elem_type_id);
54215 + if (elem_size <= 0) {
54216 +- pr_warn("unexpected elem size %d for array type [%u]\n", elem_size, id);
54217 ++ pr_warn("unexpected elem size %zd for array type [%u]\n",
54218 ++ (ssize_t)elem_size, id);
54219 + return -EINVAL;
54220 + }
54221 +
54222 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
54223 +index 7f10dd501a52b..94a6a8543cbc9 100644
54224 +--- a/tools/lib/bpf/libbpf.c
54225 ++++ b/tools/lib/bpf/libbpf.c
54226 +@@ -4854,7 +4854,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
54227 + LIBBPF_OPTS(bpf_map_create_opts, create_attr);
54228 + struct bpf_map_def *def = &map->def;
54229 + const char *map_name = NULL;
54230 +- __u32 max_entries;
54231 + int err = 0;
54232 +
54233 + if (kernel_supports(obj, FEAT_PROG_NAME))
54234 +@@ -4864,21 +4863,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
54235 + create_attr.numa_node = map->numa_node;
54236 + create_attr.map_extra = map->map_extra;
54237 +
54238 +- if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
54239 +- int nr_cpus;
54240 +-
54241 +- nr_cpus = libbpf_num_possible_cpus();
54242 +- if (nr_cpus < 0) {
54243 +- pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
54244 +- map->name, nr_cpus);
54245 +- return nr_cpus;
54246 +- }
54247 +- pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
54248 +- max_entries = nr_cpus;
54249 +- } else {
54250 +- max_entries = def->max_entries;
54251 +- }
54252 +-
54253 + if (bpf_map__is_struct_ops(map))
54254 + create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
54255 +
54256 +@@ -4928,7 +4912,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
54257 +
54258 + if (obj->gen_loader) {
54259 + bpf_gen__map_create(obj->gen_loader, def->type, map_name,
54260 +- def->key_size, def->value_size, max_entries,
54261 ++ def->key_size, def->value_size, def->max_entries,
54262 + &create_attr, is_inner ? -1 : map - obj->maps);
54263 + /* Pretend to have valid FD to pass various fd >= 0 checks.
54264 + * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
54265 +@@ -4937,7 +4921,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
54266 + } else {
54267 + map->fd = bpf_map_create(def->type, map_name,
54268 + def->key_size, def->value_size,
54269 +- max_entries, &create_attr);
54270 ++ def->max_entries, &create_attr);
54271 + }
54272 + if (map->fd < 0 && (create_attr.btf_key_type_id ||
54273 + create_attr.btf_value_type_id)) {
54274 +@@ -4954,7 +4938,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
54275 + map->btf_value_type_id = 0;
54276 + map->fd = bpf_map_create(def->type, map_name,
54277 + def->key_size, def->value_size,
54278 +- max_entries, &create_attr);
54279 ++ def->max_entries, &create_attr);
54280 + }
54281 +
54282 + err = map->fd < 0 ? -errno : 0;
54283 +@@ -5058,6 +5042,24 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
54284 + return 0;
54285 + }
54286 +
54287 ++static int map_set_def_max_entries(struct bpf_map *map)
54288 ++{
54289 ++ if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
54290 ++ int nr_cpus;
54291 ++
54292 ++ nr_cpus = libbpf_num_possible_cpus();
54293 ++ if (nr_cpus < 0) {
54294 ++ pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
54295 ++ map->name, nr_cpus);
54296 ++ return nr_cpus;
54297 ++ }
54298 ++ pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
54299 ++ map->def.max_entries = nr_cpus;
54300 ++ }
54301 ++
54302 ++ return 0;
54303 ++}
54304 ++
54305 + static int
54306 + bpf_object__create_maps(struct bpf_object *obj)
54307 + {
54308 +@@ -5090,6 +5092,10 @@ bpf_object__create_maps(struct bpf_object *obj)
54309 + continue;
54310 + }
54311 +
54312 ++ err = map_set_def_max_entries(map);
54313 ++ if (err)
54314 ++ goto err_out;
54315 ++
54316 + retried = false;
54317 + retry:
54318 + if (map->pin_path) {
54319 +@@ -11795,6 +11801,9 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
54320 +
54321 + void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
54322 + {
54323 ++ if (!s)
54324 ++ return;
54325 ++
54326 + if (s->progs)
54327 + bpf_object__detach_skeleton(s);
54328 + if (s->obj)
54329 +diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
54330 +index 5297839677930..9a89fdfe4987e 100644
54331 +--- a/tools/lib/bpf/libbpf.map
54332 ++++ b/tools/lib/bpf/libbpf.map
54333 +@@ -431,4 +431,4 @@ LIBBPF_0.7.0 {
54334 + libbpf_probe_bpf_map_type;
54335 + libbpf_probe_bpf_prog_type;
54336 + libbpf_set_memlock_rlim_max;
54337 +-};
54338 ++} LIBBPF_0.6.0;
54339 +diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
54340 +index 39f25e09b51e2..fadde7d80a51c 100644
54341 +--- a/tools/lib/bpf/netlink.c
54342 ++++ b/tools/lib/bpf/netlink.c
54343 +@@ -87,29 +87,75 @@ enum {
54344 + NL_DONE,
54345 + };
54346 +
54347 ++static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
54348 ++{
54349 ++ int len;
54350 ++
54351 ++ do {
54352 ++ len = recvmsg(sock, mhdr, flags);
54353 ++ } while (len < 0 && (errno == EINTR || errno == EAGAIN));
54354 ++
54355 ++ if (len < 0)
54356 ++ return -errno;
54357 ++ return len;
54358 ++}
54359 ++
54360 ++static int alloc_iov(struct iovec *iov, int len)
54361 ++{
54362 ++ void *nbuf;
54363 ++
54364 ++ nbuf = realloc(iov->iov_base, len);
54365 ++ if (!nbuf)
54366 ++ return -ENOMEM;
54367 ++
54368 ++ iov->iov_base = nbuf;
54369 ++ iov->iov_len = len;
54370 ++ return 0;
54371 ++}
54372 ++
54373 + static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
54374 + __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
54375 + void *cookie)
54376 + {
54377 ++ struct iovec iov = {};
54378 ++ struct msghdr mhdr = {
54379 ++ .msg_iov = &iov,
54380 ++ .msg_iovlen = 1,
54381 ++ };
54382 + bool multipart = true;
54383 + struct nlmsgerr *err;
54384 + struct nlmsghdr *nh;
54385 +- char buf[4096];
54386 + int len, ret;
54387 +
54388 ++ ret = alloc_iov(&iov, 4096);
54389 ++ if (ret)
54390 ++ goto done;
54391 ++
54392 + while (multipart) {
54393 + start:
54394 + multipart = false;
54395 +- len = recv(sock, buf, sizeof(buf), 0);
54396 ++ len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
54397 ++ if (len < 0) {
54398 ++ ret = len;
54399 ++ goto done;
54400 ++ }
54401 ++
54402 ++ if (len > iov.iov_len) {
54403 ++ ret = alloc_iov(&iov, len);
54404 ++ if (ret)
54405 ++ goto done;
54406 ++ }
54407 ++
54408 ++ len = netlink_recvmsg(sock, &mhdr, 0);
54409 + if (len < 0) {
54410 +- ret = -errno;
54411 ++ ret = len;
54412 + goto done;
54413 + }
54414 +
54415 + if (len == 0)
54416 + break;
54417 +
54418 +- for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
54419 ++ for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
54420 + nh = NLMSG_NEXT(nh, len)) {
54421 + if (nh->nlmsg_pid != nl_pid) {
54422 + ret = -LIBBPF_ERRNO__WRNGPID;
54423 +@@ -130,7 +176,8 @@ start:
54424 + libbpf_nla_dump_errormsg(nh);
54425 + goto done;
54426 + case NLMSG_DONE:
54427 +- return 0;
54428 ++ ret = 0;
54429 ++ goto done;
54430 + default:
54431 + break;
54432 + }
54433 +@@ -142,15 +189,17 @@ start:
54434 + case NL_NEXT:
54435 + goto start;
54436 + case NL_DONE:
54437 +- return 0;
54438 ++ ret = 0;
54439 ++ goto done;
54440 + default:
54441 +- return ret;
54442 ++ goto done;
54443 + }
54444 + }
54445 + }
54446 + }
54447 + ret = 0;
54448 + done:
54449 ++ free(iov.iov_base);
54450 + return ret;
54451 + }
54452 +
54453 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
54454 +index edafe56664f3a..32a2f5749c711 100644
54455 +--- a/tools/lib/bpf/xsk.c
54456 ++++ b/tools/lib/bpf/xsk.c
54457 +@@ -1193,12 +1193,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
54458 +
54459 + int xsk_umem__delete(struct xsk_umem *umem)
54460 + {
54461 ++ struct xdp_mmap_offsets off;
54462 ++ int err;
54463 ++
54464 + if (!umem)
54465 + return 0;
54466 +
54467 + if (umem->refcount)
54468 + return -EBUSY;
54469 +
54470 ++ err = xsk_get_mmap_offsets(umem->fd, &off);
54471 ++ if (!err && umem->fill_save && umem->comp_save) {
54472 ++ munmap(umem->fill_save->ring - off.fr.desc,
54473 ++ off.fr.desc + umem->config.fill_size * sizeof(__u64));
54474 ++ munmap(umem->comp_save->ring - off.cr.desc,
54475 ++ off.cr.desc + umem->config.comp_size * sizeof(__u64));
54476 ++ }
54477 ++
54478 + close(umem->fd);
54479 + free(umem);
54480 +
54481 +diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
54482 +index fa854c83b7e7b..ed616fc19b4f2 100644
54483 +--- a/tools/lib/perf/tests/test-evlist.c
54484 ++++ b/tools/lib/perf/tests/test-evlist.c
54485 +@@ -69,7 +69,7 @@ static int test_stat_cpu(void)
54486 + perf_evlist__set_maps(evlist, cpus, NULL);
54487 +
54488 + err = perf_evlist__open(evlist);
54489 +- __T("failed to open evsel", err == 0);
54490 ++ __T("failed to open evlist", err == 0);
54491 +
54492 + perf_evlist__for_each_evsel(evlist, evsel) {
54493 + cpus = perf_evsel__cpus(evsel);
54494 +@@ -130,7 +130,7 @@ static int test_stat_thread(void)
54495 + perf_evlist__set_maps(evlist, NULL, threads);
54496 +
54497 + err = perf_evlist__open(evlist);
54498 +- __T("failed to open evsel", err == 0);
54499 ++ __T("failed to open evlist", err == 0);
54500 +
54501 + perf_evlist__for_each_evsel(evlist, evsel) {
54502 + perf_evsel__read(evsel, 0, 0, &counts);
54503 +@@ -187,7 +187,7 @@ static int test_stat_thread_enable(void)
54504 + perf_evlist__set_maps(evlist, NULL, threads);
54505 +
54506 + err = perf_evlist__open(evlist);
54507 +- __T("failed to open evsel", err == 0);
54508 ++ __T("failed to open evlist", err == 0);
54509 +
54510 + perf_evlist__for_each_evsel(evlist, evsel) {
54511 + perf_evsel__read(evsel, 0, 0, &counts);
54512 +@@ -507,7 +507,7 @@ static int test_stat_multiplexing(void)
54513 + perf_evlist__set_maps(evlist, NULL, threads);
54514 +
54515 + err = perf_evlist__open(evlist);
54516 +- __T("failed to open evsel", err == 0);
54517 ++ __T("failed to open evlist", err == 0);
54518 +
54519 + perf_evlist__enable(evlist);
54520 +
54521 +diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
54522 +index 8d9b55959256a..cfc208d71f00a 100644
54523 +--- a/tools/perf/arch/x86/util/evlist.c
54524 ++++ b/tools/perf/arch/x86/util/evlist.c
54525 +@@ -20,17 +20,27 @@ int arch_evlist__add_default_attrs(struct evlist *evlist)
54526 +
54527 + struct evsel *arch_evlist__leader(struct list_head *list)
54528 + {
54529 +- struct evsel *evsel, *first;
54530 ++ struct evsel *evsel, *first, *slots = NULL;
54531 ++ bool has_topdown = false;
54532 +
54533 + first = list_first_entry(list, struct evsel, core.node);
54534 +
54535 + if (!pmu_have_event("cpu", "slots"))
54536 + return first;
54537 +
54538 ++ /* If there is a slots event and a topdown event then the slots event comes first. */
54539 + __evlist__for_each_entry(list, evsel) {
54540 +- if (evsel->pmu_name && !strcmp(evsel->pmu_name, "cpu") &&
54541 +- evsel->name && strcasestr(evsel->name, "slots"))
54542 +- return evsel;
54543 ++ if (evsel->pmu_name && !strcmp(evsel->pmu_name, "cpu") && evsel->name) {
54544 ++ if (strcasestr(evsel->name, "slots")) {
54545 ++ slots = evsel;
54546 ++ if (slots == first)
54547 ++ return first;
54548 ++ }
54549 ++ if (!strncasecmp(evsel->name, "topdown", 7))
54550 ++ has_topdown = true;
54551 ++ if (slots && has_topdown)
54552 ++ return slots;
54553 ++ }
54554 + }
54555 + return first;
54556 + }
54557 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
54558 +index 3f98689dd6878..60baa3dadc4b6 100644
54559 +--- a/tools/perf/builtin-stat.c
54560 ++++ b/tools/perf/builtin-stat.c
54561 +@@ -955,10 +955,10 @@ try_again_reset:
54562 + * Enable counters and exec the command:
54563 + */
54564 + if (forks) {
54565 +- evlist__start_workload(evsel_list);
54566 + err = enable_counters();
54567 + if (err)
54568 + return -1;
54569 ++ evlist__start_workload(evsel_list);
54570 +
54571 + t0 = rdclock();
54572 + clock_gettime(CLOCK_MONOTONIC, &ref_time);
54573 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
54574 +index 9ff67206ade4e..821d2f2a8f251 100644
54575 +--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
54576 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
54577 +@@ -314,6 +314,19 @@
54578 + "SampleAfterValue": "2000003",
54579 + "UMask": "0x82"
54580 + },
54581 ++ {
54582 ++ "BriefDescription": "All retired memory instructions.",
54583 ++ "Counter": "0,1,2,3",
54584 ++ "CounterHTOff": "0,1,2,3",
54585 ++ "Data_LA": "1",
54586 ++ "EventCode": "0xD0",
54587 ++ "EventName": "MEM_INST_RETIRED.ANY",
54588 ++ "L1_Hit_Indication": "1",
54589 ++ "PEBS": "1",
54590 ++ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
54591 ++ "SampleAfterValue": "2000003",
54592 ++ "UMask": "0x83"
54593 ++ },
54594 + {
54595 + "BriefDescription": "Retired load instructions with locked access.",
54596 + "Counter": "0,1,2,3",
54597 +@@ -358,6 +371,7 @@
54598 + "EventCode": "0xD0",
54599 + "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
54600 + "PEBS": "1",
54601 ++ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
54602 + "SampleAfterValue": "100003",
54603 + "UMask": "0x11"
54604 + },
54605 +@@ -370,6 +384,7 @@
54606 + "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
54607 + "L1_Hit_Indication": "1",
54608 + "PEBS": "1",
54609 ++ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
54610 + "SampleAfterValue": "100003",
54611 + "UMask": "0x12"
54612 + },
54613 +@@ -733,7 +748,7 @@
54614 + "EventCode": "0xB7, 0xBB",
54615 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
54616 + "MSRIndex": "0x1a6,0x1a7",
54617 +- "MSRValue": "0x0000010491",
54618 ++ "MSRValue": "0x10491",
54619 + "Offcore": "1",
54620 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54621 + "SampleAfterValue": "100003",
54622 +@@ -772,7 +787,7 @@
54623 + "EventCode": "0xB7, 0xBB",
54624 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54625 + "MSRIndex": "0x1a6,0x1a7",
54626 +- "MSRValue": "0x04003C0491",
54627 ++ "MSRValue": "0x4003C0491",
54628 + "Offcore": "1",
54629 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54630 + "SampleAfterValue": "100003",
54631 +@@ -785,7 +800,7 @@
54632 + "EventCode": "0xB7, 0xBB",
54633 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
54634 + "MSRIndex": "0x1a6,0x1a7",
54635 +- "MSRValue": "0x01003C0491",
54636 ++ "MSRValue": "0x1003C0491",
54637 + "Offcore": "1",
54638 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54639 + "SampleAfterValue": "100003",
54640 +@@ -798,7 +813,7 @@
54641 + "EventCode": "0xB7, 0xBB",
54642 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
54643 + "MSRIndex": "0x1a6,0x1a7",
54644 +- "MSRValue": "0x08003C0491",
54645 ++ "MSRValue": "0x8003C0491",
54646 + "Offcore": "1",
54647 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54648 + "SampleAfterValue": "100003",
54649 +@@ -811,7 +826,7 @@
54650 + "EventCode": "0xB7, 0xBB",
54651 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
54652 + "MSRIndex": "0x1a6,0x1a7",
54653 +- "MSRValue": "0x0000010490",
54654 ++ "MSRValue": "0x10490",
54655 + "Offcore": "1",
54656 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54657 + "SampleAfterValue": "100003",
54658 +@@ -850,7 +865,7 @@
54659 + "EventCode": "0xB7, 0xBB",
54660 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54661 + "MSRIndex": "0x1a6,0x1a7",
54662 +- "MSRValue": "0x04003C0490",
54663 ++ "MSRValue": "0x4003C0490",
54664 + "Offcore": "1",
54665 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54666 + "SampleAfterValue": "100003",
54667 +@@ -863,7 +878,7 @@
54668 + "EventCode": "0xB7, 0xBB",
54669 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
54670 + "MSRIndex": "0x1a6,0x1a7",
54671 +- "MSRValue": "0x01003C0490",
54672 ++ "MSRValue": "0x1003C0490",
54673 + "Offcore": "1",
54674 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54675 + "SampleAfterValue": "100003",
54676 +@@ -876,7 +891,7 @@
54677 + "EventCode": "0xB7, 0xBB",
54678 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
54679 + "MSRIndex": "0x1a6,0x1a7",
54680 +- "MSRValue": "0x08003C0490",
54681 ++ "MSRValue": "0x8003C0490",
54682 + "Offcore": "1",
54683 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54684 + "SampleAfterValue": "100003",
54685 +@@ -889,7 +904,7 @@
54686 + "EventCode": "0xB7, 0xBB",
54687 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
54688 + "MSRIndex": "0x1a6,0x1a7",
54689 +- "MSRValue": "0x0000010120",
54690 ++ "MSRValue": "0x10120",
54691 + "Offcore": "1",
54692 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54693 + "SampleAfterValue": "100003",
54694 +@@ -928,7 +943,7 @@
54695 + "EventCode": "0xB7, 0xBB",
54696 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54697 + "MSRIndex": "0x1a6,0x1a7",
54698 +- "MSRValue": "0x04003C0120",
54699 ++ "MSRValue": "0x4003C0120",
54700 + "Offcore": "1",
54701 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54702 + "SampleAfterValue": "100003",
54703 +@@ -941,7 +956,7 @@
54704 + "EventCode": "0xB7, 0xBB",
54705 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
54706 + "MSRIndex": "0x1a6,0x1a7",
54707 +- "MSRValue": "0x01003C0120",
54708 ++ "MSRValue": "0x1003C0120",
54709 + "Offcore": "1",
54710 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54711 + "SampleAfterValue": "100003",
54712 +@@ -954,7 +969,7 @@
54713 + "EventCode": "0xB7, 0xBB",
54714 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
54715 + "MSRIndex": "0x1a6,0x1a7",
54716 +- "MSRValue": "0x08003C0120",
54717 ++ "MSRValue": "0x8003C0120",
54718 + "Offcore": "1",
54719 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54720 + "SampleAfterValue": "100003",
54721 +@@ -967,7 +982,7 @@
54722 + "EventCode": "0xB7, 0xBB",
54723 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
54724 + "MSRIndex": "0x1a6,0x1a7",
54725 +- "MSRValue": "0x0000010122",
54726 ++ "MSRValue": "0x10122",
54727 + "Offcore": "1",
54728 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54729 + "SampleAfterValue": "100003",
54730 +@@ -1006,7 +1021,7 @@
54731 + "EventCode": "0xB7, 0xBB",
54732 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54733 + "MSRIndex": "0x1a6,0x1a7",
54734 +- "MSRValue": "0x04003C0122",
54735 ++ "MSRValue": "0x4003C0122",
54736 + "Offcore": "1",
54737 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54738 + "SampleAfterValue": "100003",
54739 +@@ -1019,7 +1034,7 @@
54740 + "EventCode": "0xB7, 0xBB",
54741 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
54742 + "MSRIndex": "0x1a6,0x1a7",
54743 +- "MSRValue": "0x01003C0122",
54744 ++ "MSRValue": "0x1003C0122",
54745 + "Offcore": "1",
54746 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54747 + "SampleAfterValue": "100003",
54748 +@@ -1032,7 +1047,7 @@
54749 + "EventCode": "0xB7, 0xBB",
54750 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
54751 + "MSRIndex": "0x1a6,0x1a7",
54752 +- "MSRValue": "0x08003C0122",
54753 ++ "MSRValue": "0x8003C0122",
54754 + "Offcore": "1",
54755 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54756 + "SampleAfterValue": "100003",
54757 +@@ -1045,7 +1060,7 @@
54758 + "EventCode": "0xB7, 0xBB",
54759 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
54760 + "MSRIndex": "0x1a6,0x1a7",
54761 +- "MSRValue": "0x0000010004",
54762 ++ "MSRValue": "0x10004",
54763 + "Offcore": "1",
54764 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54765 + "SampleAfterValue": "100003",
54766 +@@ -1084,7 +1099,7 @@
54767 + "EventCode": "0xB7, 0xBB",
54768 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54769 + "MSRIndex": "0x1a6,0x1a7",
54770 +- "MSRValue": "0x04003C0004",
54771 ++ "MSRValue": "0x4003C0004",
54772 + "Offcore": "1",
54773 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54774 + "SampleAfterValue": "100003",
54775 +@@ -1097,7 +1112,7 @@
54776 + "EventCode": "0xB7, 0xBB",
54777 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
54778 + "MSRIndex": "0x1a6,0x1a7",
54779 +- "MSRValue": "0x01003C0004",
54780 ++ "MSRValue": "0x1003C0004",
54781 + "Offcore": "1",
54782 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54783 + "SampleAfterValue": "100003",
54784 +@@ -1110,7 +1125,7 @@
54785 + "EventCode": "0xB7, 0xBB",
54786 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
54787 + "MSRIndex": "0x1a6,0x1a7",
54788 +- "MSRValue": "0x08003C0004",
54789 ++ "MSRValue": "0x8003C0004",
54790 + "Offcore": "1",
54791 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54792 + "SampleAfterValue": "100003",
54793 +@@ -1123,7 +1138,7 @@
54794 + "EventCode": "0xB7, 0xBB",
54795 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
54796 + "MSRIndex": "0x1a6,0x1a7",
54797 +- "MSRValue": "0x0000010001",
54798 ++ "MSRValue": "0x10001",
54799 + "Offcore": "1",
54800 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54801 + "SampleAfterValue": "100003",
54802 +@@ -1162,7 +1177,7 @@
54803 + "EventCode": "0xB7, 0xBB",
54804 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54805 + "MSRIndex": "0x1a6,0x1a7",
54806 +- "MSRValue": "0x04003C0001",
54807 ++ "MSRValue": "0x4003C0001",
54808 + "Offcore": "1",
54809 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54810 + "SampleAfterValue": "100003",
54811 +@@ -1175,7 +1190,7 @@
54812 + "EventCode": "0xB7, 0xBB",
54813 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
54814 + "MSRIndex": "0x1a6,0x1a7",
54815 +- "MSRValue": "0x01003C0001",
54816 ++ "MSRValue": "0x1003C0001",
54817 + "Offcore": "1",
54818 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54819 + "SampleAfterValue": "100003",
54820 +@@ -1188,7 +1203,7 @@
54821 + "EventCode": "0xB7, 0xBB",
54822 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
54823 + "MSRIndex": "0x1a6,0x1a7",
54824 +- "MSRValue": "0x08003C0001",
54825 ++ "MSRValue": "0x8003C0001",
54826 + "Offcore": "1",
54827 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54828 + "SampleAfterValue": "100003",
54829 +@@ -1201,7 +1216,7 @@
54830 + "EventCode": "0xB7, 0xBB",
54831 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
54832 + "MSRIndex": "0x1a6,0x1a7",
54833 +- "MSRValue": "0x0000010002",
54834 ++ "MSRValue": "0x10002",
54835 + "Offcore": "1",
54836 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54837 + "SampleAfterValue": "100003",
54838 +@@ -1240,7 +1255,7 @@
54839 + "EventCode": "0xB7, 0xBB",
54840 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54841 + "MSRIndex": "0x1a6,0x1a7",
54842 +- "MSRValue": "0x04003C0002",
54843 ++ "MSRValue": "0x4003C0002",
54844 + "Offcore": "1",
54845 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54846 + "SampleAfterValue": "100003",
54847 +@@ -1253,7 +1268,7 @@
54848 + "EventCode": "0xB7, 0xBB",
54849 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
54850 + "MSRIndex": "0x1a6,0x1a7",
54851 +- "MSRValue": "0x01003C0002",
54852 ++ "MSRValue": "0x1003C0002",
54853 + "Offcore": "1",
54854 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54855 + "SampleAfterValue": "100003",
54856 +@@ -1266,7 +1281,7 @@
54857 + "EventCode": "0xB7, 0xBB",
54858 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
54859 + "MSRIndex": "0x1a6,0x1a7",
54860 +- "MSRValue": "0x08003C0002",
54861 ++ "MSRValue": "0x8003C0002",
54862 + "Offcore": "1",
54863 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54864 + "SampleAfterValue": "100003",
54865 +@@ -1279,7 +1294,7 @@
54866 + "EventCode": "0xB7, 0xBB",
54867 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
54868 + "MSRIndex": "0x1a6,0x1a7",
54869 +- "MSRValue": "0x0000010400",
54870 ++ "MSRValue": "0x10400",
54871 + "Offcore": "1",
54872 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54873 + "SampleAfterValue": "100003",
54874 +@@ -1318,7 +1333,7 @@
54875 + "EventCode": "0xB7, 0xBB",
54876 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54877 + "MSRIndex": "0x1a6,0x1a7",
54878 +- "MSRValue": "0x04003C0400",
54879 ++ "MSRValue": "0x4003C0400",
54880 + "Offcore": "1",
54881 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54882 + "SampleAfterValue": "100003",
54883 +@@ -1331,7 +1346,7 @@
54884 + "EventCode": "0xB7, 0xBB",
54885 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
54886 + "MSRIndex": "0x1a6,0x1a7",
54887 +- "MSRValue": "0x01003C0400",
54888 ++ "MSRValue": "0x1003C0400",
54889 + "Offcore": "1",
54890 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54891 + "SampleAfterValue": "100003",
54892 +@@ -1344,7 +1359,7 @@
54893 + "EventCode": "0xB7, 0xBB",
54894 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
54895 + "MSRIndex": "0x1a6,0x1a7",
54896 +- "MSRValue": "0x08003C0400",
54897 ++ "MSRValue": "0x8003C0400",
54898 + "Offcore": "1",
54899 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54900 + "SampleAfterValue": "100003",
54901 +@@ -1357,7 +1372,7 @@
54902 + "EventCode": "0xB7, 0xBB",
54903 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
54904 + "MSRIndex": "0x1a6,0x1a7",
54905 +- "MSRValue": "0x0000010010",
54906 ++ "MSRValue": "0x10010",
54907 + "Offcore": "1",
54908 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54909 + "SampleAfterValue": "100003",
54910 +@@ -1396,7 +1411,7 @@
54911 + "EventCode": "0xB7, 0xBB",
54912 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54913 + "MSRIndex": "0x1a6,0x1a7",
54914 +- "MSRValue": "0x04003C0010",
54915 ++ "MSRValue": "0x4003C0010",
54916 + "Offcore": "1",
54917 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54918 + "SampleAfterValue": "100003",
54919 +@@ -1409,7 +1424,7 @@
54920 + "EventCode": "0xB7, 0xBB",
54921 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
54922 + "MSRIndex": "0x1a6,0x1a7",
54923 +- "MSRValue": "0x01003C0010",
54924 ++ "MSRValue": "0x1003C0010",
54925 + "Offcore": "1",
54926 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54927 + "SampleAfterValue": "100003",
54928 +@@ -1422,7 +1437,7 @@
54929 + "EventCode": "0xB7, 0xBB",
54930 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
54931 + "MSRIndex": "0x1a6,0x1a7",
54932 +- "MSRValue": "0x08003C0010",
54933 ++ "MSRValue": "0x8003C0010",
54934 + "Offcore": "1",
54935 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54936 + "SampleAfterValue": "100003",
54937 +@@ -1435,7 +1450,7 @@
54938 + "EventCode": "0xB7, 0xBB",
54939 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
54940 + "MSRIndex": "0x1a6,0x1a7",
54941 +- "MSRValue": "0x0000010020",
54942 ++ "MSRValue": "0x10020",
54943 + "Offcore": "1",
54944 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54945 + "SampleAfterValue": "100003",
54946 +@@ -1474,7 +1489,7 @@
54947 + "EventCode": "0xB7, 0xBB",
54948 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54949 + "MSRIndex": "0x1a6,0x1a7",
54950 +- "MSRValue": "0x04003C0020",
54951 ++ "MSRValue": "0x4003C0020",
54952 + "Offcore": "1",
54953 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54954 + "SampleAfterValue": "100003",
54955 +@@ -1487,7 +1502,7 @@
54956 + "EventCode": "0xB7, 0xBB",
54957 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
54958 + "MSRIndex": "0x1a6,0x1a7",
54959 +- "MSRValue": "0x01003C0020",
54960 ++ "MSRValue": "0x1003C0020",
54961 + "Offcore": "1",
54962 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54963 + "SampleAfterValue": "100003",
54964 +@@ -1500,7 +1515,7 @@
54965 + "EventCode": "0xB7, 0xBB",
54966 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
54967 + "MSRIndex": "0x1a6,0x1a7",
54968 +- "MSRValue": "0x08003C0020",
54969 ++ "MSRValue": "0x8003C0020",
54970 + "Offcore": "1",
54971 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54972 + "SampleAfterValue": "100003",
54973 +@@ -1513,7 +1528,7 @@
54974 + "EventCode": "0xB7, 0xBB",
54975 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
54976 + "MSRIndex": "0x1a6,0x1a7",
54977 +- "MSRValue": "0x0000010080",
54978 ++ "MSRValue": "0x10080",
54979 + "Offcore": "1",
54980 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54981 + "SampleAfterValue": "100003",
54982 +@@ -1552,7 +1567,7 @@
54983 + "EventCode": "0xB7, 0xBB",
54984 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
54985 + "MSRIndex": "0x1a6,0x1a7",
54986 +- "MSRValue": "0x04003C0080",
54987 ++ "MSRValue": "0x4003C0080",
54988 + "Offcore": "1",
54989 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54990 + "SampleAfterValue": "100003",
54991 +@@ -1565,7 +1580,7 @@
54992 + "EventCode": "0xB7, 0xBB",
54993 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
54994 + "MSRIndex": "0x1a6,0x1a7",
54995 +- "MSRValue": "0x01003C0080",
54996 ++ "MSRValue": "0x1003C0080",
54997 + "Offcore": "1",
54998 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
54999 + "SampleAfterValue": "100003",
55000 +@@ -1578,7 +1593,7 @@
55001 + "EventCode": "0xB7, 0xBB",
55002 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
55003 + "MSRIndex": "0x1a6,0x1a7",
55004 +- "MSRValue": "0x08003C0080",
55005 ++ "MSRValue": "0x8003C0080",
55006 + "Offcore": "1",
55007 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55008 + "SampleAfterValue": "100003",
55009 +@@ -1591,7 +1606,7 @@
55010 + "EventCode": "0xB7, 0xBB",
55011 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
55012 + "MSRIndex": "0x1a6,0x1a7",
55013 +- "MSRValue": "0x0000010100",
55014 ++ "MSRValue": "0x10100",
55015 + "Offcore": "1",
55016 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55017 + "SampleAfterValue": "100003",
55018 +@@ -1630,7 +1645,7 @@
55019 + "EventCode": "0xB7, 0xBB",
55020 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
55021 + "MSRIndex": "0x1a6,0x1a7",
55022 +- "MSRValue": "0x04003C0100",
55023 ++ "MSRValue": "0x4003C0100",
55024 + "Offcore": "1",
55025 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55026 + "SampleAfterValue": "100003",
55027 +@@ -1643,7 +1658,7 @@
55028 + "EventCode": "0xB7, 0xBB",
55029 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
55030 + "MSRIndex": "0x1a6,0x1a7",
55031 +- "MSRValue": "0x01003C0100",
55032 ++ "MSRValue": "0x1003C0100",
55033 + "Offcore": "1",
55034 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55035 + "SampleAfterValue": "100003",
55036 +@@ -1656,7 +1671,7 @@
55037 + "EventCode": "0xB7, 0xBB",
55038 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
55039 + "MSRIndex": "0x1a6,0x1a7",
55040 +- "MSRValue": "0x08003C0100",
55041 ++ "MSRValue": "0x8003C0100",
55042 + "Offcore": "1",
55043 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55044 + "SampleAfterValue": "100003",
55045 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
55046 +index 503737ed3a83c..9e873ab224502 100644
55047 +--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
55048 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
55049 +@@ -1,73 +1,81 @@
55050 + [
55051 + {
55052 +- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55053 ++ "BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55054 + "Counter": "0,1,2,3",
55055 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55056 + "EventCode": "0xC7",
55057 + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
55058 ++ "PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55059 + "SampleAfterValue": "2000003",
55060 + "UMask": "0x4"
55061 + },
55062 + {
55063 +- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55064 ++ "BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55065 + "Counter": "0,1,2,3",
55066 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55067 + "EventCode": "0xC7",
55068 + "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
55069 ++ "PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55070 + "SampleAfterValue": "2000003",
55071 + "UMask": "0x8"
55072 + },
55073 + {
55074 +- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55075 ++ "BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55076 + "Counter": "0,1,2,3",
55077 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55078 + "EventCode": "0xC7",
55079 + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
55080 ++ "PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55081 + "SampleAfterValue": "2000003",
55082 + "UMask": "0x10"
55083 + },
55084 + {
55085 +- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55086 ++ "BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55087 + "Counter": "0,1,2,3",
55088 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55089 + "EventCode": "0xC7",
55090 + "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
55091 ++ "PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55092 + "SampleAfterValue": "2000003",
55093 + "UMask": "0x20"
55094 + },
55095 + {
55096 +- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
55097 ++ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55098 + "Counter": "0,1,2,3",
55099 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55100 + "EventCode": "0xC7",
55101 + "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
55102 ++ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55103 + "SampleAfterValue": "2000003",
55104 + "UMask": "0x40"
55105 + },
55106 + {
55107 +- "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 16 calculations per element.",
55108 ++ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55109 + "Counter": "0,1,2,3",
55110 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55111 + "EventCode": "0xC7",
55112 + "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
55113 ++ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55114 + "SampleAfterValue": "2000003",
55115 + "UMask": "0x80"
55116 + },
55117 + {
55118 +- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55119 ++ "BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55120 + "Counter": "0,1,2,3",
55121 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55122 + "EventCode": "0xC7",
55123 + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
55124 ++ "PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55125 + "SampleAfterValue": "2000003",
55126 + "UMask": "0x1"
55127 + },
55128 + {
55129 +- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
55130 ++ "BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
55131 + "Counter": "0,1,2,3",
55132 + "CounterHTOff": "0,1,2,3,4,5,6,7",
55133 + "EventCode": "0xC7",
55134 + "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
55135 ++ "PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
55136 + "SampleAfterValue": "2000003",
55137 + "UMask": "0x2"
55138 + },
55139 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
55140 +index 078706a500919..ecce4273ae52c 100644
55141 +--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
55142 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
55143 +@@ -30,7 +30,21 @@
55144 + "UMask": "0x2"
55145 + },
55146 + {
55147 +- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
55148 ++ "BriefDescription": "Retired Instructions who experienced DSB miss.",
55149 ++ "Counter": "0,1,2,3",
55150 ++ "CounterHTOff": "0,1,2,3",
55151 ++ "EventCode": "0xC6",
55152 ++ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
55153 ++ "MSRIndex": "0x3F7",
55154 ++ "MSRValue": "0x1",
55155 ++ "PEBS": "1",
55156 ++ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
55157 ++ "SampleAfterValue": "100007",
55158 ++ "TakenAlone": "1",
55159 ++ "UMask": "0x1"
55160 ++ },
55161 ++ {
55162 ++ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
55163 + "Counter": "0,1,2,3",
55164 + "CounterHTOff": "0,1,2,3",
55165 + "EventCode": "0xC6",
55166 +@@ -38,7 +52,7 @@
55167 + "MSRIndex": "0x3F7",
55168 + "MSRValue": "0x11",
55169 + "PEBS": "1",
55170 +- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
55171 ++ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
55172 + "SampleAfterValue": "100007",
55173 + "TakenAlone": "1",
55174 + "UMask": "0x1"
55175 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
55176 +index 6f29b02fa320c..60c286b4fe54c 100644
55177 +--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
55178 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
55179 +@@ -299,7 +299,7 @@
55180 + "EventCode": "0xB7, 0xBB",
55181 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
55182 + "MSRIndex": "0x1a6,0x1a7",
55183 +- "MSRValue": "0x083FC00491",
55184 ++ "MSRValue": "0x83FC00491",
55185 + "Offcore": "1",
55186 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55187 + "SampleAfterValue": "100003",
55188 +@@ -312,7 +312,7 @@
55189 + "EventCode": "0xB7, 0xBB",
55190 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55191 + "MSRIndex": "0x1a6,0x1a7",
55192 +- "MSRValue": "0x063FC00491",
55193 ++ "MSRValue": "0x63FC00491",
55194 + "Offcore": "1",
55195 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55196 + "SampleAfterValue": "100003",
55197 +@@ -325,7 +325,7 @@
55198 + "EventCode": "0xB7, 0xBB",
55199 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55200 + "MSRIndex": "0x1a6,0x1a7",
55201 +- "MSRValue": "0x0604000491",
55202 ++ "MSRValue": "0x604000491",
55203 + "Offcore": "1",
55204 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55205 + "SampleAfterValue": "100003",
55206 +@@ -338,7 +338,7 @@
55207 + "EventCode": "0xB7, 0xBB",
55208 + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55209 + "MSRIndex": "0x1a6,0x1a7",
55210 +- "MSRValue": "0x063B800491",
55211 ++ "MSRValue": "0x63B800491",
55212 + "Offcore": "1",
55213 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55214 + "SampleAfterValue": "100003",
55215 +@@ -377,7 +377,7 @@
55216 + "EventCode": "0xB7, 0xBB",
55217 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
55218 + "MSRIndex": "0x1a6,0x1a7",
55219 +- "MSRValue": "0x083FC00490",
55220 ++ "MSRValue": "0x83FC00490",
55221 + "Offcore": "1",
55222 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55223 + "SampleAfterValue": "100003",
55224 +@@ -390,7 +390,7 @@
55225 + "EventCode": "0xB7, 0xBB",
55226 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55227 + "MSRIndex": "0x1a6,0x1a7",
55228 +- "MSRValue": "0x063FC00490",
55229 ++ "MSRValue": "0x63FC00490",
55230 + "Offcore": "1",
55231 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55232 + "SampleAfterValue": "100003",
55233 +@@ -403,7 +403,7 @@
55234 + "EventCode": "0xB7, 0xBB",
55235 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55236 + "MSRIndex": "0x1a6,0x1a7",
55237 +- "MSRValue": "0x0604000490",
55238 ++ "MSRValue": "0x604000490",
55239 + "Offcore": "1",
55240 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55241 + "SampleAfterValue": "100003",
55242 +@@ -416,7 +416,7 @@
55243 + "EventCode": "0xB7, 0xBB",
55244 + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55245 + "MSRIndex": "0x1a6,0x1a7",
55246 +- "MSRValue": "0x063B800490",
55247 ++ "MSRValue": "0x63B800490",
55248 + "Offcore": "1",
55249 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55250 + "SampleAfterValue": "100003",
55251 +@@ -455,7 +455,7 @@
55252 + "EventCode": "0xB7, 0xBB",
55253 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
55254 + "MSRIndex": "0x1a6,0x1a7",
55255 +- "MSRValue": "0x083FC00120",
55256 ++ "MSRValue": "0x83FC00120",
55257 + "Offcore": "1",
55258 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55259 + "SampleAfterValue": "100003",
55260 +@@ -468,7 +468,7 @@
55261 + "EventCode": "0xB7, 0xBB",
55262 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55263 + "MSRIndex": "0x1a6,0x1a7",
55264 +- "MSRValue": "0x063FC00120",
55265 ++ "MSRValue": "0x63FC00120",
55266 + "Offcore": "1",
55267 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55268 + "SampleAfterValue": "100003",
55269 +@@ -481,7 +481,7 @@
55270 + "EventCode": "0xB7, 0xBB",
55271 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55272 + "MSRIndex": "0x1a6,0x1a7",
55273 +- "MSRValue": "0x0604000120",
55274 ++ "MSRValue": "0x604000120",
55275 + "Offcore": "1",
55276 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55277 + "SampleAfterValue": "100003",
55278 +@@ -494,7 +494,7 @@
55279 + "EventCode": "0xB7, 0xBB",
55280 + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55281 + "MSRIndex": "0x1a6,0x1a7",
55282 +- "MSRValue": "0x063B800120",
55283 ++ "MSRValue": "0x63B800120",
55284 + "Offcore": "1",
55285 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55286 + "SampleAfterValue": "100003",
55287 +@@ -533,7 +533,7 @@
55288 + "EventCode": "0xB7, 0xBB",
55289 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
55290 + "MSRIndex": "0x1a6,0x1a7",
55291 +- "MSRValue": "0x083FC00122",
55292 ++ "MSRValue": "0x83FC00122",
55293 + "Offcore": "1",
55294 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55295 + "SampleAfterValue": "100003",
55296 +@@ -546,7 +546,7 @@
55297 + "EventCode": "0xB7, 0xBB",
55298 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55299 + "MSRIndex": "0x1a6,0x1a7",
55300 +- "MSRValue": "0x063FC00122",
55301 ++ "MSRValue": "0x63FC00122",
55302 + "Offcore": "1",
55303 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55304 + "SampleAfterValue": "100003",
55305 +@@ -559,7 +559,7 @@
55306 + "EventCode": "0xB7, 0xBB",
55307 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55308 + "MSRIndex": "0x1a6,0x1a7",
55309 +- "MSRValue": "0x0604000122",
55310 ++ "MSRValue": "0x604000122",
55311 + "Offcore": "1",
55312 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55313 + "SampleAfterValue": "100003",
55314 +@@ -572,7 +572,7 @@
55315 + "EventCode": "0xB7, 0xBB",
55316 + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55317 + "MSRIndex": "0x1a6,0x1a7",
55318 +- "MSRValue": "0x063B800122",
55319 ++ "MSRValue": "0x63B800122",
55320 + "Offcore": "1",
55321 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55322 + "SampleAfterValue": "100003",
55323 +@@ -611,7 +611,7 @@
55324 + "EventCode": "0xB7, 0xBB",
55325 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
55326 + "MSRIndex": "0x1a6,0x1a7",
55327 +- "MSRValue": "0x083FC00004",
55328 ++ "MSRValue": "0x83FC00004",
55329 + "Offcore": "1",
55330 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55331 + "SampleAfterValue": "100003",
55332 +@@ -624,7 +624,7 @@
55333 + "EventCode": "0xB7, 0xBB",
55334 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55335 + "MSRIndex": "0x1a6,0x1a7",
55336 +- "MSRValue": "0x063FC00004",
55337 ++ "MSRValue": "0x63FC00004",
55338 + "Offcore": "1",
55339 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55340 + "SampleAfterValue": "100003",
55341 +@@ -637,7 +637,7 @@
55342 + "EventCode": "0xB7, 0xBB",
55343 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55344 + "MSRIndex": "0x1a6,0x1a7",
55345 +- "MSRValue": "0x0604000004",
55346 ++ "MSRValue": "0x604000004",
55347 + "Offcore": "1",
55348 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55349 + "SampleAfterValue": "100003",
55350 +@@ -650,7 +650,7 @@
55351 + "EventCode": "0xB7, 0xBB",
55352 + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55353 + "MSRIndex": "0x1a6,0x1a7",
55354 +- "MSRValue": "0x063B800004",
55355 ++ "MSRValue": "0x63B800004",
55356 + "Offcore": "1",
55357 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55358 + "SampleAfterValue": "100003",
55359 +@@ -689,7 +689,7 @@
55360 + "EventCode": "0xB7, 0xBB",
55361 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
55362 + "MSRIndex": "0x1a6,0x1a7",
55363 +- "MSRValue": "0x083FC00001",
55364 ++ "MSRValue": "0x83FC00001",
55365 + "Offcore": "1",
55366 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55367 + "SampleAfterValue": "100003",
55368 +@@ -702,7 +702,7 @@
55369 + "EventCode": "0xB7, 0xBB",
55370 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55371 + "MSRIndex": "0x1a6,0x1a7",
55372 +- "MSRValue": "0x063FC00001",
55373 ++ "MSRValue": "0x63FC00001",
55374 + "Offcore": "1",
55375 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55376 + "SampleAfterValue": "100003",
55377 +@@ -715,7 +715,7 @@
55378 + "EventCode": "0xB7, 0xBB",
55379 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55380 + "MSRIndex": "0x1a6,0x1a7",
55381 +- "MSRValue": "0x0604000001",
55382 ++ "MSRValue": "0x604000001",
55383 + "Offcore": "1",
55384 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55385 + "SampleAfterValue": "100003",
55386 +@@ -728,7 +728,7 @@
55387 + "EventCode": "0xB7, 0xBB",
55388 + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55389 + "MSRIndex": "0x1a6,0x1a7",
55390 +- "MSRValue": "0x063B800001",
55391 ++ "MSRValue": "0x63B800001",
55392 + "Offcore": "1",
55393 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55394 + "SampleAfterValue": "100003",
55395 +@@ -767,7 +767,7 @@
55396 + "EventCode": "0xB7, 0xBB",
55397 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
55398 + "MSRIndex": "0x1a6,0x1a7",
55399 +- "MSRValue": "0x083FC00002",
55400 ++ "MSRValue": "0x83FC00002",
55401 + "Offcore": "1",
55402 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55403 + "SampleAfterValue": "100003",
55404 +@@ -780,7 +780,7 @@
55405 + "EventCode": "0xB7, 0xBB",
55406 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55407 + "MSRIndex": "0x1a6,0x1a7",
55408 +- "MSRValue": "0x063FC00002",
55409 ++ "MSRValue": "0x63FC00002",
55410 + "Offcore": "1",
55411 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55412 + "SampleAfterValue": "100003",
55413 +@@ -793,7 +793,7 @@
55414 + "EventCode": "0xB7, 0xBB",
55415 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55416 + "MSRIndex": "0x1a6,0x1a7",
55417 +- "MSRValue": "0x0604000002",
55418 ++ "MSRValue": "0x604000002",
55419 + "Offcore": "1",
55420 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55421 + "SampleAfterValue": "100003",
55422 +@@ -806,7 +806,7 @@
55423 + "EventCode": "0xB7, 0xBB",
55424 + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55425 + "MSRIndex": "0x1a6,0x1a7",
55426 +- "MSRValue": "0x063B800002",
55427 ++ "MSRValue": "0x63B800002",
55428 + "Offcore": "1",
55429 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55430 + "SampleAfterValue": "100003",
55431 +@@ -845,7 +845,7 @@
55432 + "EventCode": "0xB7, 0xBB",
55433 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
55434 + "MSRIndex": "0x1a6,0x1a7",
55435 +- "MSRValue": "0x083FC00400",
55436 ++ "MSRValue": "0x83FC00400",
55437 + "Offcore": "1",
55438 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55439 + "SampleAfterValue": "100003",
55440 +@@ -858,7 +858,7 @@
55441 + "EventCode": "0xB7, 0xBB",
55442 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55443 + "MSRIndex": "0x1a6,0x1a7",
55444 +- "MSRValue": "0x063FC00400",
55445 ++ "MSRValue": "0x63FC00400",
55446 + "Offcore": "1",
55447 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55448 + "SampleAfterValue": "100003",
55449 +@@ -871,7 +871,7 @@
55450 + "EventCode": "0xB7, 0xBB",
55451 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55452 + "MSRIndex": "0x1a6,0x1a7",
55453 +- "MSRValue": "0x0604000400",
55454 ++ "MSRValue": "0x604000400",
55455 + "Offcore": "1",
55456 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55457 + "SampleAfterValue": "100003",
55458 +@@ -884,7 +884,7 @@
55459 + "EventCode": "0xB7, 0xBB",
55460 + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55461 + "MSRIndex": "0x1a6,0x1a7",
55462 +- "MSRValue": "0x063B800400",
55463 ++ "MSRValue": "0x63B800400",
55464 + "Offcore": "1",
55465 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55466 + "SampleAfterValue": "100003",
55467 +@@ -923,7 +923,7 @@
55468 + "EventCode": "0xB7, 0xBB",
55469 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
55470 + "MSRIndex": "0x1a6,0x1a7",
55471 +- "MSRValue": "0x083FC00010",
55472 ++ "MSRValue": "0x83FC00010",
55473 + "Offcore": "1",
55474 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55475 + "SampleAfterValue": "100003",
55476 +@@ -936,7 +936,7 @@
55477 + "EventCode": "0xB7, 0xBB",
55478 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55479 + "MSRIndex": "0x1a6,0x1a7",
55480 +- "MSRValue": "0x063FC00010",
55481 ++ "MSRValue": "0x63FC00010",
55482 + "Offcore": "1",
55483 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55484 + "SampleAfterValue": "100003",
55485 +@@ -949,7 +949,7 @@
55486 + "EventCode": "0xB7, 0xBB",
55487 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55488 + "MSRIndex": "0x1a6,0x1a7",
55489 +- "MSRValue": "0x0604000010",
55490 ++ "MSRValue": "0x604000010",
55491 + "Offcore": "1",
55492 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55493 + "SampleAfterValue": "100003",
55494 +@@ -962,7 +962,7 @@
55495 + "EventCode": "0xB7, 0xBB",
55496 + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55497 + "MSRIndex": "0x1a6,0x1a7",
55498 +- "MSRValue": "0x063B800010",
55499 ++ "MSRValue": "0x63B800010",
55500 + "Offcore": "1",
55501 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55502 + "SampleAfterValue": "100003",
55503 +@@ -1001,7 +1001,7 @@
55504 + "EventCode": "0xB7, 0xBB",
55505 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
55506 + "MSRIndex": "0x1a6,0x1a7",
55507 +- "MSRValue": "0x083FC00020",
55508 ++ "MSRValue": "0x83FC00020",
55509 + "Offcore": "1",
55510 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55511 + "SampleAfterValue": "100003",
55512 +@@ -1014,7 +1014,7 @@
55513 + "EventCode": "0xB7, 0xBB",
55514 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55515 + "MSRIndex": "0x1a6,0x1a7",
55516 +- "MSRValue": "0x063FC00020",
55517 ++ "MSRValue": "0x63FC00020",
55518 + "Offcore": "1",
55519 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55520 + "SampleAfterValue": "100003",
55521 +@@ -1027,7 +1027,7 @@
55522 + "EventCode": "0xB7, 0xBB",
55523 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55524 + "MSRIndex": "0x1a6,0x1a7",
55525 +- "MSRValue": "0x0604000020",
55526 ++ "MSRValue": "0x604000020",
55527 + "Offcore": "1",
55528 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55529 + "SampleAfterValue": "100003",
55530 +@@ -1040,7 +1040,7 @@
55531 + "EventCode": "0xB7, 0xBB",
55532 + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55533 + "MSRIndex": "0x1a6,0x1a7",
55534 +- "MSRValue": "0x063B800020",
55535 ++ "MSRValue": "0x63B800020",
55536 + "Offcore": "1",
55537 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55538 + "SampleAfterValue": "100003",
55539 +@@ -1079,7 +1079,7 @@
55540 + "EventCode": "0xB7, 0xBB",
55541 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
55542 + "MSRIndex": "0x1a6,0x1a7",
55543 +- "MSRValue": "0x083FC00080",
55544 ++ "MSRValue": "0x83FC00080",
55545 + "Offcore": "1",
55546 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55547 + "SampleAfterValue": "100003",
55548 +@@ -1092,7 +1092,7 @@
55549 + "EventCode": "0xB7, 0xBB",
55550 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55551 + "MSRIndex": "0x1a6,0x1a7",
55552 +- "MSRValue": "0x063FC00080",
55553 ++ "MSRValue": "0x63FC00080",
55554 + "Offcore": "1",
55555 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55556 + "SampleAfterValue": "100003",
55557 +@@ -1105,7 +1105,7 @@
55558 + "EventCode": "0xB7, 0xBB",
55559 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55560 + "MSRIndex": "0x1a6,0x1a7",
55561 +- "MSRValue": "0x0604000080",
55562 ++ "MSRValue": "0x604000080",
55563 + "Offcore": "1",
55564 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55565 + "SampleAfterValue": "100003",
55566 +@@ -1118,7 +1118,7 @@
55567 + "EventCode": "0xB7, 0xBB",
55568 + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55569 + "MSRIndex": "0x1a6,0x1a7",
55570 +- "MSRValue": "0x063B800080",
55571 ++ "MSRValue": "0x63B800080",
55572 + "Offcore": "1",
55573 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55574 + "SampleAfterValue": "100003",
55575 +@@ -1157,7 +1157,7 @@
55576 + "EventCode": "0xB7, 0xBB",
55577 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
55578 + "MSRIndex": "0x1a6,0x1a7",
55579 +- "MSRValue": "0x083FC00100",
55580 ++ "MSRValue": "0x83FC00100",
55581 + "Offcore": "1",
55582 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55583 + "SampleAfterValue": "100003",
55584 +@@ -1170,7 +1170,7 @@
55585 + "EventCode": "0xB7, 0xBB",
55586 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
55587 + "MSRIndex": "0x1a6,0x1a7",
55588 +- "MSRValue": "0x063FC00100",
55589 ++ "MSRValue": "0x63FC00100",
55590 + "Offcore": "1",
55591 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55592 + "SampleAfterValue": "100003",
55593 +@@ -1183,7 +1183,7 @@
55594 + "EventCode": "0xB7, 0xBB",
55595 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
55596 + "MSRIndex": "0x1a6,0x1a7",
55597 +- "MSRValue": "0x0604000100",
55598 ++ "MSRValue": "0x604000100",
55599 + "Offcore": "1",
55600 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55601 + "SampleAfterValue": "100003",
55602 +@@ -1196,7 +1196,7 @@
55603 + "EventCode": "0xB7, 0xBB",
55604 + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
55605 + "MSRIndex": "0x1a6,0x1a7",
55606 +- "MSRValue": "0x063B800100",
55607 ++ "MSRValue": "0x63B800100",
55608 + "Offcore": "1",
55609 + "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
55610 + "SampleAfterValue": "100003",
55611 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
55612 +index ca57481206660..12eabae3e2242 100644
55613 +--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
55614 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
55615 +@@ -435,6 +435,17 @@
55616 + "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
55617 + "SampleAfterValue": "2000003"
55618 + },
55619 ++ {
55620 ++ "BriefDescription": "Number of all retired NOP instructions.",
55621 ++ "Counter": "0,1,2,3",
55622 ++ "CounterHTOff": "0,1,2,3,4,5,6,7",
55623 ++ "Errata": "SKL091, SKL044",
55624 ++ "EventCode": "0xC0",
55625 ++ "EventName": "INST_RETIRED.NOP",
55626 ++ "PEBS": "1",
55627 ++ "SampleAfterValue": "2000003",
55628 ++ "UMask": "0x2"
55629 ++ },
55630 + {
55631 + "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
55632 + "Counter": "1",
55633 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
55634 +index 863c9e103969e..b016f7d1ff3de 100644
55635 +--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
55636 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
55637 +@@ -1,26 +1,167 @@
55638 + [
55639 ++ {
55640 ++ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
55641 ++ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)",
55642 ++ "MetricGroup": "TopdownL1",
55643 ++ "MetricName": "Frontend_Bound",
55644 ++ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
55645 ++ },
55646 ++ {
55647 ++ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
55648 ++ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
55649 ++ "MetricGroup": "TopdownL1_SMT",
55650 ++ "MetricName": "Frontend_Bound_SMT",
55651 ++ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
55652 ++ },
55653 ++ {
55654 ++ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
55655 ++ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)",
55656 ++ "MetricGroup": "TopdownL1",
55657 ++ "MetricName": "Bad_Speculation",
55658 ++ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
55659 ++ },
55660 ++ {
55661 ++ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
55662 ++ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
55663 ++ "MetricGroup": "TopdownL1_SMT",
55664 ++ "MetricName": "Bad_Speculation_SMT",
55665 ++ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
55666 ++ },
55667 ++ {
55668 ++ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
55669 ++ "MetricConstraint": "NO_NMI_WATCHDOG",
55670 ++ "MetricExpr": "1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)",
55671 ++ "MetricGroup": "TopdownL1",
55672 ++ "MetricName": "Backend_Bound",
55673 ++ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
55674 ++ },
55675 ++ {
55676 ++ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
55677 ++ "MetricExpr": "1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
55678 ++ "MetricGroup": "TopdownL1_SMT",
55679 ++ "MetricName": "Backend_Bound_SMT",
55680 ++ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
55681 ++ },
55682 ++ {
55683 ++ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
55684 ++ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)",
55685 ++ "MetricGroup": "TopdownL1",
55686 ++ "MetricName": "Retiring",
55687 ++ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. "
55688 ++ },
55689 ++ {
55690 ++ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
55691 ++ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
55692 ++ "MetricGroup": "TopdownL1_SMT",
55693 ++ "MetricName": "Retiring_SMT",
55694 ++ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
55695 ++ },
55696 ++ {
55697 ++ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
55698 ++ "MetricExpr": "100 * ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) )",
55699 ++ "MetricGroup": "Bad;BadSpec;BrMispredicts",
55700 ++ "MetricName": "Mispredictions"
55701 ++ },
55702 ++ {
55703 ++ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
55704 ++ "MetricExpr": "100 * ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) )",
55705 ++ "MetricGroup": "Bad;BadSpec;BrMispredicts_SMT",
55706 ++ "MetricName": "Mispredictions_SMT"
55707 ++ },
55708 ++ {
55709 ++ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
55710 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES )
55711 / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( CPU_CLK_UNHALTED.THREAD , cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@ ) / CPU_CLK_UNHALTED.THREAD) / #(CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) ) + ( (( CYCLE_ACTIVITY.STALLS_L2_MISS - CY
55712 CLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (OFFCORE_REQUESTS_BUFFER.SQ_FULL / CPU_CLK_UNHALTED.THREAD) / #(( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) ) ) + ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES))
55713 * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( ((L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )) * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / CPU_CLK_UNHALTED.THREAD) / #(max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) ",
55714 ++ "MetricGroup": "Mem;MemoryBW;Offcore",
55715 ++ "MetricName": "Memory_Bandwidth"
55716 ++ },
55717 ++ {
55718 ++ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
55719 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (M
55720 EM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( CPU_CLK_UNHALTED.THREAD , cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@ ) / CPU_CLK_UNHALTED.THREA
55721 D) / #(CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) ) + ( (( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THR
55722 EAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / #(( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) ) ) + ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_
55723 NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( ((L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )) * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / CPU_CLK_UNHALTED.THREAD) / #(max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) ",
55724 ++ "MetricGroup": "Mem;MemoryBW;Offcore_SMT",
55725 ++ "MetricName": "Memory_Bandwidth_SMT"
55726 ++ },
55727 ++ {
55728 ++ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
55729 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES )
55730 / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( CPU_CLK_UNHALTED.THREAD , OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD ) / CPU_CLK_UNHALTED.THREAD - (min( CPU_CLK_UNHALTED.THREAD , cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@ ) / CPU_CLK_UNHALTED.THREAD)) / #(CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D
55731 _MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) ) + ( (( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (( (20.5 * ((CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time)) - (3.5 * ((CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time)) ) * MEM_LOAD_RETIRED.L3_HIT * (1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / 2) / CPU_CLK_UNHALTED.THREAD) / #(( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_
55732 UNHALTED.THREAD) ) + ( (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD)) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) )",
55733 ++ "MetricGroup": "Mem;MemoryLat;Offcore",
55734 ++ "MetricName": "Memory_Latency"
55735 ++ },
55736 ++ {
55737 ++ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
55738 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (M
55739 EM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( CPU_CLK_UNHALTED.THREAD , OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD ) / CPU_CLK_UNHALTED.THREAD - (min(
55740 CPU_CLK_UNHALTED.THREAD , cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@ ) / CPU_CLK_UNHALTED.THREAD)) / #(CYCLE_ACTIVITY.STALLS_L3_MISS / CPU_CLK_UNHALTED.THREAD + (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD) - (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD))) ) + ( (( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_
55741 PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( (20.5 * ((CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time)) - (3.5 * ((CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time)) ) * MEM_LOAD_RETIRED.L3_HIT * (1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / 2) / CPU_CLK_UNHALTED.THREAD) / #(( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / CPU_CLK_UNHALTED.THREAD) ) + ( (( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) )) / ( (MEM_LOAD_RETIRED.L2_HIT * ( 1 + (MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.
55742 L1_MISS) )) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ ) ) * (( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / CPU_CLK_UNHALTED.THREAD)) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) )",
55743 ++ "MetricGroup": "Mem;MemoryLat;Offcore_SMT",
55744 ++ "MetricName": "Memory_Latency_SMT"
55745 ++ },
55746 ++ {
55747 ++ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
55748 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.ST
55749 LB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / CPU_CLK_UNHALTED.THREAD) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
55750 ++ "MetricGroup": "Mem;MemoryTLB",
55751 ++ "MetricName": "Memory_Data_TLBs"
55752 ++ },
55753 ++ {
55754 ++ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
55755 ++ "MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHA
55756 LTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOP
55757 S_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
55758 ++ "MetricGroup": "Mem;MemoryTLB;_SMT",
55759 ++ "MetricName": "Memory_Data_TLBs_SMT"
55760 ++ },
55761 ++ {
55762 ++ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
55763 ++ "MetricExpr": "100 * (( BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - ( BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN ) - 2 * BR_INST_RETIRED.NEAR_CALL) ) / (4 * CPU_CLK_UNHALTED.THREAD))",
55764 ++ "MetricGroup": "Ret",
55765 ++ "MetricName": "Branching_Overhead"
55766 ++ },
55767 ++ {
55768 ++ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
55769 ++ "MetricExpr": "100 * (( BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - ( BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN ) - 2 * BR_INST_RETIRED.NEAR_CALL) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
55770 ++ "MetricGroup": "Ret_SMT",
55771 ++ "MetricName": "Branching_Overhead_SMT"
55772 ++ },
55773 ++ {
55774 ++ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
55775 ++ "MetricExpr": "100 * (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ( (ICACHE_64B.IFTAG_STALL / CPU_CLK_UNHALTED.THREAD) + (( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ ) / CPU_CLK_UNHALTED.THREAD) + (9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) ) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))",
55776 ++ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB",
55777 ++ "MetricName": "Big_Code"
55778 ++ },
55779 ++ {
55780 ++ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
55781 ++ "MetricExpr": "100 * (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ( (ICACHE_64B.IFTAG_STALL / CPU_CLK_UNHALTED.THREAD) + (( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ ) / CPU_CLK_UNHALTED.THREAD) + (9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) ) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
55782 ++ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB_SMT",
55783 ++ "MetricName": "Big_Code_SMT"
55784 ++ },
55785 ++ {
55786 ++ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
55787 ++ "MetricExpr": "100 * ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) - (100 * (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ( (ICACHE_64B.IFTAG_STALL / CPU_CLK_UNHALTED.THREAD) + (( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ ) / CPU_CLK_UNHALTED.THREAD) + (9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) ) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)))",
55788 ++ "MetricGroup": "Fed;FetchBW;Frontend",
55789 ++ "MetricName": "Instruction_Fetch_BW"
55790 ++ },
55791 ++ {
55792 ++ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
55793 ++ "MetricExpr": "100 * ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) - (100 * (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ( (ICACHE_64B.IFTAG_STALL / CPU_CLK_UNHALTED.THREAD) + (( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@
55794 ) / CPU_CLK_UNHALTED.THREAD) + (9 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) ) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))",
55795 ++ "MetricGroup": "Fed;FetchBW;Frontend_SMT",
55796 ++ "MetricName": "Instruction_Fetch_BW_SMT"
55797 ++ },
55798 + {
55799 + "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
55800 + "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
55801 +- "MetricGroup": "Summary",
55802 ++ "MetricGroup": "Ret;Summary",
55803 + "MetricName": "IPC"
55804 + },
55805 + {
55806 + "BriefDescription": "Uops Per Instruction",
55807 + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
55808 +- "MetricGroup": "Pipeline;Retire",
55809 ++ "MetricGroup": "Pipeline;Ret;Retire",
55810 + "MetricName": "UPI"
55811 + },
55812 + {
55813 + "BriefDescription": "Instruction per taken branch",
55814 +- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
55815 +- "MetricGroup": "Branches;FetchBW;PGO",
55816 +- "MetricName": "IpTB"
55817 ++ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
55818 ++ "MetricGroup": "Branches;Fed;FetchBW",
55819 ++ "MetricName": "UpTB"
55820 + },
55821 + {
55822 + "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
55823 + "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
55824 +- "MetricGroup": "Pipeline",
55825 ++ "MetricGroup": "Pipeline;Mem",
55826 + "MetricName": "CPI"
55827 + },
55828 + {
55829 +@@ -30,39 +171,84 @@
55830 + "MetricName": "CLKS"
55831 + },
55832 + {
55833 +- "BriefDescription": "Instructions Per Cycle (per physical core)",
55834 ++ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
55835 ++ "MetricExpr": "4 * CPU_CLK_UNHALTED.THREAD",
55836 ++ "MetricGroup": "TmaL1",
55837 ++ "MetricName": "SLOTS"
55838 ++ },
55839 ++ {
55840 ++ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
55841 ++ "MetricExpr": "4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
55842 ++ "MetricGroup": "TmaL1_SMT",
55843 ++ "MetricName": "SLOTS_SMT"
55844 ++ },
55845 ++ {
55846 ++ "BriefDescription": "The ratio of Executed- by Issued-Uops",
55847 ++ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
55848 ++ "MetricGroup": "Cor;Pipeline",
55849 ++ "MetricName": "Execute_per_Issue",
55850 ++ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
55851 ++ },
55852 ++ {
55853 ++ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
55854 + "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
55855 +- "MetricGroup": "SMT;TmaL1",
55856 ++ "MetricGroup": "Ret;SMT;TmaL1",
55857 + "MetricName": "CoreIPC"
55858 + },
55859 + {
55860 +- "BriefDescription": "Instructions Per Cycle (per physical core)",
55861 ++ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
55862 + "MetricExpr": "INST_RETIRED.ANY / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
55863 +- "MetricGroup": "SMT;TmaL1",
55864 ++ "MetricGroup": "Ret;SMT;TmaL1_SMT",
55865 + "MetricName": "CoreIPC_SMT"
55866 + },
55867 + {
55868 + "BriefDescription": "Floating Point Operations Per Cycle",
55869 + "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / CPU_CLK_UNHALTED.THREAD",
55870 +- "MetricGroup": "Flops",
55871 ++ "MetricGroup": "Ret;Flops",
55872 + "MetricName": "FLOPc"
55873 + },
55874 + {
55875 + "BriefDescription": "Floating Point Operations Per Cycle",
55876 + "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
55877 +- "MetricGroup": "Flops_SMT",
55878 ++ "MetricGroup": "Ret;Flops_SMT",
55879 + "MetricName": "FLOPc_SMT"
55880 + },
55881 ++ {
55882 ++ "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
55883 ++ "MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
55884 ++ "MetricGroup": "Cor;Flops;HPC",
55885 ++ "MetricName": "FP_Arith_Utilization",
55886 ++ "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
55887 ++ },
55888 ++ {
55889 ++ "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
55890 ++ "MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
55891 ++ "MetricGroup": "Cor;Flops;HPC_SMT",
55892 ++ "MetricName": "FP_Arith_Utilization_SMT",
55893 ++ "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
55894 ++ },
55895 + {
55896 + "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
55897 + "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
55898 +- "MetricGroup": "Pipeline;PortsUtil",
55899 ++ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
55900 + "MetricName": "ILP"
55901 + },
55902 ++ {
55903 ++ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
55904 ++ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
55905 ++ "MetricGroup": "Bad;BrMispredicts",
55906 ++ "MetricName": "Branch_Misprediction_Cost"
55907 ++ },
55908 ++ {
55909 ++ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
55910 ++ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
55911 ++ "MetricGroup": "Bad;BrMispredicts_SMT",
55912 ++ "MetricName": "Branch_Misprediction_Cost_SMT"
55913 ++ },
55914 + {
55915 + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
55916 + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
55917 +- "MetricGroup": "BrMispredicts",
55918 ++ "MetricGroup": "Bad;BadSpec;BrMispredicts",
55919 + "MetricName": "IpMispredict"
55920 + },
55921 + {
55922 +@@ -86,122 +272,249 @@
55923 + {
55924 + "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
55925 + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
55926 +- "MetricGroup": "Branches;InsType",
55927 ++ "MetricGroup": "Branches;Fed;InsType",
55928 + "MetricName": "IpBranch"
55929 + },
55930 + {
55931 + "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
55932 + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
55933 +- "MetricGroup": "Branches",
55934 ++ "MetricGroup": "Branches;Fed;PGO",
55935 + "MetricName": "IpCall"
55936 + },
55937 ++ {
55938 ++ "BriefDescription": "Instruction per taken branch",
55939 ++ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
55940 ++ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
55941 ++ "MetricName": "IpTB"
55942 ++ },
55943 + {
55944 + "BriefDescription": "Branch instructions per taken branch. ",
55945 + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
55946 +- "MetricGroup": "Branches;PGO",
55947 ++ "MetricGroup": "Branches;Fed;PGO",
55948 + "MetricName": "BpTkBranch"
55949 + },
55950 + {
55951 + "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
55952 + "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
55953 +- "MetricGroup": "Flops;FpArith;InsType",
55954 ++ "MetricGroup": "Flops;InsType",
55955 + "MetricName": "IpFLOP"
55956 + },
55957 ++ {
55958 ++ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
55959 ++ "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) )",
55960 ++ "MetricGroup": "Flops;InsType",
55961 ++ "MetricName": "IpArith",
55962 ++ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
55963 ++ },
55964 ++ {
55965 ++ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
55966 ++ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
55967 ++ "MetricGroup": "Flops;FpScalar;InsType",
55968 ++ "MetricName": "IpArith_Scalar_SP",
55969 ++ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
55970 ++ },
55971 ++ {
55972 ++ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
55973 ++ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
55974 ++ "MetricGroup": "Flops;FpScalar;InsType",
55975 ++ "MetricName": "IpArith_Scalar_DP",
55976 ++ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
55977 ++ },
55978 ++ {
55979 ++ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
55980 ++ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE )",
55981 ++ "MetricGroup": "Flops;FpVector;InsType",
55982 ++ "MetricName": "IpArith_AVX128",
55983 ++ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
55984 ++ },
55985 ++ {
55986 ++ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
55987 ++ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )",
55988 ++ "MetricGroup": "Flops;FpVector;InsType",
55989 ++ "MetricName": "IpArith_AVX256",
55990 ++ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
55991 ++ },
55992 ++ {
55993 ++ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
55994 ++ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
55995 ++ "MetricGroup": "Flops;FpVector;InsType",
55996 ++ "MetricName": "IpArith_AVX512",
55997 ++ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
55998 ++ },
55999 + {
56000 + "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
56001 + "MetricExpr": "INST_RETIRED.ANY",
56002 + "MetricGroup": "Summary;TmaL1",
56003 + "MetricName": "Instructions"
56004 + },
56005 ++ {
56006 ++ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
56007 ++ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
56008 ++ "MetricGroup": "Fed;FetchBW",
56009 ++ "MetricName": "Fetch_UpC"
56010 ++ },
56011 + {
56012 + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
56013 + "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
56014 +- "MetricGroup": "DSB;FetchBW",
56015 ++ "MetricGroup": "DSB;Fed;FetchBW",
56016 + "MetricName": "DSB_Coverage"
56017 + },
56018 + {
56019 +- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
56020 ++ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
56021 ++ "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)))",
56022 ++ "MetricGroup": "DSBmiss;Fed",
56023 ++ "MetricName": "DSB_Misses_Cost"
56024 ++ },
56025 ++ {
56026 ++ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
56027 ++ "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED
56028 .THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))",
56029 ++ "MetricGroup": "DSBmiss;Fed_SMT",
56030 ++ "MetricName": "DSB_Misses_Cost_SMT"
56031 ++ },
56032 ++ {
56033 ++ "BriefDescription": "Number of Instructions per non-speculative DSB miss",
56034 ++ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
56035 ++ "MetricGroup": "DSBmiss;Fed",
56036 ++ "MetricName": "IpDSB_Miss_Ret"
56037 ++ },
56038 ++ {
56039 ++ "BriefDescription": "Fraction of branches that are non-taken conditionals",
56040 ++ "MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
56041 ++ "MetricGroup": "Bad;Branches;CodeGen;PGO",
56042 ++ "MetricName": "Cond_NT"
56043 ++ },
56044 ++ {
56045 ++ "BriefDescription": "Fraction of branches that are taken conditionals",
56046 ++ "MetricExpr": "( BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN ) / BR_INST_RETIRED.ALL_BRANCHES",
56047 ++ "MetricGroup": "Bad;Branches;CodeGen;PGO",
56048 ++ "MetricName": "Cond_TK"
56049 ++ },
56050 ++ {
56051 ++ "BriefDescription": "Fraction of branches that are CALL or RET",
56052 ++ "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
56053 ++ "MetricGroup": "Bad;Branches",
56054 ++ "MetricName": "CallRet"
56055 ++ },
56056 ++ {
56057 ++ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
56058 ++ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - ( BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN ) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
56059 ++ "MetricGroup": "Bad;Branches",
56060 ++ "MetricName": "Jump"
56061 ++ },
56062 ++ {
56063 ++ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
56064 + "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
56065 +- "MetricGroup": "MemoryBound;MemoryLat",
56066 +- "MetricName": "Load_Miss_Real_Latency"
56067 ++ "MetricGroup": "Mem;MemoryBound;MemoryLat",
56068 ++ "MetricName": "Load_Miss_Real_Latency",
56069 ++ "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
56070 + },
56071 + {
56072 + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
56073 + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
56074 +- "MetricGroup": "MemoryBound;MemoryBW",
56075 ++ "MetricGroup": "Mem;MemoryBound;MemoryBW",
56076 + "MetricName": "MLP"
56077 + },
56078 +- {
56079 +- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
56080 +- "MetricConstraint": "NO_NMI_WATCHDOG",
56081 +- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * CORE_CLKS )",
56082 +- "MetricGroup": "MemoryTLB",
56083 +- "MetricName": "Page_Walks_Utilization"
56084 +- },
56085 + {
56086 + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
56087 + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
56088 +- "MetricGroup": "MemoryBW",
56089 ++ "MetricGroup": "Mem;MemoryBW",
56090 + "MetricName": "L1D_Cache_Fill_BW"
56091 + },
56092 + {
56093 + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
56094 + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
56095 +- "MetricGroup": "MemoryBW",
56096 ++ "MetricGroup": "Mem;MemoryBW",
56097 + "MetricName": "L2_Cache_Fill_BW"
56098 + },
56099 + {
56100 + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
56101 + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
56102 +- "MetricGroup": "MemoryBW",
56103 ++ "MetricGroup": "Mem;MemoryBW",
56104 + "MetricName": "L3_Cache_Fill_BW"
56105 + },
56106 + {
56107 + "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
56108 + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
56109 +- "MetricGroup": "MemoryBW;Offcore",
56110 ++ "MetricGroup": "Mem;MemoryBW;Offcore",
56111 + "MetricName": "L3_Cache_Access_BW"
56112 + },
56113 + {
56114 + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
56115 + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
56116 +- "MetricGroup": "CacheMisses",
56117 ++ "MetricGroup": "Mem;CacheMisses",
56118 + "MetricName": "L1MPKI"
56119 + },
56120 ++ {
56121 ++ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
56122 ++ "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
56123 ++ "MetricGroup": "Mem;CacheMisses",
56124 ++ "MetricName": "L1MPKI_Load"
56125 ++ },
56126 + {
56127 + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
56128 + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
56129 +- "MetricGroup": "CacheMisses",
56130 ++ "MetricGroup": "Mem;Backend;CacheMisses",
56131 + "MetricName": "L2MPKI"
56132 + },
56133 + {
56134 + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
56135 + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
56136 +- "MetricGroup": "CacheMisses;Offcore",
56137 ++ "MetricGroup": "Mem;CacheMisses;Offcore",
56138 + "MetricName": "L2MPKI_All"
56139 + },
56140 ++ {
56141 ++ "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
56142 ++ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
56143 ++ "MetricGroup": "Mem;CacheMisses",
56144 ++ "MetricName": "L2MPKI_Load"
56145 ++ },
56146 + {
56147 + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
56148 + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
56149 +- "MetricGroup": "CacheMisses",
56150 ++ "MetricGroup": "Mem;CacheMisses",
56151 + "MetricName": "L2HPKI_All"
56152 + },
56153 ++ {
56154 ++ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
56155 ++ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
56156 ++ "MetricGroup": "Mem;CacheMisses",
56157 ++ "MetricName": "L2HPKI_Load"
56158 ++ },
56159 + {
56160 + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
56161 + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
56162 +- "MetricGroup": "CacheMisses",
56163 ++ "MetricGroup": "Mem;CacheMisses",
56164 + "MetricName": "L3MPKI"
56165 + },
56166 ++ {
56167 ++ "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
56168 ++ "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
56169 ++ "MetricGroup": "Mem;CacheMisses",
56170 ++ "MetricName": "FB_HPKI"
56171 ++ },
56172 ++ {
56173 ++ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
56174 ++ "MetricConstraint": "NO_NMI_WATCHDOG",
56175 ++ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
56176 ++ "MetricGroup": "Mem;MemoryTLB",
56177 ++ "MetricName": "Page_Walks_Utilization"
56178 ++ },
56179 ++ {
56180 ++ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
56181 ++ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
56182 ++ "MetricGroup": "Mem;MemoryTLB_SMT",
56183 ++ "MetricName": "Page_Walks_Utilization_SMT"
56184 ++ },
56185 + {
56186 + "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
56187 + "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
56188 +- "MetricGroup": "L2Evicts;Server",
56189 ++ "MetricGroup": "L2Evicts;Mem;Server",
56190 + "MetricName": "L2_Evictions_Silent_PKI"
56191 + },
56192 + {
56193 + "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
56194 + "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
56195 +- "MetricGroup": "L2Evicts;Server",
56196 ++ "MetricGroup": "L2Evicts;Mem;Server",
56197 + "MetricName": "L2_Evictions_NonSilent_PKI"
56198 + },
56199 + {
56200 +@@ -219,7 +532,7 @@
56201 + {
56202 + "BriefDescription": "Giga Floating Point Operations Per Second",
56203 + "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
56204 +- "MetricGroup": "Flops;HPC",
56205 ++ "MetricGroup": "Cor;Flops;HPC",
56206 + "MetricName": "GFLOPs"
56207 + },
56208 + {
56209 +@@ -228,6 +541,48 @@
56210 + "MetricGroup": "Power",
56211 + "MetricName": "Turbo_Utilization"
56212 + },
56213 ++ {
56214 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
56215 ++ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / CPU_CLK_UNHALTED.THREAD",
56216 ++ "MetricGroup": "Power",
56217 ++ "MetricName": "Power_License0_Utilization",
56218 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
56219 ++ },
56220 ++ {
56221 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. SMT version; use when SMT is enabled and measuring per logical CPU.",
56222 ++ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / 2 / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
56223 ++ "MetricGroup": "Power_SMT",
56224 ++ "MetricName": "Power_License0_Utilization_SMT",
56225 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes. SMT version; use when SMT is enabled and measuring per logical CPU."
56226 ++ },
56227 ++ {
56228 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
56229 ++ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / CPU_CLK_UNHALTED.THREAD",
56230 ++ "MetricGroup": "Power",
56231 ++ "MetricName": "Power_License1_Utilization",
56232 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
56233 ++ },
56234 ++ {
56235 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. SMT version; use when SMT is enabled and measuring per logical CPU.",
56236 ++ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / 2 / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
56237 ++ "MetricGroup": "Power_SMT",
56238 ++ "MetricName": "Power_License1_Utilization_SMT",
56239 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions. SMT version; use when SMT is enabled and measuring per logical CPU."
56240 ++ },
56241 ++ {
56242 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
56243 ++ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / CPU_CLK_UNHALTED.THREAD",
56244 ++ "MetricGroup": "Power",
56245 ++ "MetricName": "Power_License2_Utilization",
56246 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
56247 ++ },
56248 ++ {
56249 ++ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). SMT version; use when SMT is enabled and measuring per logical CPU.",
56250 ++ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / 2 / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
56251 ++ "MetricGroup": "Power_SMT",
56252 ++ "MetricName": "Power_License2_Utilization_SMT",
56253 ++ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions. SMT version; use when SMT is enabled and measuring per logical CPU."
56254 ++ },
56255 + {
56256 + "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
56257 + "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
56258 +@@ -240,34 +595,46 @@
56259 + "MetricGroup": "OS",
56260 + "MetricName": "Kernel_Utilization"
56261 + },
56262 ++ {
56263 ++ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
56264 ++ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
56265 ++ "MetricGroup": "OS",
56266 ++ "MetricName": "Kernel_CPI"
56267 ++ },
56268 + {
56269 + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
56270 + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
56271 +- "MetricGroup": "HPC;MemoryBW;SoC",
56272 ++ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
56273 + "MetricName": "DRAM_BW_Use"
56274 + },
56275 + {
56276 + "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
56277 + "MetricExpr": "1000000000 * ( cha@event\\=0x36\\,umask\\=0x21\\,config\\=0x40433@ / cha@event\\=0x35\\,umask\\=0x21\\,config\\=0x40433@ ) / ( cha_0@event\\=0x0@ / duration_time )",
56278 +- "MetricGroup": "MemoryLat;SoC",
56279 ++ "MetricGroup": "Mem;MemoryLat;SoC",
56280 + "MetricName": "MEM_Read_Latency"
56281 + },
56282 + {
56283 + "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
56284 + "MetricExpr": "cha@event\\=0x36\\,umask\\=0x21\\,config\\=0x40433@ / cha@event\\=0x36\\,umask\\=0x21\\,config\\=0x40433\\,thresh\\=1@",
56285 +- "MetricGroup": "MemoryBW;SoC",
56286 ++ "MetricGroup": "Mem;MemoryBW;SoC",
56287 + "MetricName": "MEM_Parallel_Reads"
56288 + },
56289 ++ {
56290 ++ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
56291 ++ "MetricExpr": "1000000000 * ( UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS ) / imc_0@event\\=0x0@",
56292 ++ "MetricGroup": "Mem;MemoryLat;SoC;Server",
56293 ++ "MetricName": "MEM_DRAM_Read_Latency"
56294 ++ },
56295 + {
56296 + "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
56297 + "MetricExpr": "( UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3 ) * 4 / 1000000000 / duration_time",
56298 +- "MetricGroup": "IoBW;SoC;Server",
56299 ++ "MetricGroup": "IoBW;Mem;SoC;Server",
56300 + "MetricName": "IO_Write_BW"
56301 + },
56302 + {
56303 + "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]",
56304 + "MetricExpr": "( UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3 ) * 4 / 1000000000 / duration_time",
56305 +- "MetricGroup": "IoBW;SoC;Server",
56306 ++ "MetricGroup": "IoBW;Mem;SoC;Server",
56307 + "MetricName": "IO_Read_BW"
56308 + },
56309 + {
56310 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
56311 +index 6ed92bc5c129b..06c5ca26ca3f3 100644
56312 +--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
56313 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
56314 +@@ -537,6 +537,18 @@
56315 + "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
56316 + "Unit": "IIO"
56317 + },
56318 ++ {
56319 ++ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
56320 ++ "Counter": "0,1,2,3",
56321 ++ "EventCode": "0xC2",
56322 ++ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
56323 ++ "FCMask": "0x4",
56324 ++ "PerPkg": "1",
56325 ++ "PortMask": "0x0f",
56326 ++ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
56327 ++ "UMask": "0x03",
56328 ++ "Unit": "IIO"
56329 ++ },
56330 + {
56331 + "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
56332 + "Counter": "0,1,2,3",
56333 +@@ -585,6 +597,17 @@
56334 + "UMask": "0x03",
56335 + "Unit": "IIO"
56336 + },
56337 ++ {
56338 ++ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
56339 ++ "Counter": "2,3",
56340 ++ "EventCode": "0xD5",
56341 ++ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
56342 ++ "FCMask": "0x04",
56343 ++ "PerPkg": "1",
56344 ++ "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
56345 ++ "UMask": "0x0f",
56346 ++ "Unit": "IIO"
56347 ++ },
56348 + {
56349 + "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
56350 + "Counter": "2,3",
56351 +diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
56352 +new file mode 100755
56353 +index 0000000000000..6ffbb27afabac
56354 +--- /dev/null
56355 ++++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
56356 +@@ -0,0 +1,68 @@
56357 ++#!/bin/sh
56358 ++# Check Arm64 callgraphs are complete in fp mode
56359 ++# SPDX-License-Identifier: GPL-2.0
56360 ++
56361 ++lscpu | grep -q "aarch64" || exit 2
56362 ++
56363 ++if ! [ -x "$(command -v cc)" ]; then
56364 ++ echo "failed: no compiler, install gcc"
56365 ++ exit 2
56366 ++fi
56367 ++
56368 ++PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
56369 ++TEST_PROGRAM_SOURCE=$(mktemp /tmp/test_program.XXXXX.c)
56370 ++TEST_PROGRAM=$(mktemp /tmp/test_program.XXXXX)
56371 ++
56372 ++cleanup_files()
56373 ++{
56374 ++ rm -f $PERF_DATA
56375 ++ rm -f $TEST_PROGRAM_SOURCE
56376 ++ rm -f $TEST_PROGRAM
56377 ++}
56378 ++
56379 ++trap cleanup_files exit term int
56380 ++
56381 ++cat << EOF > $TEST_PROGRAM_SOURCE
56382 ++int a = 0;
56383 ++void leaf(void) {
56384 ++ for (;;)
56385 ++ a += a;
56386 ++}
56387 ++void parent(void) {
56388 ++ leaf();
56389 ++}
56390 ++int main(void) {
56391 ++ parent();
56392 ++ return 0;
56393 ++}
56394 ++EOF
56395 ++
56396 ++echo " + Compiling test program ($TEST_PROGRAM)..."
56397 ++
56398 ++CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
56399 ++cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
56400 ++
56401 ++# Add a 1 second delay to skip samples that are not in the leaf() function
56402 ++perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null &
56403 ++PID=$!
56404 ++
56405 ++echo " + Recording (PID=$PID)..."
56406 ++sleep 2
56407 ++echo " + Stopping perf-record..."
56408 ++
56409 ++kill $PID
56410 ++wait $PID
56411 ++
56412 ++# expected perf-script output:
56413 ++#
56414 ++# program
56415 ++# 728 leaf
56416 ++# 753 parent
56417 ++# 76c main
56418 ++# ...
56419 ++
56420 ++perf script -i $PERF_DATA -F comm,ip,sym | head -n4
56421 ++perf script -i $PERF_DATA -F comm,ip,sym | head -n4 | \
56422 ++ awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
56423 ++ sym[1] != "parent" ||
56424 ++ sym[2] != "main") exit 1 }'
56425 +diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
56426 +index 1acdf2fc31c59..3299fb0977b27 100644
56427 +--- a/tools/testing/cxl/Kbuild
56428 ++++ b/tools/testing/cxl/Kbuild
56429 +@@ -25,7 +25,7 @@ cxl_pmem-y += config_check.o
56430 +
56431 + obj-m += cxl_core.o
56432 +
56433 +-cxl_core-y := $(CXL_CORE_SRC)/bus.o
56434 ++cxl_core-y := $(CXL_CORE_SRC)/port.o
56435 + cxl_core-y += $(CXL_CORE_SRC)/pmem.o
56436 + cxl_core-y += $(CXL_CORE_SRC)/regs.o
56437 + cxl_core-y += $(CXL_CORE_SRC)/memdev.o
56438 +diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
56439 +index 736d99006fb7a..f0a410962af0d 100644
56440 +--- a/tools/testing/cxl/test/cxl.c
56441 ++++ b/tools/testing/cxl/test/cxl.c
56442 +@@ -511,7 +511,7 @@ static __init int cxl_test_init(void)
56443 +
56444 + for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
56445 + struct platform_device *bridge =
56446 +- cxl_host_bridge[i / NR_CXL_ROOT_PORTS];
56447 ++ cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
56448 + struct platform_device *pdev;
56449 +
56450 + pdev = platform_device_alloc("cxl_root_port", i);
56451 +diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
56452 +index d0f06e40c16d0..eac71fbb24ce2 100644
56453 +--- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c
56454 ++++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
56455 +@@ -1,13 +1,24 @@
56456 + // SPDX-License-Identifier: GPL-2.0
56457 +-#include <test_progs.h>
56458 +-#include "bind_perm.skel.h"
56459 +-
56460 ++#define _GNU_SOURCE
56461 ++#include <sched.h>
56462 ++#include <stdlib.h>
56463 + #include <sys/types.h>
56464 + #include <sys/socket.h>
56465 + #include <sys/capability.h>
56466 +
56467 ++#include "test_progs.h"
56468 ++#include "bind_perm.skel.h"
56469 ++
56470 + static int duration;
56471 +
56472 ++static int create_netns(void)
56473 ++{
56474 ++ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
56475 ++ return -1;
56476 ++
56477 ++ return 0;
56478 ++}
56479 ++
56480 + void try_bind(int family, int port, int expected_errno)
56481 + {
56482 + struct sockaddr_storage addr = {};
56483 +@@ -75,6 +86,9 @@ void test_bind_perm(void)
56484 + struct bind_perm *skel;
56485 + int cgroup_fd;
56486 +
56487 ++ if (create_netns())
56488 ++ return;
56489 ++
56490 + cgroup_fd = test__join_cgroup("/bind_perm");
56491 + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
56492 + return;
56493 +diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
56494 +new file mode 100644
56495 +index 0000000000000..5bb11fe595a43
56496 +--- /dev/null
56497 ++++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
56498 +@@ -0,0 +1,19 @@
56499 ++/* SPDX-License-Identifier: GPL-2.0 */
56500 ++#ifndef __BPF_MISC_H__
56501 ++#define __BPF_MISC_H__
56502 ++
56503 ++#if defined(__TARGET_ARCH_x86)
56504 ++#define SYSCALL_WRAPPER 1
56505 ++#define SYS_PREFIX "__x64_"
56506 ++#elif defined(__TARGET_ARCH_s390)
56507 ++#define SYSCALL_WRAPPER 1
56508 ++#define SYS_PREFIX "__s390x_"
56509 ++#elif defined(__TARGET_ARCH_arm64)
56510 ++#define SYSCALL_WRAPPER 1
56511 ++#define SYS_PREFIX "__arm64_"
56512 ++#else
56513 ++#define SYSCALL_WRAPPER 0
56514 ++#define SYS_PREFIX "__se_"
56515 ++#endif
56516 ++
56517 ++#endif
56518 +diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
56519 +index 8812a90da4eb8..702578a5e496d 100644
56520 +--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
56521 ++++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
56522 +@@ -7,20 +7,7 @@
56523 +
56524 + #include <bpf/bpf_helpers.h>
56525 + #include <bpf/bpf_tracing.h>
56526 +-
56527 +-#if defined(__TARGET_ARCH_x86)
56528 +-#define SYSCALL_WRAPPER 1
56529 +-#define SYS_PREFIX "__x64_"
56530 +-#elif defined(__TARGET_ARCH_s390)
56531 +-#define SYSCALL_WRAPPER 1
56532 +-#define SYS_PREFIX "__s390x_"
56533 +-#elif defined(__TARGET_ARCH_arm64)
56534 +-#define SYSCALL_WRAPPER 1
56535 +-#define SYS_PREFIX "__arm64_"
56536 +-#else
56537 +-#define SYSCALL_WRAPPER 0
56538 +-#define SYS_PREFIX ""
56539 +-#endif
56540 ++#include "bpf_misc.h"
56541 +
56542 + static struct sockaddr_in old;
56543 +
56544 +diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
56545 +index 81b57b9aaaeae..7967348b11af6 100644
56546 +--- a/tools/testing/selftests/bpf/progs/test_sock_fields.c
56547 ++++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
56548 +@@ -113,7 +113,7 @@ static void tpcpy(struct bpf_tcp_sock *dst,
56549 +
56550 + #define RET_LOG() ({ \
56551 + linum = __LINE__; \
56552 +- bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
56553 ++ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
56554 + return CG_OK; \
56555 + })
56556 +
56557 +diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh
56558 +index ec4e15948e406..5252b91f48a18 100755
56559 +--- a/tools/testing/selftests/bpf/test_lirc_mode2.sh
56560 ++++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh
56561 +@@ -3,6 +3,7 @@
56562 +
56563 + # Kselftest framework requirement - SKIP code is 4.
56564 + ksft_skip=4
56565 ++ret=$ksft_skip
56566 +
56567 + msg="skip all tests:"
56568 + if [ $UID != 0 ]; then
56569 +@@ -25,7 +26,7 @@ do
56570 + fi
56571 + done
56572 +
56573 +-if [ -n $LIRCDEV ];
56574 ++if [ -n "$LIRCDEV" ];
56575 + then
56576 + TYPE=lirc_mode2
56577 + ./test_lirc_mode2_user $LIRCDEV $INPUTDEV
56578 +@@ -36,3 +37,5 @@ then
56579 + echo -e ${GREEN}"PASS: $TYPE"${NC}
56580 + fi
56581 + fi
56582 ++
56583 ++exit $ret
56584 +diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
56585 +index b497bb85b667f..6c69c42b1d607 100755
56586 +--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
56587 ++++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
56588 +@@ -120,6 +120,14 @@ setup()
56589 + ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
56590 + ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
56591 +
56592 ++ # disable IPv6 DAD because it sometimes takes too long and fails tests
56593 ++ ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0
56594 ++ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0
56595 ++ ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0
56596 ++ ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0
56597 ++ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0
56598 ++ ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0
56599 ++
56600 + ip link add veth1 type veth peer name veth2
56601 + ip link add veth3 type veth peer name veth4
56602 + ip link add veth5 type veth peer name veth6
56603 +@@ -289,7 +297,7 @@ test_ping()
56604 + ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null
56605 + RET=$?
56606 + elif [ "${PROTO}" == "IPv6" ] ; then
56607 +- ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
56608 ++ ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
56609 + RET=$?
56610 + else
56611 + echo " test_ping: unknown PROTO: ${PROTO}"
56612 +diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
56613 +index 05f8727409997..cc57cb87e65f6 100755
56614 +--- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
56615 ++++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
56616 +@@ -32,6 +32,11 @@ DRV_MODE="xdpgeneric xdpdrv xdpegress"
56617 + PASS=0
56618 + FAIL=0
56619 + LOG_DIR=$(mktemp -d)
56620 ++declare -a NS
56621 ++NS[0]="ns0-$(mktemp -u XXXXXX)"
56622 ++NS[1]="ns1-$(mktemp -u XXXXXX)"
56623 ++NS[2]="ns2-$(mktemp -u XXXXXX)"
56624 ++NS[3]="ns3-$(mktemp -u XXXXXX)"
56625 +
56626 + test_pass()
56627 + {
56628 +@@ -47,11 +52,9 @@ test_fail()
56629 +
56630 + clean_up()
56631 + {
56632 +- for i in $(seq $NUM); do
56633 +- ip link del veth$i 2> /dev/null
56634 +- ip netns del ns$i 2> /dev/null
56635 ++ for i in $(seq 0 $NUM); do
56636 ++ ip netns del ${NS[$i]} 2> /dev/null
56637 + done
56638 +- ip netns del ns0 2> /dev/null
56639 + }
56640 +
56641 + # Kselftest framework requirement - SKIP code is 4.
56642 +@@ -79,23 +82,22 @@ setup_ns()
56643 + mode="xdpdrv"
56644 + fi
56645 +
56646 +- ip netns add ns0
56647 ++ ip netns add ${NS[0]}
56648 + for i in $(seq $NUM); do
56649 +- ip netns add ns$i
56650 +- ip -n ns$i link add veth0 index 2 type veth \
56651 +- peer name veth$i netns ns0 index $((1 + $i))
56652 +- ip -n ns0 link set veth$i up
56653 +- ip -n ns$i link set veth0 up
56654 +-
56655 +- ip -n ns$i addr add 192.0.2.$i/24 dev veth0
56656 +- ip -n ns$i addr add 2001:db8::$i/64 dev veth0
56657 ++ ip netns add ${NS[$i]}
56658 ++ ip -n ${NS[$i]} link add veth0 type veth peer name veth$i netns ${NS[0]}
56659 ++ ip -n ${NS[$i]} link set veth0 up
56660 ++ ip -n ${NS[0]} link set veth$i up
56661 ++
56662 ++ ip -n ${NS[$i]} addr add 192.0.2.$i/24 dev veth0
56663 ++ ip -n ${NS[$i]} addr add 2001:db8::$i/64 dev veth0
56664 + # Add a neigh entry for IPv4 ping test
56665 +- ip -n ns$i neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0
56666 +- ip -n ns$i link set veth0 $mode obj \
56667 ++ ip -n ${NS[$i]} neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0
56668 ++ ip -n ${NS[$i]} link set veth0 $mode obj \
56669 + xdp_dummy.o sec xdp &> /dev/null || \
56670 + { test_fail "Unable to load dummy xdp" && exit 1; }
56671 + IFACES="$IFACES veth$i"
56672 +- veth_mac[$i]=$(ip -n ns0 link show veth$i | awk '/link\/ether/ {print $2}')
56673 ++ veth_mac[$i]=$(ip -n ${NS[0]} link show veth$i | awk '/link\/ether/ {print $2}')
56674 + done
56675 + }
56676 +
56677 +@@ -104,10 +106,10 @@ do_egress_tests()
56678 + local mode=$1
56679 +
56680 + # mac test
56681 +- ip netns exec ns2 tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-2_${mode}.log &
56682 +- ip netns exec ns3 tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-3_${mode}.log &
56683 ++ ip netns exec ${NS[2]} tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-2_${mode}.log &
56684 ++ ip netns exec ${NS[3]} tcpdump -e -i veth0 -nn -l -e &> ${LOG_DIR}/mac_ns1-3_${mode}.log &
56685 + sleep 0.5
56686 +- ip netns exec ns1 ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
56687 ++ ip netns exec ${NS[1]} ping 192.0.2.254 -i 0.1 -c 4 &> /dev/null
56688 + sleep 0.5
56689 + pkill tcpdump
56690 +
56691 +@@ -123,18 +125,18 @@ do_ping_tests()
56692 + local mode=$1
56693 +
56694 + # ping6 test: echo request should be redirect back to itself, not others
56695 +- ip netns exec ns1 ip neigh add 2001:db8::2 dev veth0 lladdr 00:00:00:00:00:02
56696 ++ ip netns exec ${NS[1]} ip neigh add 2001:db8::2 dev veth0 lladdr 00:00:00:00:00:02
56697 +
56698 +- ip netns exec ns1 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-1_${mode}.log &
56699 +- ip netns exec ns2 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-2_${mode}.log &
56700 +- ip netns exec ns3 tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-3_${mode}.log &
56701 ++ ip netns exec ${NS[1]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-1_${mode}.log &
56702 ++ ip netns exec ${NS[2]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-2_${mode}.log &
56703 ++ ip netns exec ${NS[3]} tcpdump -i veth0 -nn -l -e &> ${LOG_DIR}/ns1-3_${mode}.log &
56704 + sleep 0.5
56705 + # ARP test
56706 +- ip netns exec ns1 arping -q -c 2 -I veth0 192.0.2.254
56707 ++ ip netns exec ${NS[1]} arping -q -c 2 -I veth0 192.0.2.254
56708 + # IPv4 test
56709 +- ip netns exec ns1 ping 192.0.2.253 -i 0.1 -c 4 &> /dev/null
56710 ++ ip netns exec ${NS[1]} ping 192.0.2.253 -i 0.1 -c 4 &> /dev/null
56711 + # IPv6 test
56712 +- ip netns exec ns1 ping6 2001:db8::2 -i 0.1 -c 2 &> /dev/null
56713 ++ ip netns exec ${NS[1]} ping6 2001:db8::2 -i 0.1 -c 2 &> /dev/null
56714 + sleep 0.5
56715 + pkill tcpdump
56716 +
56717 +@@ -180,7 +182,7 @@ do_tests()
56718 + xdpgeneric) drv_p="-S";;
56719 + esac
56720 +
56721 +- ip netns exec ns0 ./xdp_redirect_multi $drv_p $IFACES &> ${LOG_DIR}/xdp_redirect_${mode}.log &
56722 ++ ip netns exec ${NS[0]} ./xdp_redirect_multi $drv_p $IFACES &> ${LOG_DIR}/xdp_redirect_${mode}.log &
56723 + xdp_pid=$!
56724 + sleep 1
56725 + if ! ps -p $xdp_pid > /dev/null; then
56726 +@@ -197,10 +199,10 @@ do_tests()
56727 + kill $xdp_pid
56728 + }
56729 +
56730 +-trap clean_up EXIT
56731 +-
56732 + check_env
56733 +
56734 ++trap clean_up EXIT
56735 ++
56736 + for mode in ${DRV_MODE}; do
56737 + setup_ns $mode
56738 + do_tests $mode
56739 +diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
56740 +index 0a5d23da486df..ffa5502ad95ed 100644
56741 +--- a/tools/testing/selftests/bpf/xdpxceiver.c
56742 ++++ b/tools/testing/selftests/bpf/xdpxceiver.c
56743 +@@ -906,7 +906,10 @@ static bool rx_stats_are_valid(struct ifobject *ifobject)
56744 + return true;
56745 + case STAT_TEST_RX_FULL:
56746 + xsk_stat = stats.rx_ring_full;
56747 +- expected_stat -= RX_FULL_RXQSIZE;
56748 ++ if (ifobject->umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
56749 ++ expected_stat = ifobject->umem->num_frames - RX_FULL_RXQSIZE;
56750 ++ else
56751 ++ expected_stat = XSK_RING_PROD__DEFAULT_NUM_DESCS - RX_FULL_RXQSIZE;
56752 + break;
56753 + case STAT_TEST_RX_FILL_EMPTY:
56754 + xsk_stat = stats.rx_fill_ring_empty_descs;
56755 +diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
56756 +index a26a3fa9e9255..8bd847f0463cd 100644
56757 +--- a/tools/testing/selftests/lkdtm/config
56758 ++++ b/tools/testing/selftests/lkdtm/config
56759 +@@ -6,6 +6,7 @@ CONFIG_HARDENED_USERCOPY=y
56760 + # CONFIG_HARDENED_USERCOPY_FALLBACK is not set
56761 + CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
56762 + CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
56763 ++CONFIG_UBSAN=y
56764 + CONFIG_UBSAN_BOUNDS=y
56765 + CONFIG_UBSAN_TRAP=y
56766 + CONFIG_STACKPROTECTOR_STRONG=y
56767 +diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
56768 +index 3dece8b292536..b57e91e1c3f28 100644
56769 +--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
56770 ++++ b/tools/testing/selftests/net/af_unix/test_unix_oob.c
56771 +@@ -218,10 +218,10 @@ main(int argc, char **argv)
56772 +
56773 + /* Test 1:
56774 + * veriyf that SIGURG is
56775 +- * delivered and 63 bytes are
56776 +- * read and oob is '@'
56777 ++ * delivered, 63 bytes are
56778 ++ * read, oob is '@', and POLLPRI works.
56779 + */
56780 +- wait_for_data(pfd, POLLIN | POLLPRI);
56781 ++ wait_for_data(pfd, POLLPRI);
56782 + read_oob(pfd, &oob);
56783 + len = read_data(pfd, buf, 1024);
56784 + if (!signal_recvd || len != 63 || oob != '@') {
56785 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
56786 +index f0f4ab96b8f3e..621af6895f4d5 100755
56787 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
56788 ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
56789 +@@ -432,6 +432,8 @@ do_transfer()
56790 + local stat_ackrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
56791 + local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
56792 + local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
56793 ++ local stat_csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
56794 ++ local stat_csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
56795 +
56796 + timeout ${timeout_test} \
56797 + ip netns exec ${listener_ns} \
56798 +@@ -524,6 +526,23 @@ do_transfer()
56799 + fi
56800 + fi
56801 +
56802 ++ if $checksum; then
56803 ++ local csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
56804 ++ local csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
56805 ++
56806 ++ local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
56807 ++ if [ $csum_err_s_nr -gt 0 ]; then
56808 ++ printf "[ FAIL ]\nserver got $csum_err_s_nr data checksum error[s]"
56809 ++ rets=1
56810 ++ fi
56811 ++
56812 ++ local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
56813 ++ if [ $csum_err_c_nr -gt 0 ]; then
56814 ++ printf "[ FAIL ]\nclient got $csum_err_c_nr data checksum error[s]"
56815 ++ retc=1
56816 ++ fi
56817 ++ fi
56818 ++
56819 + if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
56820 + printf "[ OK ]"
56821 + fi
56822 +diff --git a/tools/testing/selftests/net/test_vxlan_under_vrf.sh b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
56823 +index ea5a7a808f120..1fd1250ebc667 100755
56824 +--- a/tools/testing/selftests/net/test_vxlan_under_vrf.sh
56825 ++++ b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
56826 +@@ -120,11 +120,11 @@ echo "[ OK ]"
56827 +
56828 + # Move the underlay to a non-default VRF
56829 + ip -netns hv-1 link set veth0 vrf vrf-underlay
56830 +-ip -netns hv-1 link set veth0 down
56831 +-ip -netns hv-1 link set veth0 up
56832 ++ip -netns hv-1 link set vxlan0 down
56833 ++ip -netns hv-1 link set vxlan0 up
56834 + ip -netns hv-2 link set veth0 vrf vrf-underlay
56835 +-ip -netns hv-2 link set veth0 down
56836 +-ip -netns hv-2 link set veth0 up
56837 ++ip -netns hv-2 link set vxlan0 down
56838 ++ip -netns hv-2 link set vxlan0 up
56839 +
56840 + echo -n "Check VM connectivity through VXLAN (underlay in a VRF) "
56841 + ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false)
56842 +diff --git a/tools/testing/selftests/net/timestamping.c b/tools/testing/selftests/net/timestamping.c
56843 +index aee631c5284eb..044bc0e9ed81a 100644
56844 +--- a/tools/testing/selftests/net/timestamping.c
56845 ++++ b/tools/testing/selftests/net/timestamping.c
56846 +@@ -325,8 +325,8 @@ int main(int argc, char **argv)
56847 + struct ifreq device;
56848 + struct ifreq hwtstamp;
56849 + struct hwtstamp_config hwconfig, hwconfig_requested;
56850 +- struct so_timestamping so_timestamping_get = { 0, -1 };
56851 +- struct so_timestamping so_timestamping = { 0, -1 };
56852 ++ struct so_timestamping so_timestamping_get = { 0, 0 };
56853 ++ struct so_timestamping so_timestamping = { 0, 0 };
56854 + struct sockaddr_in addr;
56855 + struct ip_mreq imr;
56856 + struct in_addr iaddr;
56857 +diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
56858 +index 6e468e0f42f78..5d70b04c482c9 100644
56859 +--- a/tools/testing/selftests/net/tls.c
56860 ++++ b/tools/testing/selftests/net/tls.c
56861 +@@ -683,6 +683,9 @@ TEST_F(tls, splice_cmsg_to_pipe)
56862 + char buf[10];
56863 + int p[2];
56864 +
56865 ++ if (self->notls)
56866 ++ SKIP(return, "no TLS support");
56867 ++
56868 + ASSERT_GE(pipe(p), 0);
56869 + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
56870 + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
56871 +@@ -703,6 +706,9 @@ TEST_F(tls, splice_dec_cmsg_to_pipe)
56872 + char buf[10];
56873 + int p[2];
56874 +
56875 ++ if (self->notls)
56876 ++ SKIP(return, "no TLS support");
56877 ++
56878 + ASSERT_GE(pipe(p), 0);
56879 + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
56880 + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
56881 +diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
56882 +index eae88aacca2aa..b2929fb15f7e6 100755
56883 +--- a/tools/testing/selftests/rcutorture/bin/torture.sh
56884 ++++ b/tools/testing/selftests/rcutorture/bin/torture.sh
56885 +@@ -71,8 +71,8 @@ usage () {
56886 + echo " --configs-rcutorture \"config-file list w/ repeat factor (3*TINY01)\""
56887 + echo " --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\""
56888 + echo " --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
56889 +- echo " --doall"
56890 +- echo " --doallmodconfig / --do-no-allmodconfig"
56891 ++ echo " --do-all"
56892 ++ echo " --do-allmodconfig / --do-no-allmodconfig"
56893 + echo " --do-clocksourcewd / --do-no-clocksourcewd"
56894 + echo " --do-kasan / --do-no-kasan"
56895 + echo " --do-kcsan / --do-no-kcsan"
56896 +diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
56897 +index 2956584e1e37f..75af864e07b65 100644
56898 +--- a/tools/testing/selftests/sgx/Makefile
56899 ++++ b/tools/testing/selftests/sgx/Makefile
56900 +@@ -4,7 +4,7 @@ include ../lib.mk
56901 +
56902 + .PHONY: all clean
56903 +
56904 +-CAN_BUILD_X86_64 := $(shell ../x86/check_cc.sh $(CC) \
56905 ++CAN_BUILD_X86_64 := $(shell ../x86/check_cc.sh "$(CC)" \
56906 + ../x86/trivial_64bit_program.c)
56907 +
56908 + ifndef OBJCOPY
56909 +diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
56910 +index 9d4322c946e2b..006b464c8fc94 100644
56911 +--- a/tools/testing/selftests/sgx/load.c
56912 ++++ b/tools/testing/selftests/sgx/load.c
56913 +@@ -21,7 +21,7 @@
56914 +
56915 + void encl_delete(struct encl *encl)
56916 + {
56917 +- struct encl_segment *heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
56918 ++ struct encl_segment *heap_seg;
56919 +
56920 + if (encl->encl_base)
56921 + munmap((void *)encl->encl_base, encl->encl_size);
56922 +@@ -32,10 +32,11 @@ void encl_delete(struct encl *encl)
56923 + if (encl->fd)
56924 + close(encl->fd);
56925 +
56926 +- munmap(heap_seg->src, heap_seg->size);
56927 +-
56928 +- if (encl->segment_tbl)
56929 ++ if (encl->segment_tbl) {
56930 ++ heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
56931 ++ munmap(heap_seg->src, heap_seg->size);
56932 + free(encl->segment_tbl);
56933 ++ }
56934 +
56935 + memset(encl, 0, sizeof(*encl));
56936 + }
56937 +diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
56938 +index 370c4995f7c4a..b0bd95a4730d5 100644
56939 +--- a/tools/testing/selftests/sgx/main.c
56940 ++++ b/tools/testing/selftests/sgx/main.c
56941 +@@ -147,6 +147,7 @@ static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
56942 + if (!encl_load("test_encl.elf", encl, heap_size)) {
56943 + encl_delete(encl);
56944 + TH_LOG("Failed to load the test enclave.\n");
56945 ++ return false;
56946 + }
56947 +
56948 + if (!encl_measure(encl))
56949 +@@ -185,8 +186,6 @@ static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
56950 + return true;
56951 +
56952 + err:
56953 +- encl_delete(encl);
56954 +-
56955 + for (i = 0; i < encl->nr_segments; i++) {
56956 + seg = &encl->segment_tbl[i];
56957 +
56958 +@@ -207,6 +206,8 @@ err:
56959 +
56960 + TH_LOG("Failed to initialize the test enclave.\n");
56961 +
56962 ++ encl_delete(encl);
56963 ++
56964 + return false;
56965 + }
56966 +
56967 +diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
56968 +index a14b5b8008970..1530c3e0242ef 100644
56969 +--- a/tools/testing/selftests/vm/Makefile
56970 ++++ b/tools/testing/selftests/vm/Makefile
56971 +@@ -51,9 +51,9 @@ TEST_GEN_FILES += split_huge_page_test
56972 + TEST_GEN_FILES += ksm_tests
56973 +
56974 + ifeq ($(MACHINE),x86_64)
56975 +-CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
56976 +-CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c)
56977 +-CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie)
56978 ++CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32)
56979 ++CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
56980 ++CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
56981 +
56982 + TARGETS := protection_keys
56983 + BINARIES_32 := $(TARGETS:%=%_32)
56984 +diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
56985 +index 3fc1d2ee29485..c964bfe9fbcda 100644
56986 +--- a/tools/testing/selftests/vm/userfaultfd.c
56987 ++++ b/tools/testing/selftests/vm/userfaultfd.c
56988 +@@ -120,6 +120,9 @@ struct uffd_stats {
56989 + ~(unsigned long)(sizeof(unsigned long long) \
56990 + - 1)))
56991 +
56992 ++#define swap(a, b) \
56993 ++ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
56994 ++
56995 + const char *examples =
56996 + "# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
56997 + "./userfaultfd anon 100 99999\n\n"
56998 +diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
56999 +index 8a1f62ab3c8e6..53df7d3893d31 100644
57000 +--- a/tools/testing/selftests/x86/Makefile
57001 ++++ b/tools/testing/selftests/x86/Makefile
57002 +@@ -6,9 +6,9 @@ include ../lib.mk
57003 + .PHONY: all all_32 all_64 warn_32bit_failure clean
57004 +
57005 + UNAME_M := $(shell uname -m)
57006 +-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
57007 +-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
57008 +-CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie)
57009 ++CAN_BUILD_I386 := $(shell ./check_cc.sh "$(CC)" trivial_32bit_program.c -m32)
57010 ++CAN_BUILD_X86_64 := $(shell ./check_cc.sh "$(CC)" trivial_64bit_program.c)
57011 ++CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie)
57012 +
57013 + TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
57014 + check_initial_reg_state sigreturn iopl ioperm \
57015 +diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh
57016 +index 3e2089c8cf549..8c669c0d662ee 100755
57017 +--- a/tools/testing/selftests/x86/check_cc.sh
57018 ++++ b/tools/testing/selftests/x86/check_cc.sh
57019 +@@ -7,7 +7,7 @@ CC="$1"
57020 + TESTPROG="$2"
57021 + shift 2
57022 +
57023 +-if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
57024 ++if [ -n "$CC" ] && $CC -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
57025 + echo 1
57026 + else
57027 + echo 0
57028 +diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
57029 +index 52c053cc1789d..e88f5c870141e 100644
57030 +--- a/tools/tracing/rtla/src/osnoise_hist.c
57031 ++++ b/tools/tracing/rtla/src/osnoise_hist.c
57032 +@@ -782,7 +782,7 @@ int osnoise_hist_main(int argc, char *argv[])
57033 + return_value = 0;
57034 +
57035 + if (!tracefs_trace_is_on(trace->inst)) {
57036 +- printf("rtla timelat hit stop tracing\n");
57037 ++ printf("rtla osnoise hit stop tracing\n");
57038 + if (params->trace_output) {
57039 + printf(" Saving trace to %s\n", params->trace_output);
57040 + save_trace_to_file(record->trace.inst, params->trace_output);
57041 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
57042 +index 0afc016cc54d4..246168310a754 100644
57043 +--- a/virt/kvm/kvm_main.c
57044 ++++ b/virt/kvm/kvm_main.c
57045 +@@ -117,6 +117,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
57046 +
57047 + static const struct file_operations stat_fops_per_vm;
57048 +
57049 ++static struct file_operations kvm_chardev_ops;
57050 ++
57051 + static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
57052 + unsigned long arg);
57053 + #ifdef CONFIG_KVM_COMPAT
57054 +@@ -1137,6 +1139,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
57055 + preempt_notifier_inc();
57056 + kvm_init_pm_notifier(kvm);
57057 +
57058 ++ /*
57059 ++ * When the fd passed to this ioctl() is opened it pins the module,
57060 ++ * but try_module_get() also prevents getting a reference if the module
57061 ++ * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
57062 ++ */
57063 ++ if (!try_module_get(kvm_chardev_ops.owner)) {
57064 ++ r = -ENODEV;
57065 ++ goto out_err;
57066 ++ }
57067 ++
57068 + return kvm;
57069 +
57070 + out_err:
57071 +@@ -1226,6 +1238,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
57072 + preempt_notifier_dec();
57073 + hardware_disable_all();
57074 + mmdrop(mm);
57075 ++ module_put(kvm_chardev_ops.owner);
57076 + }
57077 +
57078 + void kvm_get_kvm(struct kvm *kvm)
57079 +diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
57080 +index ce878f4be4daa..1621f8efd9616 100644
57081 +--- a/virt/kvm/pfncache.c
57082 ++++ b/virt/kvm/pfncache.c
57083 +@@ -191,6 +191,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
57084 + gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
57085 +
57086 + if (kvm_is_error_hva(gpc->uhva)) {
57087 ++ gpc->pfn = KVM_PFN_ERR_FAULT;
57088 + ret = -EFAULT;
57089 + goto out;
57090 + }