From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Thu, 25 Aug 2022 10:33:01
Message-Id: 1661423562.594ed98c28d347d1a1836f73e116a7af5db904ef.mpagano@gentoo
commit: 594ed98c28d347d1a1836f73e116a7af5db904ef
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 25 10:32:42 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug 25 10:32:42 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=594ed98c

Linux patch 5.15.63

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1062_linux-5.15.63.patch | 8517 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8521 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 42dd91d6..aadae770 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -291,6 +291,10 @@ Patch: 1061_linux-5.15.62.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.62
23
24 +Patch: 1062_linux-5.15.63.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.63
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1062_linux-5.15.63.patch b/1062_linux-5.15.63.patch
33 new file mode 100644
34 index 00000000..2c3f0013
35 --- /dev/null
36 +++ b/1062_linux-5.15.63.patch
37 @@ -0,0 +1,8517 @@
38 +diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
39 +index 093cdaefdb373..d8b101c97031b 100644
40 +--- a/Documentation/atomic_bitops.txt
41 ++++ b/Documentation/atomic_bitops.txt
42 +@@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is:
43 + - RMW operations that have a return value are fully ordered.
44 +
45 + - RMW operations that are conditional are unordered on FAILURE,
46 +- otherwise the above rules apply. In the case of test_and_{}_bit() operations,
47 ++ otherwise the above rules apply. In the case of test_and_set_bit_lock(),
48 + if the bit in memory is unchanged by the operation then it is deemed to have
49 + failed.
50 +
51 +diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
52 +index 880ddafc634e6..a702a18d845e9 100644
53 +--- a/Documentation/devicetree/bindings/arm/qcom.yaml
54 ++++ b/Documentation/devicetree/bindings/arm/qcom.yaml
55 +@@ -135,28 +135,34 @@ properties:
56 + - const: qcom,msm8974
57 +
58 + - items:
59 +- - enum:
60 +- - alcatel,idol347
61 +- - const: qcom,msm8916-mtp/1
62 + - const: qcom,msm8916-mtp
63 ++ - const: qcom,msm8916-mtp/1
64 + - const: qcom,msm8916
65 +
66 + - items:
67 + - enum:
68 +- - longcheer,l8150
69 ++ - alcatel,idol347
70 + - samsung,a3u-eur
71 + - samsung,a5u-eur
72 + - const: qcom,msm8916
73 +
74 ++ - items:
75 ++ - const: longcheer,l8150
76 ++ - const: qcom,msm8916-v1-qrd/9-v1
77 ++ - const: qcom,msm8916
78 ++
79 + - items:
80 + - enum:
81 + - sony,karin_windy
82 ++ - const: qcom,apq8094
83 ++
84 ++ - items:
85 ++ - enum:
86 + - sony,karin-row
87 + - sony,satsuki-row
88 + - sony,sumire-row
89 + - sony,suzuran-row
90 +- - qcom,msm8994
91 +- - const: qcom,apq8094
92 ++ - const: qcom,msm8994
93 +
94 + - items:
95 + - const: qcom,msm8996-mtp
96 +diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
97 +index 5a5b2214f0cae..005e0edd4609a 100644
98 +--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
99 ++++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
100 +@@ -22,16 +22,32 @@ properties:
101 + const: qcom,gcc-msm8996
102 +
103 + clocks:
104 ++ minItems: 3
105 + items:
106 + - description: XO source
107 + - description: Second XO source
108 + - description: Sleep clock source
109 ++ - description: PCIe 0 PIPE clock (optional)
110 ++ - description: PCIe 1 PIPE clock (optional)
111 ++ - description: PCIe 2 PIPE clock (optional)
112 ++ - description: USB3 PIPE clock (optional)
113 ++ - description: UFS RX symbol 0 clock (optional)
114 ++ - description: UFS RX symbol 1 clock (optional)
115 ++ - description: UFS TX symbol 0 clock (optional)
116 +
117 + clock-names:
118 ++ minItems: 3
119 + items:
120 + - const: cxo
121 + - const: cxo2
122 + - const: sleep_clk
123 ++ - const: pcie_0_pipe_clk_src
124 ++ - const: pcie_1_pipe_clk_src
125 ++ - const: pcie_2_pipe_clk_src
126 ++ - const: usb3_phy_pipe_clk_src
127 ++ - const: ufs_rx_symbol_0_clk_src
128 ++ - const: ufs_rx_symbol_1_clk_src
129 ++ - const: ufs_tx_symbol_0_clk_src
130 +
131 + '#clock-cells':
132 + const: 1
133 +diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
134 +index 378da2649e668..980f92ad9eba2 100644
135 +--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
136 ++++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
137 +@@ -11,7 +11,11 @@ maintainers:
138 +
139 + properties:
140 + compatible:
141 +- const: xlnx,zynq-gpio-1.0
142 ++ enum:
143 ++ - xlnx,zynq-gpio-1.0
144 ++ - xlnx,zynqmp-gpio-1.0
145 ++ - xlnx,versal-gpio-1.0
146 ++ - xlnx,pmc-gpio-1.0
147 +
148 + reg:
149 + maxItems: 1
150 +diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
151 +index f70f2e758a002..e66aac0ad735e 100644
152 +--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
153 ++++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
154 +@@ -47,12 +47,6 @@ properties:
155 + description:
156 + Properties for single LDO regulator.
157 +
158 +- properties:
159 +- regulator-name:
160 +- pattern: "^LDO[1-5]$"
161 +- description:
162 +- should be "LDO1", ..., "LDO5"
163 +-
164 + unevaluatedProperties: false
165 +
166 + "^BUCK[1-6]$":
167 +@@ -62,11 +56,6 @@ properties:
168 + Properties for single BUCK regulator.
169 +
170 + properties:
171 +- regulator-name:
172 +- pattern: "^BUCK[1-6]$"
173 +- description:
174 +- should be "BUCK1", ..., "BUCK6"
175 +-
176 + nxp,dvs-run-voltage:
177 + $ref: "/schemas/types.yaml#/definitions/uint32"
178 + minimum: 600000
179 +diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
180 +index 9787be21318e6..82d0ca5c00f3b 100644
181 +--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
182 ++++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
183 +@@ -49,6 +49,13 @@ properties:
184 + enum: [ 0, 1 ]
185 + default: 0
186 +
187 ++required:
188 ++ - compatible
189 ++ - reg
190 ++ - interrupts
191 ++ - clock-names
192 ++ - clocks
193 ++
194 + unevaluatedProperties: false
195 +
196 + examples:
197 +diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
198 +index ea72c8001256f..fafde1c06be67 100644
199 +--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
200 ++++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
201 +@@ -30,6 +30,13 @@ properties:
202 + clocks:
203 + maxItems: 2
204 +
205 ++required:
206 ++ - compatible
207 ++ - reg
208 ++ - interrupts
209 ++ - clock-names
210 ++ - clocks
211 ++
212 + unevaluatedProperties: false
213 +
214 + examples:
215 +diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
216 +index 11f7bacd4e2b0..620cbf00bedb5 100644
217 +--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
218 ++++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
219 +@@ -56,6 +56,7 @@ properties:
220 + - description: optional, wakeup interrupt used to support runtime PM
221 +
222 + interrupt-names:
223 ++ minItems: 1
224 + items:
225 + - const: host
226 + - const: wakeup
227 +diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst
228 +index c042176e17078..50ac87fa22958 100644
229 +--- a/Documentation/firmware-guide/acpi/apei/einj.rst
230 ++++ b/Documentation/firmware-guide/acpi/apei/einj.rst
231 +@@ -168,7 +168,7 @@ An error injection example::
232 + 0x00000008 Memory Correctable
233 + 0x00000010 Memory Uncorrectable non-fatal
234 + # echo 0x12345000 > param1 # Set memory address for injection
235 +- # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
236 ++ # echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page
237 + # echo 0x8 > error_type # Choose correctable memory error
238 + # echo 1 > error_inject # Inject now
239 +
240 +diff --git a/Makefile b/Makefile
241 +index 5b4f8f8851bf0..ea669530ec86d 100644
242 +--- a/Makefile
243 ++++ b/Makefile
244 +@@ -1,7 +1,7 @@
245 + # SPDX-License-Identifier: GPL-2.0
246 + VERSION = 5
247 + PATCHLEVEL = 15
248 +-SUBLEVEL = 62
249 ++SUBLEVEL = 63
250 + EXTRAVERSION =
251 + NAME = Trick or Treat
252 +
253 +@@ -1155,13 +1155,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
254 + $(patsubst %/,%,$(filter %/, $(core-) \
255 + $(drivers-) $(libs-))))
256 +
257 +-subdir-modorder := $(addsuffix modules.order,$(filter %/, \
258 +- $(core-y) $(core-m) $(libs-y) $(libs-m) \
259 +- $(drivers-y) $(drivers-m)))
260 +-
261 + build-dirs := $(vmlinux-dirs)
262 + clean-dirs := $(vmlinux-alldirs)
263 +
264 ++subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
265 ++
266 + # Externally visible symbols (used by link-vmlinux.sh)
267 + KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
268 + KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
269 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
270 +index fc6ee6c5972d0..1713630bf8f5a 100644
271 +--- a/arch/arm64/include/asm/kvm_host.h
272 ++++ b/arch/arm64/include/asm/kvm_host.h
273 +@@ -795,6 +795,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
274 + #define kvm_vcpu_has_pmu(vcpu) \
275 + (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
276 +
277 ++#define kvm_supports_32bit_el0() \
278 ++ (system_supports_32bit_el0() && \
279 ++ !static_branch_unlikely(&arm64_mismatched_32bit_el0))
280 ++
281 + int kvm_trng_call(struct kvm_vcpu *vcpu);
282 + #ifdef CONFIG_KVM
283 + extern phys_addr_t hyp_mem_base;
284 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
285 +index f181527f9d436..4cb265e153617 100644
286 +--- a/arch/arm64/kvm/arm.c
287 ++++ b/arch/arm64/kvm/arm.c
288 +@@ -712,8 +712,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
289 + if (likely(!vcpu_mode_is_32bit(vcpu)))
290 + return false;
291 +
292 +- return !system_supports_32bit_el0() ||
293 +- static_branch_unlikely(&arm64_mismatched_32bit_el0);
294 ++ return !kvm_supports_32bit_el0();
295 + }
296 +
297 + /**
298 +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
299 +index 5ce26bedf23c0..94108e2e09179 100644
300 +--- a/arch/arm64/kvm/guest.c
301 ++++ b/arch/arm64/kvm/guest.c
302 +@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
303 + u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
304 + switch (mode) {
305 + case PSR_AA32_MODE_USR:
306 +- if (!system_supports_32bit_el0())
307 ++ if (!kvm_supports_32bit_el0())
308 + return -EINVAL;
309 + break;
310 + case PSR_AA32_MODE_FIQ:
311 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
312 +index 7c18e429b4493..c11612db4a371 100644
313 +--- a/arch/arm64/kvm/sys_regs.c
314 ++++ b/arch/arm64/kvm/sys_regs.c
315 +@@ -649,7 +649,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
316 + */
317 + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
318 + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
319 +- if (!system_supports_32bit_el0())
320 ++ if (!kvm_supports_32bit_el0())
321 + val |= ARMV8_PMU_PMCR_LC;
322 + __vcpu_sys_reg(vcpu, r->reg) = val;
323 + }
324 +@@ -698,7 +698,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
325 + val = __vcpu_sys_reg(vcpu, PMCR_EL0);
326 + val &= ~ARMV8_PMU_PMCR_MASK;
327 + val |= p->regval & ARMV8_PMU_PMCR_MASK;
328 +- if (!system_supports_32bit_el0())
329 ++ if (!kvm_supports_32bit_el0())
330 + val |= ARMV8_PMU_PMCR_LC;
331 + __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
332 + kvm_pmu_handle_pmcr(vcpu, val);
333 +diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
334 +index 4045894d92802..584ed9f36290f 100644
335 +--- a/arch/csky/kernel/probes/kprobes.c
336 ++++ b/arch/csky/kernel/probes/kprobes.c
337 +@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
338 +
339 + void __kprobes arch_remove_kprobe(struct kprobe *p)
340 + {
341 ++ if (p->ainsn.api.insn) {
342 ++ free_insn_slot(p->ainsn.api.insn, 0);
343 ++ p->ainsn.api.insn = NULL;
344 ++ }
345 + }
346 +
347 + static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
348 +diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
349 +index 4218750414bbf..7dab46728aeda 100644
350 +--- a/arch/m68k/coldfire/device.c
351 ++++ b/arch/m68k/coldfire/device.c
352 +@@ -581,7 +581,7 @@ static struct platform_device mcf_esdhc = {
353 + };
354 + #endif /* MCFSDHC_BASE */
355 +
356 +-#if IS_ENABLED(CONFIG_CAN_FLEXCAN)
357 ++#ifdef MCFFLEXCAN_SIZE
358 +
359 + #include <linux/can/platform/flexcan.h>
360 +
361 +@@ -620,7 +620,7 @@ static struct platform_device mcf_flexcan0 = {
362 + .resource = mcf5441x_flexcan0_resource,
363 + .dev.platform_data = &mcf5441x_flexcan_info,
364 + };
365 +-#endif /* IS_ENABLED(CONFIG_CAN_FLEXCAN) */
366 ++#endif /* MCFFLEXCAN_SIZE */
367 +
368 + static struct platform_device *mcf_devices[] __initdata = {
369 + &mcf_uart,
370 +@@ -657,7 +657,7 @@ static struct platform_device *mcf_devices[] __initdata = {
371 + #ifdef MCFSDHC_BASE
372 + &mcf_esdhc,
373 + #endif
374 +-#if IS_ENABLED(CONFIG_CAN_FLEXCAN)
375 ++#ifdef MCFFLEXCAN_SIZE
376 + &mcf_flexcan0,
377 + #endif
378 + };
379 +diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
380 +index a994022e32c9f..ce05c0dd3acd7 100644
381 +--- a/arch/mips/cavium-octeon/octeon-platform.c
382 ++++ b/arch/mips/cavium-octeon/octeon-platform.c
383 +@@ -86,11 +86,12 @@ static void octeon2_usb_clocks_start(struct device *dev)
384 + "refclk-frequency", &clock_rate);
385 + if (i) {
386 + dev_err(dev, "No UCTL \"refclk-frequency\"\n");
387 ++ of_node_put(uctl_node);
388 + goto exit;
389 + }
390 + i = of_property_read_string(uctl_node,
391 + "refclk-type", &clock_type);
392 +-
393 ++ of_node_put(uctl_node);
394 + if (!i && strcmp("crystal", clock_type) == 0)
395 + is_crystal_clock = true;
396 + }
397 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
398 +index 046d51a454afc..3471a089bc05f 100644
399 +--- a/arch/mips/mm/tlbex.c
400 ++++ b/arch/mips/mm/tlbex.c
401 +@@ -634,7 +634,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
402 + return;
403 + }
404 +
405 +- if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
406 ++ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
407 + if (fill_includes_sw_bits) {
408 + UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
409 + } else {
410 +@@ -2573,7 +2573,7 @@ static void check_pabits(void)
411 + unsigned long entry;
412 + unsigned pabits, fillbits;
413 +
414 +- if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
415 ++ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
416 + /*
417 + * We'll only be making use of the fact that we can rotate bits
418 + * into the fill if the CPU supports RIXI, so don't bother
419 +diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h
420 +index cf37f55efbc22..bafb7b2ca59fc 100644
421 +--- a/arch/nios2/include/asm/entry.h
422 ++++ b/arch/nios2/include/asm/entry.h
423 +@@ -50,7 +50,8 @@
424 + stw r13, PT_R13(sp)
425 + stw r14, PT_R14(sp)
426 + stw r15, PT_R15(sp)
427 +- stw r2, PT_ORIG_R2(sp)
428 ++ movi r24, -1
429 ++ stw r24, PT_ORIG_R2(sp)
430 + stw r7, PT_ORIG_R7(sp)
431 +
432 + stw ra, PT_RA(sp)
433 +diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
434 +index 6424621448728..9da34c3022a27 100644
435 +--- a/arch/nios2/include/asm/ptrace.h
436 ++++ b/arch/nios2/include/asm/ptrace.h
437 +@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
438 + ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
439 + - 1)
440 +
441 ++#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
442 ++
443 + int do_syscall_trace_enter(void);
444 + void do_syscall_trace_exit(void);
445 + #endif /* __ASSEMBLY__ */
446 +diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
447 +index 0794cd7803dfe..99f0a65e62347 100644
448 +--- a/arch/nios2/kernel/entry.S
449 ++++ b/arch/nios2/kernel/entry.S
450 +@@ -185,6 +185,7 @@ ENTRY(handle_system_call)
451 + ldw r5, PT_R5(sp)
452 +
453 + local_restart:
454 ++ stw r2, PT_ORIG_R2(sp)
455 + /* Check that the requested system call is within limits */
456 + movui r1, __NR_syscalls
457 + bgeu r2, r1, ret_invsyscall
458 +@@ -192,7 +193,6 @@ local_restart:
459 + movhi r11, %hiadj(sys_call_table)
460 + add r1, r1, r11
461 + ldw r1, %lo(sys_call_table)(r1)
462 +- beq r1, r0, ret_invsyscall
463 +
464 + /* Check if we are being traced */
465 + GET_THREAD_INFO r11
466 +@@ -213,6 +213,9 @@ local_restart:
467 + translate_rc_and_ret:
468 + movi r1, 0
469 + bge r2, zero, 3f
470 ++ ldw r1, PT_ORIG_R2(sp)
471 ++ addi r1, r1, 1
472 ++ beq r1, zero, 3f
473 + sub r2, zero, r2
474 + movi r1, 1
475 + 3:
476 +@@ -255,9 +258,9 @@ traced_system_call:
477 + ldw r6, PT_R6(sp)
478 + ldw r7, PT_R7(sp)
479 +
480 +- /* Fetch the syscall function, we don't need to check the boundaries
481 +- * since this is already done.
482 +- */
483 ++ /* Fetch the syscall function. */
484 ++ movui r1, __NR_syscalls
485 ++ bgeu r2, r1, traced_invsyscall
486 + slli r1, r2, 2
487 + movhi r11,%hiadj(sys_call_table)
488 + add r1, r1, r11
489 +@@ -276,6 +279,9 @@ traced_system_call:
490 + translate_rc_and_ret2:
491 + movi r1, 0
492 + bge r2, zero, 4f
493 ++ ldw r1, PT_ORIG_R2(sp)
494 ++ addi r1, r1, 1
495 ++ beq r1, zero, 4f
496 + sub r2, zero, r2
497 + movi r1, 1
498 + 4:
499 +@@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
500 + RESTORE_SWITCH_STACK
501 + br ret_from_exception
502 +
503 ++ /* If the syscall number was invalid return ENOSYS */
504 ++traced_invsyscall:
505 ++ movi r2, -ENOSYS
506 ++ br translate_rc_and_ret2
507 ++
508 + Luser_return:
509 + GET_THREAD_INFO r11 /* get thread_info pointer */
510 + ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
511 +@@ -336,9 +347,6 @@ external_interrupt:
512 + /* skip if no interrupt is pending */
513 + beq r12, r0, ret_from_interrupt
514 +
515 +- movi r24, -1
516 +- stw r24, PT_ORIG_R2(sp)
517 +-
518 + /*
519 + * Process an external hardware interrupt.
520 + */
521 +diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
522 +index 386e46443b605..68d626c4f1ba7 100644
523 +--- a/arch/nios2/kernel/signal.c
524 ++++ b/arch/nios2/kernel/signal.c
525 +@@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
526 + /*
527 + * If we were from a system call, check for system call restarting...
528 + */
529 +- if (regs->orig_r2 >= 0) {
530 ++ if (regs->orig_r2 >= 0 && regs->r1) {
531 + continue_addr = regs->ea;
532 + restart_addr = continue_addr - 4;
533 + retval = regs->r2;
534 +@@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
535 + regs->ea = restart_addr;
536 + break;
537 + }
538 ++ regs->orig_r2 = -1;
539 + }
540 +
541 + if (get_signal(&ksig)) {
542 +diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
543 +index 6176d63023c1d..c2875a6dd5a4a 100644
544 +--- a/arch/nios2/kernel/syscall_table.c
545 ++++ b/arch/nios2/kernel/syscall_table.c
546 +@@ -13,5 +13,6 @@
547 + #define __SYSCALL(nr, call) [nr] = (call),
548 +
549 + void *sys_call_table[__NR_syscalls] = {
550 ++ [0 ... __NR_syscalls-1] = sys_ni_syscall,
551 + #include <asm/unistd.h>
552 + };
553 +diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
554 +index c298061c70a7e..8aa3e78181e9a 100644
555 +--- a/arch/openrisc/include/asm/io.h
556 ++++ b/arch/openrisc/include/asm/io.h
557 +@@ -31,7 +31,7 @@
558 + void __iomem *ioremap(phys_addr_t offset, unsigned long size);
559 +
560 + #define iounmap iounmap
561 +-extern void iounmap(void __iomem *addr);
562 ++extern void iounmap(volatile void __iomem *addr);
563 +
564 + #include <asm-generic/io.h>
565 +
566 +diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
567 +index daae13a76743b..8ec0dafecf257 100644
568 +--- a/arch/openrisc/mm/ioremap.c
569 ++++ b/arch/openrisc/mm/ioremap.c
570 +@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
571 + }
572 + EXPORT_SYMBOL(ioremap);
573 +
574 +-void iounmap(void __iomem *addr)
575 ++void iounmap(volatile void __iomem *addr)
576 + {
577 + /* If the page is from the fixmap pool then we just clear out
578 + * the fixmap mapping.
579 +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
580 +index 72610e2d61765..2bb0fe9b20585 100644
581 +--- a/arch/powerpc/Makefile
582 ++++ b/arch/powerpc/Makefile
583 +@@ -17,23 +17,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32)
584 + # Set default 32 bits cross compilers for vdso and boot wrapper
585 + CROSS32_COMPILE ?=
586 +
587 +-ifeq ($(HAS_BIARCH),y)
588 +-ifeq ($(CROSS32_COMPILE),)
589 +-ifdef CONFIG_PPC32
590 +-# These options will be overridden by any -mcpu option that the CPU
591 +-# or platform code sets later on the command line, but they are needed
592 +-# to set a sane 32-bit cpu target for the 64-bit cross compiler which
593 +-# may default to the wrong ISA.
594 +-KBUILD_CFLAGS += -mcpu=powerpc
595 +-KBUILD_AFLAGS += -mcpu=powerpc
596 +-endif
597 +-endif
598 +-endif
599 +-
600 +-ifdef CONFIG_PPC_BOOK3S_32
601 +-KBUILD_CFLAGS += -mcpu=powerpc
602 +-endif
603 +-
604 + # If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
605 + # ppc64_defconfig because we have nothing better to go on.
606 + uname := $(shell uname -m)
607 +@@ -185,6 +168,7 @@ endif
608 + endif
609 +
610 + CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
611 ++AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
612 +
613 + # Altivec option not allowed with e500mc64 in GCC.
614 + ifdef CONFIG_ALTIVEC
615 +@@ -195,14 +179,6 @@ endif
616 + CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
617 + CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
618 +
619 +-ifdef CONFIG_PPC32
620 +-ifdef CONFIG_PPC_E500MC
621 +-CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
622 +-else
623 +-CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
624 +-endif
625 +-endif
626 +-
627 + asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
628 +
629 + KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
630 +diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
631 +index 2e2a8211b17be..68e5c0a7e99d1 100644
632 +--- a/arch/powerpc/kernel/head_book3s_32.S
633 ++++ b/arch/powerpc/kernel/head_book3s_32.S
634 +@@ -421,14 +421,14 @@ InstructionTLBMiss:
635 + */
636 + /* Get PTE (linux-style) and check access */
637 + mfspr r3,SPRN_IMISS
638 +-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
639 ++#ifdef CONFIG_MODULES
640 + lis r1, TASK_SIZE@h /* check if kernel address */
641 + cmplw 0,r1,r3
642 + #endif
643 + mfspr r2, SPRN_SDR1
644 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
645 + rlwinm r2, r2, 28, 0xfffff000
646 +-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
647 ++#ifdef CONFIG_MODULES
648 + bgt- 112f
649 + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
650 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
651 +diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
652 +index fc5187c6559a1..1aabb82b5f375 100644
653 +--- a/arch/powerpc/kernel/pci-common.c
654 ++++ b/arch/powerpc/kernel/pci-common.c
655 +@@ -67,10 +67,6 @@ void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
656 + pci_dma_ops = dma_ops;
657 + }
658 +
659 +-/*
660 +- * This function should run under locking protection, specifically
661 +- * hose_spinlock.
662 +- */
663 + static int get_phb_number(struct device_node *dn)
664 + {
665 + int ret, phb_id = -1;
666 +@@ -107,15 +103,20 @@ static int get_phb_number(struct device_node *dn)
667 + if (!ret)
668 + phb_id = (int)(prop & (MAX_PHBS - 1));
669 +
670 ++ spin_lock(&hose_spinlock);
671 ++
672 + /* We need to be sure to not use the same PHB number twice. */
673 + if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
674 +- return phb_id;
675 ++ goto out_unlock;
676 +
677 + /* If everything fails then fallback to dynamic PHB numbering. */
678 + phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
679 + BUG_ON(phb_id >= MAX_PHBS);
680 + set_bit(phb_id, phb_bitmap);
681 +
682 ++out_unlock:
683 ++ spin_unlock(&hose_spinlock);
684 ++
685 + return phb_id;
686 + }
687 +
688 +@@ -126,10 +127,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
689 + phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
690 + if (phb == NULL)
691 + return NULL;
692 +- spin_lock(&hose_spinlock);
693 ++
694 + phb->global_number = get_phb_number(dev);
695 ++
696 ++ spin_lock(&hose_spinlock);
697 + list_add_tail(&phb->list_node, &hose_list);
698 + spin_unlock(&hose_spinlock);
699 ++
700 + phb->dn = dev;
701 + phb->is_dynamic = slab_is_available();
702 + #ifdef CONFIG_PPC64
703 +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
704 +index 2e67588f6f6e6..86ffbabd26c6e 100644
705 +--- a/arch/powerpc/kernel/prom.c
706 ++++ b/arch/powerpc/kernel/prom.c
707 +@@ -751,6 +751,13 @@ void __init early_init_devtree(void *params)
708 + of_scan_flat_dt(early_init_dt_scan_root, NULL);
709 + of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
710 +
711 ++ /*
712 ++ * As generic code authors expect to be able to use static keys
713 ++ * in early_param() handlers, we initialize the static keys just
714 ++ * before parsing early params (it's fine to call jump_label_init()
715 ++ * more than once).
716 ++ */
717 ++ jump_label_init();
718 + parse_early_param();
719 +
720 + /* make sure we've parsed cmdline for mem= before this */
721 +diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
722 +index 961b3d70483ca..a0e0c28408c07 100644
723 +--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
724 ++++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
725 +@@ -7,15 +7,6 @@
726 + #include <asm/ppc-opcode.h>
727 +
728 + #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
729 +-static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
730 +-{
731 +- struct kvmppc_vcore *vc = vcpu->arch.vcore;
732 +- u64 tb = mftb() - vc->tb_offset_applied;
733 +-
734 +- vcpu->arch.cur_activity = next;
735 +- vcpu->arch.cur_tb_start = tb;
736 +-}
737 +-
738 + static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
739 + {
740 + struct kvmppc_vcore *vc = vcpu->arch.vcore;
741 +@@ -47,8 +38,8 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator
742 + curr->seqcount = seq + 2;
743 + }
744 +
745 +-#define start_timing(vcpu, next) __start_timing(vcpu, next)
746 +-#define end_timing(vcpu) __start_timing(vcpu, NULL)
747 ++#define start_timing(vcpu, next) __accumulate_time(vcpu, next)
748 ++#define end_timing(vcpu) __accumulate_time(vcpu, NULL)
749 + #define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
750 + #else
751 + #define start_timing(vcpu, next) do {} while (0)
752 +diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
753 +index 203735caf6915..bfca0afe91126 100644
754 +--- a/arch/powerpc/mm/book3s32/mmu.c
755 ++++ b/arch/powerpc/mm/book3s32/mmu.c
756 +@@ -160,7 +160,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
757 + {
758 + unsigned long done;
759 + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
760 ++ unsigned long size;
761 +
762 ++ size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
763 ++ setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);
764 +
765 + if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
766 + pr_debug_once("Read-Write memory mapped without BATs\n");
767 +@@ -246,10 +249,9 @@ void mmu_mark_rodata_ro(void)
768 + }
769 +
770 + /*
771 +- * Set up one of the I/D BAT (block address translation) register pairs.
772 ++ * Set up one of the D BAT (block address translation) register pairs.
773 + * The parameters are not checked; in particular size must be a power
774 + * of 2 between 128k and 256M.
775 +- * On 603+, only set IBAT when _PAGE_EXEC is set
776 + */
777 + void __init setbat(int index, unsigned long virt, phys_addr_t phys,
778 + unsigned int size, pgprot_t prot)
779 +@@ -285,10 +287,6 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
780 + /* G bit must be zero in IBATs */
781 + flags &= ~_PAGE_EXEC;
782 + }
783 +- if (flags & _PAGE_EXEC)
784 +- bat[0] = bat[1];
785 +- else
786 +- bat[0].batu = bat[0].batl = 0;
787 +
788 + bat_addrs[index].start = virt;
789 + bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
790 +diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
791 +index 81f8c96348321..1b1e67ff9d211 100644
792 +--- a/arch/powerpc/platforms/Kconfig.cputype
793 ++++ b/arch/powerpc/platforms/Kconfig.cputype
794 +@@ -137,9 +137,9 @@ config GENERIC_CPU
795 + depends on PPC64 && CPU_LITTLE_ENDIAN
796 + select ARCH_HAS_FAST_MULTIPLIER
797 +
798 +-config GENERIC_CPU
799 ++config POWERPC_CPU
800 + bool "Generic 32 bits powerpc"
801 +- depends on PPC32 && !PPC_8xx
802 ++ depends on PPC32 && !PPC_8xx && !PPC_85xx
803 +
804 + config CELL_CPU
805 + bool "Cell Broadband Engine"
806 +@@ -193,11 +193,23 @@ config G4_CPU
807 + depends on PPC_BOOK3S_32
808 + select ALTIVEC
809 +
810 ++config E500_CPU
811 ++ bool "e500 (8540)"
812 ++ depends on PPC_85xx && !PPC_E500MC
813 ++
814 ++config E500MC_CPU
815 ++ bool "e500mc"
816 ++ depends on PPC_85xx && PPC_E500MC
817 ++
818 ++config TOOLCHAIN_DEFAULT_CPU
819 ++ bool "Rely on the toolchain's implicit default CPU"
820 ++ depends on PPC32
821 ++
822 + endchoice
823 +
824 + config TARGET_CPU_BOOL
825 + bool
826 +- default !GENERIC_CPU
827 ++ default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
828 +
829 + config TARGET_CPU
830 + string
831 +@@ -212,6 +224,9 @@ config TARGET_CPU
832 + default "e300c2" if E300C2_CPU
833 + default "e300c3" if E300C3_CPU
834 + default "G4" if G4_CPU
835 ++ default "8540" if E500_CPU
836 ++ default "e500mc" if E500MC_CPU
837 ++ default "powerpc" if POWERPC_CPU
838 +
839 + config PPC_BOOK3S
840 + def_bool y
841 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
842 +index 3dd35c327d1c5..624822a810193 100644
843 +--- a/arch/powerpc/platforms/powernv/pci-ioda.c
844 ++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
845 +@@ -1618,6 +1618,7 @@ found:
846 + tbl->it_ops = &pnv_ioda1_iommu_ops;
847 + pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
848 + pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
849 ++ tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number;
850 + if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
851 + panic("Failed to initialize iommu table");
852 +
853 +@@ -1788,6 +1789,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
854 + res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
855 + }
856 +
857 ++ tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number;
858 + if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
859 + rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
860 + else
861 +diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
862 +index 780416d489aa7..fa9162e3afa3f 100644
863 +--- a/arch/riscv/boot/dts/canaan/k210.dtsi
864 ++++ b/arch/riscv/boot/dts/canaan/k210.dtsi
865 +@@ -65,6 +65,18 @@
866 + compatible = "riscv,cpu-intc";
867 + };
868 + };
869 ++
870 ++ cpu-map {
871 ++ cluster0 {
872 ++ core0 {
873 ++ cpu = <&cpu0>;
874 ++ };
875 ++
876 ++ core1 {
877 ++ cpu = <&cpu1>;
878 ++ };
879 ++ };
880 ++ };
881 + };
882 +
883 + sram: memory@80000000 {
884 +diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
885 +index abbb960f90a00..454079a69ab44 100644
886 +--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
887 ++++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
888 +@@ -134,6 +134,30 @@
889 + interrupt-controller;
890 + };
891 + };
892 ++
893 ++ cpu-map {
894 ++ cluster0 {
895 ++ core0 {
896 ++ cpu = <&cpu0>;
897 ++ };
898 ++
899 ++ core1 {
900 ++ cpu = <&cpu1>;
901 ++ };
902 ++
903 ++ core2 {
904 ++ cpu = <&cpu2>;
905 ++ };
906 ++
907 ++ core3 {
908 ++ cpu = <&cpu3>;
909 ++ };
910 ++
911 ++ core4 {
912 ++ cpu = <&cpu4>;
913 ++ };
914 ++ };
915 ++ };
916 + };
917 + soc {
918 + #address-cells = <2>;
919 +diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
920 +index 12f8a7fce78b1..8a7880b9c433e 100644
921 +--- a/arch/riscv/kernel/sys_riscv.c
922 ++++ b/arch/riscv/kernel/sys_riscv.c
923 +@@ -18,9 +18,8 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
924 + if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
925 + return -EINVAL;
926 +
927 +- if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
928 +- if (unlikely(!(prot & PROT_READ)))
929 +- return -EINVAL;
930 ++ if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
931 ++ return -EINVAL;
932 +
933 + return ksys_mmap_pgoff(addr, len, prot, flags, fd,
934 + offset >> (PAGE_SHIFT - page_shift_offset));
935 +diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
936 +index 0daaa3e4630d4..b938ffe129d6b 100644
937 +--- a/arch/riscv/kernel/traps.c
938 ++++ b/arch/riscv/kernel/traps.c
939 +@@ -16,6 +16,7 @@
940 + #include <linux/mm.h>
941 + #include <linux/module.h>
942 + #include <linux/irq.h>
943 ++#include <linux/kexec.h>
944 +
945 + #include <asm/asm-prototypes.h>
946 + #include <asm/bug.h>
947 +@@ -44,6 +45,9 @@ void die(struct pt_regs *regs, const char *str)
948 +
949 + ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
950 +
951 ++ if (regs && kexec_should_crash(current))
952 ++ crash_kexec(regs);
953 ++
954 + bust_spinlocks(0);
955 + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
956 + spin_unlock_irq(&die_lock);
957 +diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
958 +index 87d3129e7362e..0df2ebcc97c0d 100644
959 +--- a/arch/um/os-Linux/skas/process.c
960 ++++ b/arch/um/os-Linux/skas/process.c
961 +@@ -5,6 +5,7 @@
962 + */
963 +
964 + #include <stdlib.h>
965 ++#include <stdbool.h>
966 + #include <unistd.h>
967 + #include <sched.h>
968 + #include <errno.h>
969 +@@ -707,10 +708,24 @@ void halt_skas(void)
970 + UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
971 + }
972 +
973 ++static bool noreboot;
974 ++
975 ++static int __init noreboot_cmd_param(char *str, int *add)
976 ++{
977 ++ noreboot = true;
978 ++ return 0;
979 ++}
980 ++
981 ++__uml_setup("noreboot", noreboot_cmd_param,
982 ++"noreboot\n"
983 ++" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
984 ++" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
985 ++" crashes in CI\n");
986 ++
987 + void reboot_skas(void)
988 + {
989 + block_signals_trace();
990 +- UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
991 ++ UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
992 + }
993 +
994 + void __switch_mm(struct mm_id *mm_idp)
995 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
996 +index 7a7f671c60b4a..6872f3834668d 100644
997 +--- a/arch/x86/kernel/kprobes/core.c
998 ++++ b/arch/x86/kernel/kprobes/core.c
999 +@@ -495,7 +495,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
1000 + match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
1001 + ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
1002 + if (p->ainsn.jcc.type >= 0xe)
1003 +- match = match && (regs->flags & X86_EFLAGS_ZF);
1004 ++ match = match || (regs->flags & X86_EFLAGS_ZF);
1005 + }
1006 + __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
1007 + }
1008 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1009 +index b01f5d2caad04..200ad5ceeb43f 100644
1010 +--- a/arch/x86/mm/init_64.c
1011 ++++ b/arch/x86/mm/init_64.c
1012 +@@ -646,7 +646,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
1013 + pages++;
1014 + spin_lock(&init_mm.page_table_lock);
1015 +
1016 +- prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
1017 ++ prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);
1018 +
1019 + set_pte_init((pte_t *)pud,
1020 + pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
1021 +diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
1022 +index 53cab975f612c..63b98eae5e75e 100644
1023 +--- a/drivers/acpi/pci_mcfg.c
1024 ++++ b/drivers/acpi/pci_mcfg.c
1025 +@@ -41,6 +41,8 @@ struct mcfg_fixup {
1026 + static struct mcfg_fixup mcfg_quirks[] = {
1027 + /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
1028 +
1029 ++#ifdef CONFIG_ARM64
1030 ++
1031 + #define AL_ECAM(table_id, rev, seg, ops) \
1032 + { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
1033 +
1034 +@@ -169,6 +171,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
1035 + ALTRA_ECAM_QUIRK(1, 13),
1036 + ALTRA_ECAM_QUIRK(1, 14),
1037 + ALTRA_ECAM_QUIRK(1, 15),
1038 ++#endif /* ARM64 */
1039 + };
1040 +
1041 + static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
1042 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
1043 +index b86d3b869a58a..488915328646e 100644
1044 +--- a/drivers/acpi/property.c
1045 ++++ b/drivers/acpi/property.c
1046 +@@ -155,10 +155,10 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
1047 + return acpi_nondev_subnode_data_ok(handle, link, list, parent);
1048 + }
1049 +
1050 +-static int acpi_add_nondev_subnodes(acpi_handle scope,
1051 +- const union acpi_object *links,
1052 +- struct list_head *list,
1053 +- struct fwnode_handle *parent)
1054 ++static bool acpi_add_nondev_subnodes(acpi_handle scope,
1055 ++ const union acpi_object *links,
1056 ++ struct list_head *list,
1057 ++ struct fwnode_handle *parent)
1058 + {
1059 + bool ret = false;
1060 + int i;
1061 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1062 +index 1d4a6f1e88cd1..7aea631edb274 100644
1063 +--- a/drivers/ata/libata-eh.c
1064 ++++ b/drivers/ata/libata-eh.c
1065 +@@ -2130,6 +2130,7 @@ const char *ata_get_cmd_descript(u8 command)
1066 + { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
1067 + { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
1068 + { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
1069 ++ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
1070 + { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
1071 + { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
1072 + { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
1073 +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
1074 +index 81ce81a75fc67..681cb3786794d 100644
1075 +--- a/drivers/atm/idt77252.c
1076 ++++ b/drivers/atm/idt77252.c
1077 +@@ -3752,6 +3752,7 @@ static void __exit idt77252_exit(void)
1078 + card = idt77252_chain;
1079 + dev = card->atmdev;
1080 + idt77252_chain = card->next;
1081 ++ del_timer_sync(&card->tst_timer);
1082 +
1083 + if (dev->phy->stop)
1084 + dev->phy->stop(dev);
1085 +diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
1086 +index 052aa3f65514e..0916de952e091 100644
1087 +--- a/drivers/block/zram/zcomp.c
1088 ++++ b/drivers/block/zram/zcomp.c
1089 +@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
1090 +
1091 + bool zcomp_available_algorithm(const char *comp)
1092 + {
1093 +- int i;
1094 +-
1095 +- i = sysfs_match_string(backends, comp);
1096 +- if (i >= 0)
1097 +- return true;
1098 +-
1099 + /*
1100 + * Crypto does not ignore a trailing new line symbol,
1101 + * so make sure you don't supply a string containing
1102 +@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress)
1103 + struct zcomp *comp;
1104 + int error;
1105 +
1106 ++ /*
1107 ++ * Crypto API will execute /sbin/modprobe if the compression module
1108 ++ * is not loaded yet. We must do it here, otherwise we are about to
1109 ++ * call /sbin/modprobe under CPU hot-plug lock.
1110 ++ */
1111 + if (!zcomp_available_algorithm(compress))
1112 + return ERR_PTR(-EINVAL);
1113 +
1114 +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
1115 +index 8f65b9bdafce4..5e44ceb730ad1 100644
1116 +--- a/drivers/clk/qcom/clk-alpha-pll.c
1117 ++++ b/drivers/clk/qcom/clk-alpha-pll.c
1118 +@@ -1420,7 +1420,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
1119 + EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
1120 +
1121 + /**
1122 +- * clk_lucid_pll_configure - configure the lucid pll
1123 ++ * clk_trion_pll_configure - configure the trion pll
1124 + *
1125 + * @pll: clk alpha pll
1126 + * @regmap: register map
1127 +diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
1128 +index 2c2ecfc5e61f5..d6d5defb82c9f 100644
1129 +--- a/drivers/clk/qcom/gcc-ipq8074.c
1130 ++++ b/drivers/clk/qcom/gcc-ipq8074.c
1131 +@@ -662,6 +662,7 @@ static struct clk_branch gcc_sleep_clk_src = {
1132 + },
1133 + .num_parents = 1,
1134 + .ops = &clk_branch2_ops,
1135 ++ .flags = CLK_IS_CRITICAL,
1136 + },
1137 + },
1138 + };
1139 +diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
1140 +index d078e5d73ed94..868bc7af21b0b 100644
1141 +--- a/drivers/clk/ti/clk-44xx.c
1142 ++++ b/drivers/clk/ti/clk-44xx.c
1143 +@@ -56,7 +56,7 @@ static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
1144 + };
1145 +
1146 + static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
1147 +- "abe_cm:clk:0018:26",
1148 ++ "abe-clkctrl:0018:26",
1149 + "pad_clks_ck",
1150 + "slimbus_clk",
1151 + NULL,
1152 +@@ -76,7 +76,7 @@ static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
1153 + };
1154 +
1155 + static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
1156 +- "abe_cm:clk:0020:26",
1157 ++ "abe-clkctrl:0020:26",
1158 + "pad_clks_ck",
1159 + "slimbus_clk",
1160 + NULL,
1161 +@@ -89,7 +89,7 @@ static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
1162 + };
1163 +
1164 + static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
1165 +- "abe_cm:clk:0028:26",
1166 ++ "abe-clkctrl:0028:26",
1167 + "pad_clks_ck",
1168 + "slimbus_clk",
1169 + NULL,
1170 +@@ -102,7 +102,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst =
1171 + };
1172 +
1173 + static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
1174 +- "abe_cm:clk:0030:26",
1175 ++ "abe-clkctrl:0030:26",
1176 + "pad_clks_ck",
1177 + "slimbus_clk",
1178 + NULL,
1179 +@@ -115,7 +115,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst =
1180 + };
1181 +
1182 + static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
1183 +- "abe_cm:clk:0038:26",
1184 ++ "abe-clkctrl:0038:26",
1185 + "pad_clks_ck",
1186 + "slimbus_clk",
1187 + NULL,
1188 +@@ -183,18 +183,18 @@ static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst =
1189 +
1190 + static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
1191 + { OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
1192 +- { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
1193 ++ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
1194 + { OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
1195 +- { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
1196 +- { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe_cm:clk:0020:24" },
1197 +- { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
1198 +- { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
1199 +- { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
1200 +- { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0040:8" },
1201 +- { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
1202 +- { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
1203 +- { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
1204 +- { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
1205 ++ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
1206 ++ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" },
1207 ++ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
1208 ++ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
1209 ++ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
1210 ++ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" },
1211 ++ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
1212 ++ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
1213 ++ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
1214 ++ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
1215 + { OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
1216 + { 0 },
1217 + };
1218 +@@ -287,7 +287,7 @@ static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
1219 +
1220 + static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
1221 + { OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
1222 +- { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss_cm:clk:0008:24" },
1223 ++ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" },
1224 + { 0 },
1225 + };
1226 +
1227 +@@ -320,7 +320,7 @@ static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst
1228 + };
1229 +
1230 + static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
1231 +- { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3_dss_cm:clk:0000:8" },
1232 ++ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" },
1233 + { 0 },
1234 + };
1235 +
1236 +@@ -336,7 +336,7 @@ static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
1237 + };
1238 +
1239 + static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
1240 +- { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3_gfx_cm:clk:0000:24" },
1241 ++ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" },
1242 + { 0 },
1243 + };
1244 +
1245 +@@ -372,12 +372,12 @@ static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
1246 + };
1247 +
1248 + static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
1249 +- "l3_init_cm:clk:0038:24",
1250 ++ "l3-init-clkctrl:0038:24",
1251 + NULL,
1252 + };
1253 +
1254 + static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
1255 +- "l3_init_cm:clk:0038:25",
1256 ++ "l3-init-clkctrl:0038:25",
1257 + NULL,
1258 + };
1259 +
1260 +@@ -418,7 +418,7 @@ static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initcon
1261 + };
1262 +
1263 + static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
1264 +- "l3_init_cm:clk:0040:24",
1265 ++ "l3-init-clkctrl:0040:24",
1266 + NULL,
1267 + };
1268 +
1269 +@@ -452,14 +452,14 @@ static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __ini
1270 + };
1271 +
1272 + static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
1273 +- { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0008:24" },
1274 +- { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0010:24" },
1275 +- { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:0018:24" },
1276 ++ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" },
1277 ++ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" },
1278 ++ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" },
1279 + { OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
1280 + { OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
1281 + { OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
1282 + { OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
1283 +- { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:00c0:8" },
1284 ++ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" },
1285 + { 0 },
1286 + };
1287 +
1288 +@@ -530,7 +530,7 @@ static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
1289 + };
1290 +
1291 + static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
1292 +- "l4_per_cm:clk:00c0:26",
1293 ++ "l4-per-clkctrl:00c0:26",
1294 + "pad_clks_ck",
1295 + NULL,
1296 + };
1297 +@@ -570,12 +570,12 @@ static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst
1298 + };
1299 +
1300 + static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
1301 +- { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0008:24" },
1302 +- { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0010:24" },
1303 +- { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0018:24" },
1304 +- { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0020:24" },
1305 +- { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0028:24" },
1306 +- { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0030:24" },
1307 ++ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" },
1308 ++ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" },
1309 ++ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" },
1310 ++ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" },
1311 ++ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" },
1312 ++ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" },
1313 + { OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
1314 + { OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
1315 + { OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
1316 +@@ -588,14 +588,14 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
1317 + { OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
1318 + { OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
1319 + { OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
1320 +- { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:00c0:24" },
1321 ++ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" },
1322 + { OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1323 + { OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1324 + { OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1325 + { OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1326 + { OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1327 + { OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1328 +- { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0118:8" },
1329 ++ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" },
1330 + { OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1331 + { OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1332 + { OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
1333 +@@ -630,7 +630,7 @@ static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initcon
1334 + { OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
1335 + { OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
1336 + { OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
1337 +- { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0020:24" },
1338 ++ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" },
1339 + { OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
1340 + { OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
1341 + { 0 },
1342 +@@ -644,7 +644,7 @@ static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
1343 + };
1344 +
1345 + static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
1346 +- "emu_sys_cm:clk:0000:22",
1347 ++ "emu-sys-clkctrl:0000:22",
1348 + NULL,
1349 + };
1350 +
1351 +@@ -662,7 +662,7 @@ static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __init
1352 + };
1353 +
1354 + static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
1355 +- "emu_sys_cm:clk:0000:20",
1356 ++ "emu-sys-clkctrl:0000:20",
1357 + NULL,
1358 + };
1359 +
1360 +@@ -716,73 +716,73 @@ static struct ti_dt_clk omap44xx_clks[] = {
1361 + * hwmod support. Once hwmod is removed, these can be removed
1362 + * also.
1363 + */
1364 +- DT_CLK(NULL, "aess_fclk", "abe_cm:0008:24"),
1365 +- DT_CLK(NULL, "cm2_dm10_mux", "l4_per_cm:0008:24"),
1366 +- DT_CLK(NULL, "cm2_dm11_mux", "l4_per_cm:0010:24"),
1367 +- DT_CLK(NULL, "cm2_dm2_mux", "l4_per_cm:0018:24"),
1368 +- DT_CLK(NULL, "cm2_dm3_mux", "l4_per_cm:0020:24"),
1369 +- DT_CLK(NULL, "cm2_dm4_mux", "l4_per_cm:0028:24"),
1370 +- DT_CLK(NULL, "cm2_dm9_mux", "l4_per_cm:0030:24"),
1371 +- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
1372 +- DT_CLK(NULL, "dmt1_clk_mux", "l4_wkup_cm:0020:24"),
1373 +- DT_CLK(NULL, "dss_48mhz_clk", "l3_dss_cm:0000:9"),
1374 +- DT_CLK(NULL, "dss_dss_clk", "l3_dss_cm:0000:8"),
1375 +- DT_CLK(NULL, "dss_sys_clk", "l3_dss_cm:0000:10"),
1376 +- DT_CLK(NULL, "dss_tv_clk", "l3_dss_cm:0000:11"),
1377 +- DT_CLK(NULL, "fdif_fck", "iss_cm:0008:24"),
1378 +- DT_CLK(NULL, "func_dmic_abe_gfclk", "abe_cm:0018:24"),
1379 +- DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe_cm:0020:24"),
1380 +- DT_CLK(NULL, "func_mcbsp1_gfclk", "abe_cm:0028:24"),
1381 +- DT_CLK(NULL, "func_mcbsp2_gfclk", "abe_cm:0030:24"),
1382 +- DT_CLK(NULL, "func_mcbsp3_gfclk", "abe_cm:0038:24"),
1383 +- DT_CLK(NULL, "gpio1_dbclk", "l4_wkup_cm:0018:8"),
1384 +- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0040:8"),
1385 +- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0048:8"),
1386 +- DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0050:8"),
1387 +- DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0058:8"),
1388 +- DT_CLK(NULL, "gpio6_dbclk", "l4_per_cm:0060:8"),
1389 +- DT_CLK(NULL, "hsi_fck", "l3_init_cm:0018:24"),
1390 +- DT_CLK(NULL, "hsmmc1_fclk", "l3_init_cm:0008:24"),
1391 +- DT_CLK(NULL, "hsmmc2_fclk", "l3_init_cm:0010:24"),
1392 +- DT_CLK(NULL, "iss_ctrlclk", "iss_cm:0000:8"),
1393 +- DT_CLK(NULL, "mcasp_sync_mux_ck", "abe_cm:0020:26"),
1394 +- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
1395 +- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
1396 +- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
1397 +- DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4_per_cm:00c0:26"),
1398 +- DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3_init_cm:00c0:8"),
1399 +- DT_CLK(NULL, "otg_60m_gfclk", "l3_init_cm:0040:24"),
1400 +- DT_CLK(NULL, "per_mcbsp4_gfclk", "l4_per_cm:00c0:24"),
1401 +- DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu_sys_cm:0000:20"),
1402 +- DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu_sys_cm:0000:22"),
1403 +- DT_CLK(NULL, "sgx_clk_mux", "l3_gfx_cm:0000:24"),
1404 +- DT_CLK(NULL, "slimbus1_fclk_0", "abe_cm:0040:8"),
1405 +- DT_CLK(NULL, "slimbus1_fclk_1", "abe_cm:0040:9"),
1406 +- DT_CLK(NULL, "slimbus1_fclk_2", "abe_cm:0040:10"),
1407 +- DT_CLK(NULL, "slimbus1_slimbus_clk", "abe_cm:0040:11"),
1408 +- DT_CLK(NULL, "slimbus2_fclk_0", "l4_per_cm:0118:8"),
1409 +- DT_CLK(NULL, "slimbus2_fclk_1", "l4_per_cm:0118:9"),
1410 +- DT_CLK(NULL, "slimbus2_slimbus_clk", "l4_per_cm:0118:10"),
1411 +- DT_CLK(NULL, "stm_clk_div_ck", "emu_sys_cm:0000:27"),
1412 +- DT_CLK(NULL, "timer5_sync_mux", "abe_cm:0048:24"),
1413 +- DT_CLK(NULL, "timer6_sync_mux", "abe_cm:0050:24"),
1414 +- DT_CLK(NULL, "timer7_sync_mux", "abe_cm:0058:24"),
1415 +- DT_CLK(NULL, "timer8_sync_mux", "abe_cm:0060:24"),
1416 +- DT_CLK(NULL, "trace_clk_div_div_ck", "emu_sys_cm:0000:24"),
1417 +- DT_CLK(NULL, "usb_host_hs_func48mclk", "l3_init_cm:0038:15"),
1418 +- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3_init_cm:0038:13"),
1419 +- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3_init_cm:0038:14"),
1420 +- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3_init_cm:0038:11"),
1421 +- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3_init_cm:0038:12"),
1422 +- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3_init_cm:0038:8"),
1423 +- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3_init_cm:0038:9"),
1424 +- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init_cm:0038:10"),
1425 +- DT_CLK(NULL, "usb_otg_hs_xclk", "l3_init_cm:0040:8"),
1426 +- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3_init_cm:0048:8"),
1427 +- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3_init_cm:0048:9"),
1428 +- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3_init_cm:0048:10"),
1429 +- DT_CLK(NULL, "utmi_p1_gfclk", "l3_init_cm:0038:24"),
1430 +- DT_CLK(NULL, "utmi_p2_gfclk", "l3_init_cm:0038:25"),
1431 ++ DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"),
1432 ++ DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"),
1433 ++ DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"),
1434 ++ DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"),
1435 ++ DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"),
1436 ++ DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"),
1437 ++ DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"),
1438 ++ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
1439 ++ DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"),
1440 ++ DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"),
1441 ++ DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"),
1442 ++ DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"),
1443 ++ DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"),
1444 ++ DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"),
1445 ++ DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"),
1446 ++ DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"),
1447 ++ DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"),
1448 ++ DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"),
1449 ++ DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"),
1450 ++ DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"),
1451 ++ DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"),
1452 ++ DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"),
1453 ++ DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"),
1454 ++ DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"),
1455 ++ DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"),
1456 ++ DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"),
1457 ++ DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"),
1458 ++ DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"),
1459 ++ DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"),
1460 ++ DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"),
1461 ++ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
1462 ++ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
1463 ++ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
1464 ++ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
1465 ++ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
1466 ++ DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
1467 ++ DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
1468 ++ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
1469 ++ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
1470 ++ DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"),
1471 ++ DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"),
1472 ++ DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"),
1473 ++ DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"),
1474 ++ DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"),
1475 ++ DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"),
1476 ++ DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"),
1477 ++ DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"),
1478 ++ DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"),
1479 ++ DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"),
1480 ++ DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"),
1481 ++ DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"),
1482 ++ DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"),
1483 ++ DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"),
1484 ++ DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"),
1485 ++ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"),
1486 ++ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"),
1487 ++ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"),
1488 ++ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"),
1489 ++ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3-init-clkctrl:0038:8"),
1490 ++ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"),
1491 ++ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init-clkctrl:0038:10"),
1492 ++ DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"),
1493 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"),
1494 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"),
1495 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"),
1496 ++ DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"),
1497 ++ DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"),
1498 + { .node_name = NULL },
1499 + };
1500 +
1501 +diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
1502 +index 90e0a9ea63515..b4aff76eb3735 100644
1503 +--- a/drivers/clk/ti/clk-54xx.c
1504 ++++ b/drivers/clk/ti/clk-54xx.c
1505 +@@ -50,7 +50,7 @@ static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
1506 + };
1507 +
1508 + static const char * const omap5_dmic_gfclk_parents[] __initconst = {
1509 +- "abe_cm:clk:0018:26",
1510 ++ "abe-clkctrl:0018:26",
1511 + "pad_clks_ck",
1512 + "slimbus_clk",
1513 + NULL,
1514 +@@ -70,7 +70,7 @@ static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = {
1515 + };
1516 +
1517 + static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = {
1518 +- "abe_cm:clk:0028:26",
1519 ++ "abe-clkctrl:0028:26",
1520 + "pad_clks_ck",
1521 + "slimbus_clk",
1522 + NULL,
1523 +@@ -83,7 +83,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst =
1524 + };
1525 +
1526 + static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = {
1527 +- "abe_cm:clk:0030:26",
1528 ++ "abe-clkctrl:0030:26",
1529 + "pad_clks_ck",
1530 + "slimbus_clk",
1531 + NULL,
1532 +@@ -96,7 +96,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst =
1533 + };
1534 +
1535 + static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = {
1536 +- "abe_cm:clk:0038:26",
1537 ++ "abe-clkctrl:0038:26",
1538 + "pad_clks_ck",
1539 + "slimbus_clk",
1540 + NULL,
1541 +@@ -136,16 +136,16 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
1542 +
1543 + static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
1544 + { OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
1545 +- { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
1546 ++ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
1547 + { OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
1548 +- { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
1549 +- { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
1550 +- { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
1551 +- { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
1552 +- { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
1553 +- { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
1554 +- { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
1555 +- { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
1556 ++ { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
1557 ++ { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
1558 ++ { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
1559 ++ { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
1560 ++ { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
1561 ++ { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
1562 ++ { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
1563 ++ { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
1564 + { 0 },
1565 + };
1566 +
1567 +@@ -268,12 +268,12 @@ static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = {
1568 + };
1569 +
1570 + static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = {
1571 +- { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0008:24" },
1572 +- { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0010:24" },
1573 +- { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0018:24" },
1574 +- { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0020:24" },
1575 +- { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
1576 +- { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
1577 ++ { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
1578 ++ { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
1579 ++ { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
1580 ++ { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
1581 ++ { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
1582 ++ { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" },
1583 + { OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
1584 + { OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
1585 + { OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
1586 +@@ -345,7 +345,7 @@ static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst
1587 + };
1588 +
1589 + static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = {
1590 +- { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
1591 ++ { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
1592 + { 0 },
1593 + };
1594 +
1595 +@@ -378,7 +378,7 @@ static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst
1596 + };
1597 +
1598 + static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = {
1599 +- { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" },
1600 ++ { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" },
1601 + { 0 },
1602 + };
1603 +
1604 +@@ -389,7 +389,7 @@ static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = {
1605 + };
1606 +
1607 + static const char * const omap5_mmc1_fclk_parents[] __initconst = {
1608 +- "l3init_cm:clk:0008:24",
1609 ++ "l3init-clkctrl:0008:24",
1610 + NULL,
1611 + };
1612 +
1613 +@@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = {
1614 + };
1615 +
1616 + static const char * const omap5_mmc2_fclk_parents[] __initconst = {
1617 +- "l3init_cm:clk:0010:24",
1618 ++ "l3init-clkctrl:0010:24",
1619 + NULL,
1620 + };
1621 +
1622 +@@ -430,12 +430,12 @@ static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initcons
1623 + };
1624 +
1625 + static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
1626 +- "l3init_cm:clk:0038:24",
1627 ++ "l3init-clkctrl:0038:24",
1628 + NULL,
1629 + };
1630 +
1631 + static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
1632 +- "l3init_cm:clk:0038:25",
1633 ++ "l3init-clkctrl:0038:25",
1634 + NULL,
1635 + };
1636 +
1637 +@@ -494,8 +494,8 @@ static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initcons
1638 + };
1639 +
1640 + static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = {
1641 +- { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
1642 +- { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
1643 ++ { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
1644 ++ { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
1645 + { OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" },
1646 + { OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
1647 + { OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
1648 +@@ -519,7 +519,7 @@ static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initcon
1649 + { OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
1650 + { OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
1651 + { OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
1652 +- { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
1653 ++ { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
1654 + { OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
1655 + { OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
1656 + { 0 },
1657 +@@ -549,58 +549,58 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
1658 + static struct ti_dt_clk omap54xx_clks[] = {
1659 + DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
1660 + DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
1661 +- DT_CLK(NULL, "dmic_gfclk", "abe_cm:0018:24"),
1662 +- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
1663 +- DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
1664 +- DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
1665 +- DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
1666 +- DT_CLK(NULL, "dss_sys_clk", "dss_cm:0000:10"),
1667 +- DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
1668 +- DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0040:8"),
1669 +- DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0048:8"),
1670 +- DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0050:8"),
1671 +- DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0058:8"),
1672 +- DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0060:8"),
1673 +- DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:00f0:8"),
1674 +- DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:00f8:8"),
1675 +- DT_CLK(NULL, "mcbsp1_gfclk", "abe_cm:0028:24"),
1676 +- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
1677 +- DT_CLK(NULL, "mcbsp2_gfclk", "abe_cm:0030:24"),
1678 +- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
1679 +- DT_CLK(NULL, "mcbsp3_gfclk", "abe_cm:0038:24"),
1680 +- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
1681 +- DT_CLK(NULL, "mmc1_32khz_clk", "l3init_cm:0008:8"),
1682 +- DT_CLK(NULL, "mmc1_fclk", "l3init_cm:0008:25"),
1683 +- DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
1684 +- DT_CLK(NULL, "mmc2_fclk", "l3init_cm:0010:25"),
1685 +- DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
1686 +- DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
1687 +- DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0008:24"),
1688 +- DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0010:24"),
1689 +- DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
1690 +- DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0018:24"),
1691 +- DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0020:24"),
1692 +- DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0028:24"),
1693 +- DT_CLK(NULL, "timer5_gfclk_mux", "abe_cm:0048:24"),
1694 +- DT_CLK(NULL, "timer6_gfclk_mux", "abe_cm:0050:24"),
1695 +- DT_CLK(NULL, "timer7_gfclk_mux", "abe_cm:0058:24"),
1696 +- DT_CLK(NULL, "timer8_gfclk_mux", "abe_cm:0060:24"),
1697 +- DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0030:24"),
1698 +- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init_cm:0038:13"),
1699 +- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init_cm:0038:14"),
1700 +- DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init_cm:0038:7"),
1701 +- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init_cm:0038:11"),
1702 +- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init_cm:0038:12"),
1703 +- DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init_cm:0038:6"),
1704 +- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init_cm:0038:8"),
1705 +- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init_cm:0038:9"),
1706 +- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init_cm:0038:10"),
1707 +- DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init_cm:00d0:8"),
1708 +- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init_cm:0048:8"),
1709 +- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init_cm:0048:9"),
1710 +- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init_cm:0048:10"),
1711 +- DT_CLK(NULL, "utmi_p1_gfclk", "l3init_cm:0038:24"),
1712 +- DT_CLK(NULL, "utmi_p2_gfclk", "l3init_cm:0038:25"),
1713 ++ DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"),
1714 ++ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
1715 ++ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
1716 ++ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
1717 ++ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
1718 ++ DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"),
1719 ++ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
1720 ++ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"),
1721 ++ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"),
1722 ++ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"),
1723 ++ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"),
1724 ++ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"),
1725 ++ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"),
1726 ++ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
1727 ++ DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
1728 ++ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
1729 ++ DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
1730 ++ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
1731 ++ DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
1732 ++ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
1733 ++ DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
1734 ++ DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
1735 ++ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
1736 ++ DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
1737 ++ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
1738 ++ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
1739 ++ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
1740 ++ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
1741 ++ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
1742 ++ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"),
1743 ++ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"),
1744 ++ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"),
1745 ++ DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"),
1746 ++ DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"),
1747 ++ DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"),
1748 ++ DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"),
1749 ++ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"),
1750 ++ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"),
1751 ++ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"),
1752 ++ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"),
1753 ++ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"),
1754 ++ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"),
1755 ++ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"),
1756 ++ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"),
1757 ++ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"),
1758 ++ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"),
1759 ++ DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"),
1760 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"),
1761 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"),
1762 ++ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"),
1763 ++ DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"),
1764 ++ DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"),
1765 + { .node_name = NULL },
1766 + };
1767 +
1768 +diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
1769 +index 864c484bde1b4..08a85c559f795 100644
1770 +--- a/drivers/clk/ti/clkctrl.c
1771 ++++ b/drivers/clk/ti/clkctrl.c
1772 +@@ -511,10 +511,6 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
1773 + char *c;
1774 + u16 soc_mask = 0;
1775 +
1776 +- if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
1777 +- of_node_name_eq(node, "clk"))
1778 +- ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
1779 +-
1780 + addrp = of_get_address(node, 0, NULL, NULL);
1781 + addr = (u32)of_translate_address(node, addrp);
1782 +
1783 +diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1784 +index 35993ab921547..48de8d2b32f2c 100644
1785 +--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1786 ++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
1787 +@@ -944,6 +944,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
1788 + static void axi_chan_dump_lli(struct axi_dma_chan *chan,
1789 + struct axi_dma_hw_desc *desc)
1790 + {
1791 ++ if (!desc->lli) {
1792 ++ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
1793 ++ return;
1794 ++ }
1795 ++
1796 + dev_err(dchan2dev(&chan->vc.chan),
1797 + "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
1798 + le64_to_cpu(desc->lli->sar),
1799 +@@ -1011,6 +1016,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
1800 +
1801 + /* The completed descriptor currently is in the head of vc list */
1802 + vd = vchan_next_desc(&chan->vc);
1803 ++ if (!vd) {
1804 ++ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1805 ++ axi_chan_name(chan));
1806 ++ goto out;
1807 ++ }
1808 +
1809 + if (chan->cyclic) {
1810 + desc = vd_to_axi_desc(vd);
1811 +@@ -1040,6 +1050,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
1812 + axi_chan_start_first_queued(chan);
1813 + }
1814 +
1815 ++out:
1816 + spin_unlock_irqrestore(&chan->vc.lock, flags);
1817 + }
1818 +
1819 +diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
1820 +index 4357d2395e6b7..60115d8d40832 100644
1821 +--- a/drivers/dma/sprd-dma.c
1822 ++++ b/drivers/dma/sprd-dma.c
1823 +@@ -1236,11 +1236,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
1824 + {
1825 + struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
1826 + struct sprd_dma_chn *c, *cn;
1827 +- int ret;
1828 +
1829 +- ret = pm_runtime_get_sync(&pdev->dev);
1830 +- if (ret < 0)
1831 +- return ret;
1832 ++ pm_runtime_get_sync(&pdev->dev);
1833 +
1834 + /* explicitly free the irq */
1835 + if (sdev->irq > 0)
1836 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
1837 +index 589ddab61c2a9..7aad0340f7946 100644
1838 +--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
1839 ++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
1840 +@@ -500,7 +500,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
1841 + int afmt_inst;
1842 +
1843 + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
1844 +- if (eng_id <= ENGINE_ID_DIGE) {
1845 ++ if (eng_id <= ENGINE_ID_DIGB) {
1846 + vpg_inst = eng_id;
1847 + afmt_inst = eng_id;
1848 + } else
1849 +diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
1850 +index 9b84df34a6a12..8cf3352d88582 100644
1851 +--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
1852 ++++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
1853 +@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
1854 +
1855 + drm_kms_helper_poll_init(drm);
1856 +
1857 +- drm_bridge_connector_enable_hpd(kms->connector);
1858 +-
1859 + ret = drm_dev_register(drm, 0);
1860 + if (ret)
1861 + goto cleanup_crtc;
1862 +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
1863 +index c98525d60df5b..a56607501d360 100644
1864 +--- a/drivers/gpu/drm/meson/meson_drv.c
1865 ++++ b/drivers/gpu/drm/meson/meson_drv.c
1866 +@@ -114,8 +114,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
1867 + for_each_endpoint_of_node(dev->of_node, ep) {
1868 + /* If the endpoint node exists, consider it enabled */
1869 + remote = of_graph_get_remote_port(ep);
1870 +- if (remote)
1871 ++ if (remote) {
1872 ++ of_node_put(remote);
1873 ++ of_node_put(ep);
1874 + return true;
1875 ++ }
1876 + }
1877 +
1878 + return false;
1879 +diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
1880 +index 259f3e6bec90a..bb7e109534de1 100644
1881 +--- a/drivers/gpu/drm/meson/meson_viu.c
1882 ++++ b/drivers/gpu/drm/meson/meson_viu.c
1883 +@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
1884 + priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
1885 +
1886 + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
1887 +- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
1888 +- VIU_OSD_BLEND_REORDER(1, 0) |
1889 +- VIU_OSD_BLEND_REORDER(2, 0) |
1890 +- VIU_OSD_BLEND_REORDER(3, 0) |
1891 +- VIU_OSD_BLEND_DIN_EN(1) |
1892 +- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
1893 +- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
1894 +- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
1895 +- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
1896 +- VIU_OSD_BLEND_HOLD_LINES(4),
1897 +- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
1898 ++ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
1899 ++ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
1900 ++ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
1901 ++ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
1902 ++ (u32)VIU_OSD_BLEND_DIN_EN(1) |
1903 ++ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
1904 ++ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
1905 ++ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
1906 ++ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
1907 ++ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
1908 ++ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
1909 +
1910 + writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
1911 + priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
1912 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1913 +index 88d262ba648cf..76156833a832a 100644
1914 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1915 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
1916 +@@ -2605,6 +2605,27 @@ nv172_chipset = {
1917 + .fifo = { 0x00000001, ga102_fifo_new },
1918 + };
1919 +
1920 ++static const struct nvkm_device_chip
1921 ++nv173_chipset = {
1922 ++ .name = "GA103",
1923 ++ .bar = { 0x00000001, tu102_bar_new },
1924 ++ .bios = { 0x00000001, nvkm_bios_new },
1925 ++ .devinit = { 0x00000001, ga100_devinit_new },
1926 ++ .fb = { 0x00000001, ga102_fb_new },
1927 ++ .gpio = { 0x00000001, ga102_gpio_new },
1928 ++ .i2c = { 0x00000001, gm200_i2c_new },
1929 ++ .imem = { 0x00000001, nv50_instmem_new },
1930 ++ .mc = { 0x00000001, ga100_mc_new },
1931 ++ .mmu = { 0x00000001, tu102_mmu_new },
1932 ++ .pci = { 0x00000001, gp100_pci_new },
1933 ++ .privring = { 0x00000001, gm200_privring_new },
1934 ++ .timer = { 0x00000001, gk20a_timer_new },
1935 ++ .top = { 0x00000001, ga100_top_new },
1936 ++ .disp = { 0x00000001, ga102_disp_new },
1937 ++ .dma = { 0x00000001, gv100_dma_new },
1938 ++ .fifo = { 0x00000001, ga102_fifo_new },
1939 ++};
1940 ++
1941 + static const struct nvkm_device_chip
1942 + nv174_chipset = {
1943 + .name = "GA104",
1944 +@@ -3092,6 +3113,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
1945 + case 0x167: device->chip = &nv167_chipset; break;
1946 + case 0x168: device->chip = &nv168_chipset; break;
1947 + case 0x172: device->chip = &nv172_chipset; break;
1948 ++ case 0x173: device->chip = &nv173_chipset; break;
1949 + case 0x174: device->chip = &nv174_chipset; break;
1950 + case 0x176: device->chip = &nv176_chipset; break;
1951 + case 0x177: device->chip = &nv177_chipset; break;
1952 +diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1953 +index 4f5efcace68ea..51edb4244af7c 100644
1954 +--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1955 ++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
1956 +@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
1957 + struct drm_display_mode *mode)
1958 + {
1959 + struct mipi_dsi_device *device = dsi->device;
1960 +- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
1961 ++ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
1962 + u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
1963 + u32 basic_ctl = 0;
1964 + size_t bytes;
1965 +@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
1966 + * (4 bytes). Its minimal size is therefore 10 bytes
1967 + */
1968 + #define HSA_PACKET_OVERHEAD 10
1969 +- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
1970 ++ hsa = max(HSA_PACKET_OVERHEAD,
1971 + (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
1972 +
1973 + /*
1974 +@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
1975 + * therefore 6 bytes
1976 + */
1977 + #define HBP_PACKET_OVERHEAD 6
1978 +- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
1979 ++ hbp = max(HBP_PACKET_OVERHEAD,
1980 + (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
1981 +
1982 + /*
1983 +@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
1984 + * 16 bytes
1985 + */
1986 + #define HFP_PACKET_OVERHEAD 16
1987 +- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
1988 ++ hfp = max(HFP_PACKET_OVERHEAD,
1989 + (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
1990 +
1991 + /*
1992 +@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
1993 + * bytes). Its minimal size is therefore 10 bytes.
1994 + */
1995 + #define HBLK_PACKET_OVERHEAD 10
1996 +- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
1997 ++ hblk = max(HBLK_PACKET_OVERHEAD,
1998 + (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
1999 + HBLK_PACKET_OVERHEAD);
2000 +
2001 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2002 +index 900edaf5d68ee..33e78f56123e5 100644
2003 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
2004 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
2005 +@@ -987,7 +987,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
2006 + /*
2007 + * We might need to add a TTM.
2008 + */
2009 +- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
2010 ++ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
2011 + ret = ttm_tt_create(bo, true);
2012 + if (ret)
2013 + return ret;
2014 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
2015 +index f382444dc2dba..a14c48de4446c 100644
2016 +--- a/drivers/hid/hid-multitouch.c
2017 ++++ b/drivers/hid/hid-multitouch.c
2018 +@@ -194,6 +194,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
2019 + #define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
2020 + #define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016
2021 + #define MT_CLS_WIN_8_NO_STICKY_FINGERS 0x0017
2022 ++#define MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU 0x0018
2023 +
2024 + /* vendor specific classes */
2025 + #define MT_CLS_3M 0x0101
2026 +@@ -286,6 +287,15 @@ static const struct mt_class mt_classes[] = {
2027 + MT_QUIRK_WIN8_PTP_BUTTONS |
2028 + MT_QUIRK_FORCE_MULTI_INPUT,
2029 + .export_all_inputs = true },
2030 ++ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
2031 ++ .quirks = MT_QUIRK_IGNORE_DUPLICATES |
2032 ++ MT_QUIRK_HOVERING |
2033 ++ MT_QUIRK_CONTACT_CNT_ACCURATE |
2034 ++ MT_QUIRK_STICKY_FINGERS |
2035 ++ MT_QUIRK_WIN8_PTP_BUTTONS |
2036 ++ MT_QUIRK_FORCE_MULTI_INPUT |
2037 ++ MT_QUIRK_NOT_SEEN_MEANS_UP,
2038 ++ .export_all_inputs = true },
2039 + { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
2040 + .quirks = MT_QUIRK_ALWAYS_VALID |
2041 + MT_QUIRK_IGNORE_DUPLICATES |
2042 +@@ -783,6 +793,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2043 + case HID_DG_CONFIDENCE:
2044 + if ((cls->name == MT_CLS_WIN_8 ||
2045 + cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
2046 ++ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU ||
2047 + cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
2048 + (field->application == HID_DG_TOUCHPAD ||
2049 + field->application == HID_DG_TOUCHSCREEN))
2050 +@@ -2033,7 +2044,7 @@ static const struct hid_device_id mt_devices[] = {
2051 + USB_DEVICE_ID_LENOVO_X1_TAB3) },
2052 +
2053 + /* Lenovo X12 TAB Gen 1 */
2054 +- { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
2055 ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
2056 + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
2057 + USB_VENDOR_ID_LENOVO,
2058 + USB_DEVICE_ID_LENOVO_X12_TAB) },
2059 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
2060 +index e5b79bdb9851c..794b29639035e 100644
2061 +--- a/drivers/hwtracing/coresight/coresight-etm4x.h
2062 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
2063 +@@ -7,6 +7,7 @@
2064 + #define _CORESIGHT_CORESIGHT_ETM_H
2065 +
2066 + #include <asm/local.h>
2067 ++#include <linux/const.h>
2068 + #include <linux/spinlock.h>
2069 + #include <linux/types.h>
2070 + #include "coresight-priv.h"
2071 +@@ -417,7 +418,7 @@
2072 + ({ \
2073 + u64 __val; \
2074 + \
2075 +- if (__builtin_constant_p((offset))) \
2076 ++ if (__is_constexpr((offset))) \
2077 + __val = read_etm4x_sysreg_const_offset((offset)); \
2078 + else \
2079 + __val = etm4x_sysreg_read((offset), true, (_64bit)); \
2080 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
2081 +index 3576b63a6c037..3f40995c0ca9a 100644
2082 +--- a/drivers/i2c/busses/i2c-imx.c
2083 ++++ b/drivers/i2c/busses/i2c-imx.c
2084 +@@ -1487,9 +1487,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
2085 + struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
2086 + int irq, ret;
2087 +
2088 +- ret = pm_runtime_resume_and_get(&pdev->dev);
2089 +- if (ret < 0)
2090 +- return ret;
2091 ++ ret = pm_runtime_get_sync(&pdev->dev);
2092 +
2093 + /* remove adapter */
2094 + dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
2095 +@@ -1498,17 +1496,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
2096 + if (i2c_imx->dma)
2097 + i2c_imx_dma_free(i2c_imx);
2098 +
2099 +- /* setup chip registers to defaults */
2100 +- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
2101 +- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
2102 +- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
2103 +- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
2104 ++ if (ret == 0) {
2105 ++ /* setup chip registers to defaults */
2106 ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
2107 ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
2108 ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
2109 ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
2110 ++ clk_disable(i2c_imx->clk);
2111 ++ }
2112 +
2113 + clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
2114 + irq = platform_get_irq(pdev, 0);
2115 + if (irq >= 0)
2116 + free_irq(irq, i2c_imx);
2117 +- clk_disable_unprepare(i2c_imx->clk);
2118 ++
2119 ++ clk_unprepare(i2c_imx->clk);
2120 +
2121 + pm_runtime_put_noidle(&pdev->dev);
2122 + pm_runtime_disable(&pdev->dev);
2123 +diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
2124 +index b5a70cbe94aac..8723898701063 100644
2125 +--- a/drivers/infiniband/sw/rxe/rxe_param.h
2126 ++++ b/drivers/infiniband/sw/rxe/rxe_param.h
2127 +@@ -103,6 +103,12 @@ enum rxe_device_param {
2128 + RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
2129 + RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
2130 +
2131 ++ /* Max number of interations of each tasklet
2132 ++ * before yielding the cpu to let other
2133 ++ * work make progress
2134 ++ */
2135 ++ RXE_MAX_ITERATIONS = 1024,
2136 ++
2137 + /* Delay before calling arbiter timer */
2138 + RXE_NSEC_ARB_TIMER_DELAY = 200,
2139 +
2140 +diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
2141 +index 6951fdcb31bf5..568cf56c236bc 100644
2142 +--- a/drivers/infiniband/sw/rxe/rxe_task.c
2143 ++++ b/drivers/infiniband/sw/rxe/rxe_task.c
2144 +@@ -8,7 +8,7 @@
2145 + #include <linux/interrupt.h>
2146 + #include <linux/hardirq.h>
2147 +
2148 +-#include "rxe_task.h"
2149 ++#include "rxe.h"
2150 +
2151 + int __rxe_do_task(struct rxe_task *task)
2152 +
2153 +@@ -34,6 +34,7 @@ void rxe_do_task(struct tasklet_struct *t)
2154 + int ret;
2155 + unsigned long flags;
2156 + struct rxe_task *task = from_tasklet(task, t, tasklet);
2157 ++ unsigned int iterations = RXE_MAX_ITERATIONS;
2158 +
2159 + spin_lock_irqsave(&task->state_lock, flags);
2160 + switch (task->state) {
2161 +@@ -62,13 +63,20 @@ void rxe_do_task(struct tasklet_struct *t)
2162 + spin_lock_irqsave(&task->state_lock, flags);
2163 + switch (task->state) {
2164 + case TASK_STATE_BUSY:
2165 +- if (ret)
2166 ++ if (ret) {
2167 + task->state = TASK_STATE_START;
2168 +- else
2169 ++ } else if (iterations--) {
2170 + cont = 1;
2171 ++ } else {
2172 ++ /* reschedule the tasklet and exit
2173 ++ * the loop to give up the cpu
2174 ++ */
2175 ++ tasklet_schedule(&task->tasklet);
2176 ++ task->state = TASK_STATE_START;
2177 ++ }
2178 + break;
2179 +
2180 +- /* soneone tried to run the task since the last time we called
2181 ++ /* someone tried to run the task since the last time we called
2182 + * func, so we will call one more time regardless of the
2183 + * return value
2184 + */
2185 +diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
2186 +index cbe0dd4129121..4b7eee01c6aad 100644
2187 +--- a/drivers/input/touchscreen/exc3000.c
2188 ++++ b/drivers/input/touchscreen/exc3000.c
2189 +@@ -220,6 +220,7 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
2190 + {
2191 + u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
2192 + int ret;
2193 ++ unsigned long time_left;
2194 +
2195 + mutex_lock(&data->query_lock);
2196 +
2197 +@@ -233,9 +234,9 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
2198 + goto out_unlock;
2199 +
2200 + if (response) {
2201 +- ret = wait_for_completion_timeout(&data->wait_event,
2202 +- timeout * HZ);
2203 +- if (ret <= 0) {
2204 ++ time_left = wait_for_completion_timeout(&data->wait_event,
2205 ++ timeout * HZ);
2206 ++ if (time_left == 0) {
2207 + ret = -ETIMEDOUT;
2208 + goto out_unlock;
2209 + }
2210 +diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
2211 +index be066c1503d37..ba3115fd0f86a 100644
2212 +--- a/drivers/iommu/io-pgtable-arm-v7s.c
2213 ++++ b/drivers/iommu/io-pgtable-arm-v7s.c
2214 +@@ -182,14 +182,8 @@ static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
2215 + (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
2216 + }
2217 +
2218 +-static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
2219 +- struct io_pgtable_cfg *cfg)
2220 ++static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
2221 + {
2222 +- arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
2223 +-
2224 +- if (!arm_v7s_is_mtk_enabled(cfg))
2225 +- return pte;
2226 +-
2227 + if (paddr & BIT_ULL(32))
2228 + pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
2229 + if (paddr & BIT_ULL(33))
2230 +@@ -199,6 +193,17 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
2231 + return pte;
2232 + }
2233 +
2234 ++static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
2235 ++ struct io_pgtable_cfg *cfg)
2236 ++{
2237 ++ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
2238 ++
2239 ++ if (arm_v7s_is_mtk_enabled(cfg))
2240 ++ return to_mtk_iopte(paddr, pte);
2241 ++
2242 ++ return pte;
2243 ++}
2244 ++
2245 + static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
2246 + struct io_pgtable_cfg *cfg)
2247 + {
2248 +@@ -240,10 +245,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
2249 + dma_addr_t dma;
2250 + size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
2251 + void *table = NULL;
2252 ++ gfp_t gfp_l1;
2253 ++
2254 ++ /*
2255 ++ * ARM_MTK_TTBR_EXT extend the translation table base support larger
2256 ++ * memory address.
2257 ++ */
2258 ++ gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
2259 ++ GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
2260 +
2261 + if (lvl == 1)
2262 +- table = (void *)__get_free_pages(
2263 +- __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
2264 ++ table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
2265 + else if (lvl == 2)
2266 + table = kmem_cache_zalloc(data->l2_tables, gfp);
2267 +
2268 +@@ -251,7 +263,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
2269 + return NULL;
2270 +
2271 + phys = virt_to_phys(table);
2272 +- if (phys != (arm_v7s_iopte)phys) {
2273 ++ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
2274 ++ phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
2275 + /* Doesn't fit in PTE */
2276 + dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
2277 + goto out_free;
2278 +@@ -457,9 +470,14 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
2279 + arm_v7s_iopte curr,
2280 + struct io_pgtable_cfg *cfg)
2281 + {
2282 ++ phys_addr_t phys = virt_to_phys(table);
2283 + arm_v7s_iopte old, new;
2284 +
2285 +- new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
2286 ++ new = phys | ARM_V7S_PTE_TYPE_TABLE;
2287 ++
2288 ++ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
2289 ++ new = to_mtk_iopte(phys, new);
2290 ++
2291 + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
2292 + new |= ARM_V7S_ATTR_NS_TABLE;
2293 +
2294 +@@ -779,6 +797,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
2295 + void *cookie)
2296 + {
2297 + struct arm_v7s_io_pgtable *data;
2298 ++ slab_flags_t slab_flag;
2299 ++ phys_addr_t paddr;
2300 +
2301 + if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
2302 + return NULL;
2303 +@@ -788,7 +808,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
2304 +
2305 + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
2306 + IO_PGTABLE_QUIRK_NO_PERMS |
2307 +- IO_PGTABLE_QUIRK_ARM_MTK_EXT))
2308 ++ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
2309 ++ IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
2310 + return NULL;
2311 +
2312 + /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
2313 +@@ -796,15 +817,27 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
2314 + !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
2315 + return NULL;
2316 +
2317 ++ if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
2318 ++ !arm_v7s_is_mtk_enabled(cfg))
2319 ++ return NULL;
2320 ++
2321 + data = kmalloc(sizeof(*data), GFP_KERNEL);
2322 + if (!data)
2323 + return NULL;
2324 +
2325 + spin_lock_init(&data->split_lock);
2326 ++
2327 ++ /*
2328 ++ * ARM_MTK_TTBR_EXT extend the translation table base support larger
2329 ++ * memory address.
2330 ++ */
2331 ++ slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
2332 ++ 0 : ARM_V7S_TABLE_SLAB_FLAGS;
2333 ++
2334 + data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
2335 + ARM_V7S_TABLE_SIZE(2, cfg),
2336 + ARM_V7S_TABLE_SIZE(2, cfg),
2337 +- ARM_V7S_TABLE_SLAB_FLAGS, NULL);
2338 ++ slab_flag, NULL);
2339 + if (!data->l2_tables)
2340 + goto out_free_data;
2341 +
2342 +@@ -850,12 +883,16 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
2343 + wmb();
2344 +
2345 + /* TTBR */
2346 +- cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
2347 +- (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
2348 +- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
2349 +- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
2350 +- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
2351 +- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
2352 ++ paddr = virt_to_phys(data->pgd);
2353 ++ if (arm_v7s_is_mtk_enabled(cfg))
2354 ++ cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
2355 ++ else
2356 ++ cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
2357 ++ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
2358 ++ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
2359 ++ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
2360 ++ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
2361 ++ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
2362 + return &data->iop;
2363 +
2364 + out_free_data:
2365 +diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
2366 +index e1f771c72fc4c..ad3e2c1b3c87b 100644
2367 +--- a/drivers/irqchip/irq-tegra.c
2368 ++++ b/drivers/irqchip/irq-tegra.c
2369 +@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
2370 + lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
2371 +
2372 + /* Disable COP interrupts */
2373 +- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
2374 ++ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
2375 +
2376 + /* Disable CPU interrupts */
2377 +- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
2378 ++ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
2379 +
2380 + /* Enable the wakeup sources of ictlr */
2381 + writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
2382 +@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
2383 +
2384 + writel_relaxed(lic->cpu_iep[i],
2385 + ictlr + ICTLR_CPU_IEP_CLASS);
2386 +- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
2387 ++ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
2388 + writel_relaxed(lic->cpu_ier[i],
2389 + ictlr + ICTLR_CPU_IER_SET);
2390 + writel_relaxed(lic->cop_iep[i],
2391 + ictlr + ICTLR_COP_IEP_CLASS);
2392 +- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
2393 ++ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
2394 + writel_relaxed(lic->cop_ier[i],
2395 + ictlr + ICTLR_COP_IER_SET);
2396 + }
2397 +@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
2398 + lic->base[i] = base;
2399 +
2400 + /* Disable all interrupts */
2401 +- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
2402 ++ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
2403 + /* All interrupts target IRQ */
2404 + writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
2405 +
2406 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2407 +index 4bfaf7d4977dd..33946adb0d6f6 100644
2408 +--- a/drivers/md/md.c
2409 ++++ b/drivers/md/md.c
2410 +@@ -9467,6 +9467,7 @@ void md_reap_sync_thread(struct mddev *mddev)
2411 + wake_up(&resync_wait);
2412 + /* flag recovery needed just to double check */
2413 + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2414 ++ sysfs_notify_dirent_safe(mddev->sysfs_completed);
2415 + sysfs_notify_dirent_safe(mddev->sysfs_action);
2416 + md_new_event(mddev);
2417 + if (mddev->event_work.func)
2418 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2419 +index b58984ddca13b..19e497a7e7475 100644
2420 +--- a/drivers/md/raid5.c
2421 ++++ b/drivers/md/raid5.c
2422 +@@ -2864,10 +2864,10 @@ static void raid5_end_write_request(struct bio *bi)
2423 + if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2424 + clear_bit(R5_LOCKED, &sh->dev[i].flags);
2425 + set_bit(STRIPE_HANDLE, &sh->state);
2426 +- raid5_release_stripe(sh);
2427 +
2428 + if (sh->batch_head && sh != sh->batch_head)
2429 + raid5_release_stripe(sh->batch_head);
2430 ++ raid5_release_stripe(sh);
2431 + }
2432 +
2433 + static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2434 +diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
2435 +index a591dd315ebcc..03fc82cb3fead 100644
2436 +--- a/drivers/media/platform/qcom/venus/pm_helpers.c
2437 ++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
2438 +@@ -875,7 +875,7 @@ static int vcodec_domains_get(struct venus_core *core)
2439 + }
2440 +
2441 + skip_pmdomains:
2442 +- if (!core->has_opp_table)
2443 ++ if (!core->res->opp_pmdomain)
2444 + return 0;
2445 +
2446 + /* Attach the power domain for setting performance state */
2447 +@@ -1007,6 +1007,10 @@ static int core_get_v4(struct venus_core *core)
2448 + if (ret)
2449 + return ret;
2450 +
2451 ++ ret = vcodec_domains_get(core);
2452 ++ if (ret)
2453 ++ return ret;
2454 ++
2455 + if (core->res->opp_pmdomain) {
2456 + ret = devm_pm_opp_of_add_table(dev);
2457 + if (!ret) {
2458 +@@ -1017,10 +1021,6 @@ static int core_get_v4(struct venus_core *core)
2459 + }
2460 + }
2461 +
2462 +- ret = vcodec_domains_get(core);
2463 +- if (ret)
2464 +- return ret;
2465 +-
2466 + return 0;
2467 + }
2468 +
2469 +diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
2470 +index 4cb829d5d873c..2e4dcfebf19af 100644
2471 +--- a/drivers/misc/cxl/irq.c
2472 ++++ b/drivers/misc/cxl/irq.c
2473 +@@ -349,6 +349,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
2474 +
2475 + out:
2476 + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
2477 ++ bitmap_free(ctx->irq_bitmap);
2478 + afu_irq_name_free(ctx);
2479 + return -ENOMEM;
2480 + }
2481 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
2482 +index 14da87b38e835..8132200dca67f 100644
2483 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
2484 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
2485 +@@ -3318,19 +3318,19 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
2486 + u32 nic_qm_err_cfg, irq_handler_offset;
2487 + u32 q_off;
2488 +
2489 +- mtr_base_en_lo = lower_32_bits(CFG_BASE +
2490 ++ mtr_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
2491 + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
2492 + mtr_base_en_hi = upper_32_bits(CFG_BASE +
2493 + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
2494 +- so_base_en_lo = lower_32_bits(CFG_BASE +
2495 ++ so_base_en_lo = lower_32_bits((CFG_BASE & U32_MAX) +
2496 + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
2497 + so_base_en_hi = upper_32_bits(CFG_BASE +
2498 + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
2499 +- mtr_base_ws_lo = lower_32_bits(CFG_BASE +
2500 ++ mtr_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
2501 + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
2502 + mtr_base_ws_hi = upper_32_bits(CFG_BASE +
2503 + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
2504 +- so_base_ws_lo = lower_32_bits(CFG_BASE +
2505 ++ so_base_ws_lo = lower_32_bits((CFG_BASE & U32_MAX) +
2506 + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
2507 + so_base_ws_hi = upper_32_bits(CFG_BASE +
2508 + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
2509 +@@ -5744,15 +5744,17 @@ static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
2510 + {
2511 + struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
2512 + struct gaudi_device *gaudi = hdev->asic_specific;
2513 +- u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT +
2514 +- ((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2));
2515 ++ u32 nic_queue_offset, nic_mask_q_id;
2516 +
2517 + if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) &&
2518 +- (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) &&
2519 +- (!(gaudi->hw_cap_initialized & nic_mask_q_id))) {
2520 +- dev_err(hdev->dev, "h/w queue %d is disabled\n",
2521 +- parser->hw_queue_id);
2522 +- return -EINVAL;
2523 ++ (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) {
2524 ++ nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0;
2525 ++ nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT + (nic_queue_offset >> 2));
2526 ++
2527 ++ if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) {
2528 ++ dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
2529 ++ return -EINVAL;
2530 ++ }
2531 + }
2532 +
2533 + /* For internal queue jobs just check if CB address is valid */
2534 +diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
2535 +index 488eeb2811aeb..976d051071dc3 100644
2536 +--- a/drivers/misc/uacce/uacce.c
2537 ++++ b/drivers/misc/uacce/uacce.c
2538 +@@ -9,43 +9,38 @@
2539 +
2540 + static struct class *uacce_class;
2541 + static dev_t uacce_devt;
2542 +-static DEFINE_MUTEX(uacce_mutex);
2543 + static DEFINE_XARRAY_ALLOC(uacce_xa);
2544 +
2545 +-static int uacce_start_queue(struct uacce_queue *q)
2546 ++/*
2547 ++ * If the parent driver or the device disappears, the queue state is invalid and
2548 ++ * ops are not usable anymore.
2549 ++ */
2550 ++static bool uacce_queue_is_valid(struct uacce_queue *q)
2551 + {
2552 +- int ret = 0;
2553 ++ return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
2554 ++}
2555 +
2556 +- mutex_lock(&uacce_mutex);
2557 ++static int uacce_start_queue(struct uacce_queue *q)
2558 ++{
2559 ++ int ret;
2560 +
2561 +- if (q->state != UACCE_Q_INIT) {
2562 +- ret = -EINVAL;
2563 +- goto out_with_lock;
2564 +- }
2565 ++ if (q->state != UACCE_Q_INIT)
2566 ++ return -EINVAL;
2567 +
2568 + if (q->uacce->ops->start_queue) {
2569 + ret = q->uacce->ops->start_queue(q);
2570 + if (ret < 0)
2571 +- goto out_with_lock;
2572 ++ return ret;
2573 + }
2574 +
2575 + q->state = UACCE_Q_STARTED;
2576 +-
2577 +-out_with_lock:
2578 +- mutex_unlock(&uacce_mutex);
2579 +-
2580 +- return ret;
2581 ++ return 0;
2582 + }
2583 +
2584 + static int uacce_put_queue(struct uacce_queue *q)
2585 + {
2586 + struct uacce_device *uacce = q->uacce;
2587 +
2588 +- mutex_lock(&uacce_mutex);
2589 +-
2590 +- if (q->state == UACCE_Q_ZOMBIE)
2591 +- goto out;
2592 +-
2593 + if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
2594 + uacce->ops->stop_queue(q);
2595 +
2596 +@@ -54,8 +49,6 @@ static int uacce_put_queue(struct uacce_queue *q)
2597 + uacce->ops->put_queue(q);
2598 +
2599 + q->state = UACCE_Q_ZOMBIE;
2600 +-out:
2601 +- mutex_unlock(&uacce_mutex);
2602 +
2603 + return 0;
2604 + }
2605 +@@ -65,20 +58,36 @@ static long uacce_fops_unl_ioctl(struct file *filep,
2606 + {
2607 + struct uacce_queue *q = filep->private_data;
2608 + struct uacce_device *uacce = q->uacce;
2609 ++ long ret = -ENXIO;
2610 ++
2611 ++ /*
2612 ++ * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
2613 ++ * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
2614 ++ * gets called with mmap_lock held, by taking uacce->mutex instead of
2615 ++ * q->mutex. Doing this in uacce_fops_mmap() is not possible because
2616 ++ * uacce_fops_open() calls iommu_sva_bind_device(), which takes
2617 ++ * mmap_lock, while holding uacce->mutex.
2618 ++ */
2619 ++ mutex_lock(&uacce->mutex);
2620 ++ if (!uacce_queue_is_valid(q))
2621 ++ goto out_unlock;
2622 +
2623 + switch (cmd) {
2624 + case UACCE_CMD_START_Q:
2625 +- return uacce_start_queue(q);
2626 +-
2627 ++ ret = uacce_start_queue(q);
2628 ++ break;
2629 + case UACCE_CMD_PUT_Q:
2630 +- return uacce_put_queue(q);
2631 +-
2632 ++ ret = uacce_put_queue(q);
2633 ++ break;
2634 + default:
2635 +- if (!uacce->ops->ioctl)
2636 +- return -EINVAL;
2637 +-
2638 +- return uacce->ops->ioctl(q, cmd, arg);
2639 ++ if (uacce->ops->ioctl)
2640 ++ ret = uacce->ops->ioctl(q, cmd, arg);
2641 ++ else
2642 ++ ret = -EINVAL;
2643 + }
2644 ++out_unlock:
2645 ++ mutex_unlock(&uacce->mutex);
2646 ++ return ret;
2647 + }
2648 +
2649 + #ifdef CONFIG_COMPAT
2650 +@@ -136,6 +145,13 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
2651 + if (!q)
2652 + return -ENOMEM;
2653 +
2654 ++ mutex_lock(&uacce->mutex);
2655 ++
2656 ++ if (!uacce->parent) {
2657 ++ ret = -EINVAL;
2658 ++ goto out_with_mem;
2659 ++ }
2660 ++
2661 + ret = uacce_bind_queue(uacce, q);
2662 + if (ret)
2663 + goto out_with_mem;
2664 +@@ -152,10 +168,9 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
2665 + filep->private_data = q;
2666 + uacce->inode = inode;
2667 + q->state = UACCE_Q_INIT;
2668 +-
2669 +- mutex_lock(&uacce->queues_lock);
2670 ++ mutex_init(&q->mutex);
2671 + list_add(&q->list, &uacce->queues);
2672 +- mutex_unlock(&uacce->queues_lock);
2673 ++ mutex_unlock(&uacce->mutex);
2674 +
2675 + return 0;
2676 +
2677 +@@ -163,18 +178,20 @@ out_with_bond:
2678 + uacce_unbind_queue(q);
2679 + out_with_mem:
2680 + kfree(q);
2681 ++ mutex_unlock(&uacce->mutex);
2682 + return ret;
2683 + }
2684 +
2685 + static int uacce_fops_release(struct inode *inode, struct file *filep)
2686 + {
2687 + struct uacce_queue *q = filep->private_data;
2688 ++ struct uacce_device *uacce = q->uacce;
2689 +
2690 +- mutex_lock(&q->uacce->queues_lock);
2691 +- list_del(&q->list);
2692 +- mutex_unlock(&q->uacce->queues_lock);
2693 ++ mutex_lock(&uacce->mutex);
2694 + uacce_put_queue(q);
2695 + uacce_unbind_queue(q);
2696 ++ list_del(&q->list);
2697 ++ mutex_unlock(&uacce->mutex);
2698 + kfree(q);
2699 +
2700 + return 0;
2701 +@@ -217,10 +234,9 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
2702 + vma->vm_private_data = q;
2703 + qfr->type = type;
2704 +
2705 +- mutex_lock(&uacce_mutex);
2706 +-
2707 +- if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
2708 +- ret = -EINVAL;
2709 ++ mutex_lock(&q->mutex);
2710 ++ if (!uacce_queue_is_valid(q)) {
2711 ++ ret = -ENXIO;
2712 + goto out_with_lock;
2713 + }
2714 +
2715 +@@ -248,12 +264,12 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
2716 + }
2717 +
2718 + q->qfrs[type] = qfr;
2719 +- mutex_unlock(&uacce_mutex);
2720 ++ mutex_unlock(&q->mutex);
2721 +
2722 + return ret;
2723 +
2724 + out_with_lock:
2725 +- mutex_unlock(&uacce_mutex);
2726 ++ mutex_unlock(&q->mutex);
2727 + kfree(qfr);
2728 + return ret;
2729 + }
2730 +@@ -262,12 +278,20 @@ static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
2731 + {
2732 + struct uacce_queue *q = file->private_data;
2733 + struct uacce_device *uacce = q->uacce;
2734 ++ __poll_t ret = 0;
2735 ++
2736 ++ mutex_lock(&q->mutex);
2737 ++ if (!uacce_queue_is_valid(q))
2738 ++ goto out_unlock;
2739 +
2740 + poll_wait(file, &q->wait, wait);
2741 ++
2742 + if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
2743 +- return EPOLLIN | EPOLLRDNORM;
2744 ++ ret = EPOLLIN | EPOLLRDNORM;
2745 +
2746 +- return 0;
2747 ++out_unlock:
2748 ++ mutex_unlock(&q->mutex);
2749 ++ return ret;
2750 + }
2751 +
2752 + static const struct file_operations uacce_fops = {
2753 +@@ -450,7 +474,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
2754 + goto err_with_uacce;
2755 +
2756 + INIT_LIST_HEAD(&uacce->queues);
2757 +- mutex_init(&uacce->queues_lock);
2758 ++ mutex_init(&uacce->mutex);
2759 + device_initialize(&uacce->dev);
2760 + uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
2761 + uacce->dev.class = uacce_class;
2762 +@@ -507,13 +531,23 @@ void uacce_remove(struct uacce_device *uacce)
2763 + if (uacce->inode)
2764 + unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
2765 +
2766 ++ /*
2767 ++ * uacce_fops_open() may be running concurrently, even after we remove
2768 ++ * the cdev. Holding uacce->mutex ensures that open() does not obtain a
2769 ++ * removed uacce device.
2770 ++ */
2771 ++ mutex_lock(&uacce->mutex);
2772 + /* ensure no open queue remains */
2773 +- mutex_lock(&uacce->queues_lock);
2774 + list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
2775 ++ /*
2776 ++ * Taking q->mutex ensures that fops do not use the defunct
2777 ++ * uacce->ops after the queue is disabled.
2778 ++ */
2779 ++ mutex_lock(&q->mutex);
2780 + uacce_put_queue(q);
2781 ++ mutex_unlock(&q->mutex);
2782 + uacce_unbind_queue(q);
2783 + }
2784 +- mutex_unlock(&uacce->queues_lock);
2785 +
2786 + /* disable sva now since no opened queues */
2787 + uacce_disable_sva(uacce);
2788 +@@ -521,6 +555,13 @@ void uacce_remove(struct uacce_device *uacce)
2789 + if (uacce->cdev)
2790 + cdev_device_del(uacce->cdev, &uacce->dev);
2791 + xa_erase(&uacce_xa, uacce->dev_id);
2792 ++ /*
2793 ++ * uacce exists as long as there are open fds, but ops will be freed
2794 ++ * now. Ensure that bugs cause NULL deref rather than use-after-free.
2795 ++ */
2796 ++ uacce->ops = NULL;
2797 ++ uacce->parent = NULL;
2798 ++ mutex_unlock(&uacce->mutex);
2799 + put_device(&uacce->dev);
2800 + }
2801 + EXPORT_SYMBOL_GPL(uacce_remove);
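Note on the uacce change above: the hunk replaces the global uacce_mutex and the per-device queues_lock with a per-device uacce->mutex plus a per-queue q->mutex, and gates every file operation on uacce_queue_is_valid(), so uacce_remove() can clear uacce->ops and uacce->parent without racing open/ioctl/mmap/poll. The following is only a minimal userspace sketch of that pattern (per-object lock guarding a validity check before any ops call); the names are made up and this is not the kernel API.

#include <pthread.h>
#include <stddef.h>

struct ops { int (*ioctl)(void *priv, unsigned int cmd); };

struct queue {
	pthread_mutex_t lock;
	int valid;              /* cleared by the remove path */
	const struct ops *ops;  /* set to NULL on removal */
	void *priv;
};

/* "fops" side: only touch ops while holding the lock and after
 * confirming the queue is still valid. */
int queue_ioctl(struct queue *q, unsigned int cmd)
{
	int ret = -1;           /* stand-in for -ENXIO */

	pthread_mutex_lock(&q->lock);
	if (q->valid && q->ops && q->ops->ioctl)
		ret = q->ops->ioctl(q->priv, cmd);
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* "remove" side: invalidate under the same lock so no caller can still
 * be inside ops when the backing driver goes away. */
void queue_remove(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->valid = 0;
	q->ops = NULL;
	pthread_mutex_unlock(&q->lock);
}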
2802 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
2803 +index 58ab9d90bc8b9..9b2e2548bd18b 100644
2804 +--- a/drivers/mmc/host/meson-gx-mmc.c
2805 ++++ b/drivers/mmc/host/meson-gx-mmc.c
2806 +@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
2807 + }
2808 +
2809 + ret = device_reset_optional(&pdev->dev);
2810 +- if (ret)
2811 +- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
2812 ++ if (ret) {
2813 ++ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
2814 ++ goto free_host;
2815 ++ }
2816 +
2817 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2818 + host->regs = devm_ioremap_resource(&pdev->dev, res);
2819 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
2820 +index 316393c694d7a..55868b6b86583 100644
2821 +--- a/drivers/mmc/host/pxamci.c
2822 ++++ b/drivers/mmc/host/pxamci.c
2823 +@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
2824 +
2825 + ret = pxamci_of_init(pdev, mmc);
2826 + if (ret)
2827 +- return ret;
2828 ++ goto out;
2829 +
2830 + host = mmc_priv(mmc);
2831 + host->mmc = mmc;
2832 +@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
2833 +
2834 + ret = pxamci_init_ocr(host);
2835 + if (ret < 0)
2836 +- return ret;
2837 ++ goto out;
2838 +
2839 + mmc->caps = 0;
2840 + host->cmdat = 0;
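Note on the two MMC probe fixes above (meson-gx-mmc and pxamci): both are the same class of bug, returning straight out of the middle of probe() and leaking the mmc_host that was already allocated; the fixes redirect those error paths to the existing cleanup label. A generic, hedged sketch of the unwind pattern (not the actual driver code):

#include <stdlib.h>

struct host { int dummy; };

static int init_step(void) { return -1; }   /* pretend a later init step fails */

int probe(void)
{
	struct host *host = malloc(sizeof(*host));
	int ret;

	if (!host)
		return -1;

	ret = init_step();
	if (ret)
		goto out;     /* unwind instead of returning and leaking 'host' */

	return 0;

out:
	free(host);           /* single cleanup point, like the drivers' free_host/out labels */
	return ret;
}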
2841 +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
2842 +index 791e180a06170..387f2a4f693a0 100644
2843 +--- a/drivers/mmc/host/renesas_sdhi_core.c
2844 ++++ b/drivers/mmc/host/renesas_sdhi_core.c
2845 +@@ -51,9 +51,6 @@
2846 + #define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
2847 + #define HOST_MODE_GEN3_64BIT 0
2848 +
2849 +-#define CTL_SDIF_MODE 0xe6
2850 +-#define SDIF_MODE_HS400 BIT(0)
2851 +-
2852 + #define SDHI_VER_GEN2_SDR50 0x490c
2853 + #define SDHI_VER_RZ_A1 0x820b
2854 + /* very old datasheets said 0x490c for SDR104, too. They are wrong! */
2855 +@@ -550,23 +547,25 @@ static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sd
2856 + }
2857 +
2858 + /* only populated for TMIO_MMC_MIN_RCAR2 */
2859 +-static void renesas_sdhi_reset(struct tmio_mmc_host *host)
2860 ++static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
2861 + {
2862 + struct renesas_sdhi *priv = host_to_priv(host);
2863 + int ret;
2864 + u16 val;
2865 +
2866 +- if (priv->rstc) {
2867 +- reset_control_reset(priv->rstc);
2868 +- /* Unknown why but without polling reset status, it will hang */
2869 +- read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
2870 +- false, priv->rstc);
2871 +- /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
2872 +- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
2873 +- priv->needs_adjust_hs400 = false;
2874 +- renesas_sdhi_set_clock(host, host->clk_cache);
2875 +- } else if (priv->scc_ctl) {
2876 +- renesas_sdhi_scc_reset(host, priv);
2877 ++ if (!preserve) {
2878 ++ if (priv->rstc) {
2879 ++ reset_control_reset(priv->rstc);
2880 ++ /* Unknown why but without polling reset status, it will hang */
2881 ++ read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
2882 ++ false, priv->rstc);
2883 ++ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
2884 ++ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
2885 ++ priv->needs_adjust_hs400 = false;
2886 ++ renesas_sdhi_set_clock(host, host->clk_cache);
2887 ++ } else if (priv->scc_ctl) {
2888 ++ renesas_sdhi_scc_reset(host, priv);
2889 ++ }
2890 + }
2891 +
2892 + if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
2893 +diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
2894 +index b55a29c53d9c3..53a2ad9a24b87 100644
2895 +--- a/drivers/mmc/host/tmio_mmc.c
2896 ++++ b/drivers/mmc/host/tmio_mmc.c
2897 +@@ -75,7 +75,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
2898 + tmio_mmc_clk_start(host);
2899 + }
2900 +
2901 +-static void tmio_mmc_reset(struct tmio_mmc_host *host)
2902 ++static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
2903 + {
2904 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
2905 + usleep_range(10000, 11000);
2906 +diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
2907 +index f936aad945ce3..da63193dd45b0 100644
2908 +--- a/drivers/mmc/host/tmio_mmc.h
2909 ++++ b/drivers/mmc/host/tmio_mmc.h
2910 +@@ -42,6 +42,7 @@
2911 + #define CTL_DMA_ENABLE 0xd8
2912 + #define CTL_RESET_SD 0xe0
2913 + #define CTL_VERSION 0xe2
2914 ++#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
2915 +
2916 + /* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
2917 + #define TMIO_STOP_STP BIT(0)
2918 +@@ -98,6 +99,9 @@
2919 + /* Definitions for values the CTL_DMA_ENABLE register can take */
2920 + #define DMA_ENABLE_DMASDRW BIT(1)
2921 +
2922 ++/* Definitions for values the CTL_SDIF_MODE register can take */
2923 ++#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
2924 ++
2925 + /* Define some IRQ masks */
2926 + /* This is the mask used at reset by the chip */
2927 + #define TMIO_MASK_ALL 0x837f031d
2928 +@@ -181,7 +185,7 @@ struct tmio_mmc_host {
2929 + int (*multi_io_quirk)(struct mmc_card *card,
2930 + unsigned int direction, int blk_size);
2931 + int (*write16_hook)(struct tmio_mmc_host *host, int addr);
2932 +- void (*reset)(struct tmio_mmc_host *host);
2933 ++ void (*reset)(struct tmio_mmc_host *host, bool preserve);
2934 + bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
2935 + void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
2936 + unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
2937 +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
2938 +index a5850d83908be..437048bb80273 100644
2939 +--- a/drivers/mmc/host/tmio_mmc_core.c
2940 ++++ b/drivers/mmc/host/tmio_mmc_core.c
2941 +@@ -179,8 +179,17 @@ static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
2942 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
2943 + }
2944 +
2945 +-static void tmio_mmc_reset(struct tmio_mmc_host *host)
2946 ++static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
2947 + {
2948 ++ u16 card_opt, clk_ctrl, sdif_mode;
2949 ++
2950 ++ if (preserve) {
2951 ++ card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
2952 ++ clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
2953 ++ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
2954 ++ sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
2955 ++ }
2956 ++
2957 + /* FIXME - should we set stop clock reg here */
2958 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
2959 + usleep_range(10000, 11000);
2960 +@@ -190,7 +199,7 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
2961 + tmio_mmc_abort_dma(host);
2962 +
2963 + if (host->reset)
2964 +- host->reset(host);
2965 ++ host->reset(host, preserve);
2966 +
2967 + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
2968 + host->sdcard_irq_mask = host->sdcard_irq_mask_all;
2969 +@@ -206,6 +215,13 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
2970 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
2971 + }
2972 +
2973 ++ if (preserve) {
2974 ++ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
2975 ++ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
2976 ++ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
2977 ++ sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
2978 ++ }
2979 ++
2980 + if (host->mmc->card)
2981 + mmc_retune_needed(host->mmc);
2982 + }
2983 +@@ -248,7 +264,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
2984 +
2985 + spin_unlock_irqrestore(&host->lock, flags);
2986 +
2987 +- tmio_mmc_reset(host);
2988 ++ tmio_mmc_reset(host, true);
2989 +
2990 + /* Ready for new calls */
2991 + host->mrq = NULL;
2992 +@@ -961,7 +977,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2993 + tmio_mmc_power_off(host);
2994 + /* For R-Car Gen2+, we need to reset SDHI specific SCC */
2995 + if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
2996 +- tmio_mmc_reset(host);
2997 ++ tmio_mmc_reset(host, false);
2998 +
2999 + host->set_clock(host, 0);
3000 + break;
3001 +@@ -1189,7 +1205,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
3002 + _host->sdcard_irq_mask_all = TMIO_MASK_ALL;
3003 +
3004 + _host->set_clock(_host, 0);
3005 +- tmio_mmc_reset(_host);
3006 ++ tmio_mmc_reset(_host, false);
3007 +
3008 + spin_lock_init(&_host->lock);
3009 + mutex_init(&_host->ios_lock);
3010 +@@ -1285,7 +1301,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
3011 + struct tmio_mmc_host *host = dev_get_drvdata(dev);
3012 +
3013 + tmio_mmc_clk_enable(host);
3014 +- tmio_mmc_reset(host);
3015 ++ tmio_mmc_reset(host, false);
3016 +
3017 + if (host->clk_cache)
3018 + host->set_clock(host, host->clk_cache);
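Note on the renesas_sdhi/tmio_mmc hunks above: a 'preserve' flag is threaded through the reset callback so that a reset issued from the error-recovery work keeps the card clock, bus width and (on R-Car Gen2+) HS400 interface settings, while full resets from probe, runtime resume and power-off still reinitialize everything. The core idea is just save-around-reset/restore, roughly as in this illustrative sketch (register names and count are placeholders):

#include <stdint.h>

static uint16_t regs[3];                 /* stand-ins for CARD_OPT, CLK_CTL, SDIF_MODE */

static uint16_t read_reg(int i)          { return regs[i]; }
static void write_reg(int i, uint16_t v) { regs[i] = v; }
static void do_hw_reset(void)            { regs[0] = regs[1] = regs[2] = 0; }

void reset(int preserve)
{
	uint16_t saved[3] = {0};
	int i;

	if (preserve)                    /* snapshot live settings first */
		for (i = 0; i < 3; i++)
			saved[i] = read_reg(i);

	do_hw_reset();                   /* the reset clears everything */

	if (preserve)                    /* put the interface settings back */
		for (i = 0; i < 3; i++)
			write_reg(i, saved[i]);
}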
3019 +diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
3020 +index 0579ab74f728a..baab3adc34bc6 100644
3021 +--- a/drivers/net/can/spi/mcp251x.c
3022 ++++ b/drivers/net/can/spi/mcp251x.c
3023 +@@ -1074,9 +1074,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
3024 +
3025 + mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
3026 +
3027 +- /* mask out flags we don't care about */
3028 +- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
3029 +-
3030 + /* receive buffer 0 */
3031 + if (intf & CANINTF_RX0IF) {
3032 + mcp251x_hw_rx(spi, 0);
3033 +@@ -1086,6 +1083,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
3034 + if (mcp251x_is_2510(spi))
3035 + mcp251x_write_bits(spi, CANINTF,
3036 + CANINTF_RX0IF, 0x00);
3037 ++
3038 ++ /* check if buffer 1 is already known to be full, no need to re-read */
3039 ++ if (!(intf & CANINTF_RX1IF)) {
3040 ++ u8 intf1, eflag1;
3041 ++
3042 ++ /* intf needs to be read again to avoid a race condition */
3043 ++ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
3044 ++
3045 ++ /* combine flags from both operations for error handling */
3046 ++ intf |= intf1;
3047 ++ eflag |= eflag1;
3048 ++ }
3049 + }
3050 +
3051 + /* receive buffer 1 */
3052 +@@ -1096,6 +1105,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
3053 + clear_intf |= CANINTF_RX1IF;
3054 + }
3055 +
3056 ++ /* mask out flags we don't care about */
3057 ++ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
3058 ++
3059 + /* any error or tx interrupt we need to clear? */
3060 + if (intf & (CANINTF_ERR | CANINTF_TX))
3061 + clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
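Note on the mcp251x fix above: the masking of CANINTF is moved to after both RX buffers have been considered, and the register is re-read once buffer 0 has been drained (in case buffer 1 filled in the meantime), with the two snapshots OR-ed together, so the early mask can no longer hide a second full buffer. A hedged sketch of the re-read-and-merge idea, with made-up bit values:

#include <stdint.h>

#define RX0IF     0x01
#define RX1IF     0x02
#define CARE_MASK 0x3f                   /* RX | TX | ERR bits, value illustrative */

static uint8_t fake_canintf;             /* stands in for the chip register */

static uint8_t read_intf(void) { return fake_canintf; }
static void drain_rx(int buf)  { fake_canintf &= (uint8_t)~(buf ? RX1IF : RX0IF); }

void handle_irq(void)
{
	uint8_t intf = read_intf();

	if (intf & RX0IF) {
		drain_rx(0);
		if (!(intf & RX1IF))
			intf |= read_intf();   /* re-read: buffer 1 may have filled meanwhile */
	}
	if (intf & RX1IF)
		drain_rx(1);

	intf &= CARE_MASK;                     /* mask only after both buffers were handled */
	/* ... error/TX handling would use 'intf' here ... */
}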
3062 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
3063 +index c9552846fe257..a1b7c1a451c0c 100644
3064 +--- a/drivers/net/can/usb/ems_usb.c
3065 ++++ b/drivers/net/can/usb/ems_usb.c
3066 +@@ -194,7 +194,7 @@ struct __packed ems_cpc_msg {
3067 + __le32 ts_sec; /* timestamp in seconds */
3068 + __le32 ts_nsec; /* timestamp in nano seconds */
3069 +
3070 +- union {
3071 ++ union __packed {
3072 + u8 generic[64];
3073 + struct cpc_can_msg can_msg;
3074 + struct cpc_can_params can_params;
3075 +diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
3076 +index 854e25f43fa70..379b38c5844f4 100644
3077 +--- a/drivers/net/dsa/microchip/ksz9477.c
3078 ++++ b/drivers/net/dsa/microchip/ksz9477.c
3079 +@@ -759,6 +759,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
3080 + goto exit;
3081 + }
3082 +
3083 ++ if (!(ksz_data & ALU_VALID))
3084 ++ continue;
3085 ++
3086 + /* read ALU table */
3087 + ksz9477_read_table(dev, alu_table);
3088 +
3089 +diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
3090 +index a4c6eb9a52d0d..83dca9179aa07 100644
3091 +--- a/drivers/net/dsa/mv88e6060.c
3092 ++++ b/drivers/net/dsa/mv88e6060.c
3093 +@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
3094 + int addr = REG_PORT(p);
3095 + int ret;
3096 +
3097 ++ if (dsa_is_unused_port(priv->ds, p))
3098 ++ return 0;
3099 ++
3100 + /* Do not force flow control, disable Ingress and Egress
3101 + * Header tagging, disable VLAN tunneling, and set the port
3102 + * state to Forwarding. Additionally, if this is the CPU
3103 +diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
3104 +index a9c7ada890d88..5ba7e5c820dde 100644
3105 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
3106 ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
3107 +@@ -578,7 +578,8 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
3108 + { .offset = 0x87, .name = "tx_frames_below_65_octets", },
3109 + { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
3110 + { .offset = 0x89, .name = "tx_frames_128_255_octets", },
3111 +- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
3112 ++ { .offset = 0x8A, .name = "tx_frames_256_511_octets", },
3113 ++ { .offset = 0x8B, .name = "tx_frames_512_1023_octets", },
3114 + { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
3115 + { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
3116 + { .offset = 0x8E, .name = "tx_yellow_prio_0", },
3117 +diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
3118 +index 0569ff066634d..10c6fea1227fa 100644
3119 +--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
3120 ++++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
3121 +@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
3122 +
3123 + region = dsa_devlink_region_create(ds, ops, 1, size);
3124 + if (IS_ERR(region)) {
3125 +- while (i-- >= 0)
3126 ++ while (--i >= 0)
3127 + dsa_devlink_region_destroy(priv->regions[i]);
3128 + return PTR_ERR(region);
3129 + }
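Note on the one-character sja1105_devlink fix above: with `while (i-- >= 0)` the loop body still runs one extra time with the index already at -1, destroying a region that was never created, whereas `while (--i >= 0)` only walks back over regions 0..i-1 that were actually set up. A tiny standalone demonstration of the two index sequences:

#include <stdio.h>

int main(void)
{
	int i;

	i = 2;                          /* region 2 failed to be created        */
	printf("buggy:");
	while (i-- >= 0)                /* visits 1, 0, then -1 (out of bounds) */
		printf(" %d", i);

	i = 2;
	printf("\nfixed:");
	while (--i >= 0)                /* visits 1, 0 and stops                */
		printf(" %d", i);
	printf("\n");
	return 0;
}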
3130 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3131 +index fbb1e05d58783..ea2e7cd8946da 100644
3132 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3133 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3134 +@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
3135 + static void aq_nic_polling_timer_cb(struct timer_list *t)
3136 + {
3137 + struct aq_nic_s *self = from_timer(self, t, polling_timer);
3138 +- struct aq_vec_s *aq_vec = NULL;
3139 + unsigned int i = 0U;
3140 +
3141 +- for (i = 0U, aq_vec = self->aq_vec[0];
3142 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
3143 +- aq_vec_isr(i, (void *)aq_vec);
3144 ++ for (i = 0U; self->aq_vecs > i; ++i)
3145 ++ aq_vec_isr(i, (void *)self->aq_vec[i]);
3146 +
3147 + mod_timer(&self->polling_timer, jiffies +
3148 + AQ_CFG_POLLING_TIMER_INTERVAL);
3149 +@@ -872,7 +870,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
3150 +
3151 + u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
3152 + {
3153 +- struct aq_vec_s *aq_vec = NULL;
3154 + struct aq_stats_s *stats;
3155 + unsigned int count = 0U;
3156 + unsigned int i = 0U;
3157 +@@ -922,11 +919,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
3158 + data += i;
3159 +
3160 + for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
3161 +- for (i = 0U, aq_vec = self->aq_vec[0];
3162 +- aq_vec && self->aq_vecs > i;
3163 +- ++i, aq_vec = self->aq_vec[i]) {
3164 ++ for (i = 0U; self->aq_vecs > i; ++i) {
3165 ++ if (!self->aq_vec[i])
3166 ++ break;
3167 + data += count;
3168 +- count = aq_vec_get_sw_stats(aq_vec, tc, data);
3169 ++ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
3170 + }
3171 + }
3172 +
3173 +@@ -1240,7 +1237,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
3174 +
3175 + int aq_nic_stop(struct aq_nic_s *self)
3176 + {
3177 +- struct aq_vec_s *aq_vec = NULL;
3178 + unsigned int i = 0U;
3179 +
3180 + netif_tx_disable(self->ndev);
3181 +@@ -1258,9 +1254,8 @@ int aq_nic_stop(struct aq_nic_s *self)
3182 +
3183 + aq_ptp_irq_free(self);
3184 +
3185 +- for (i = 0U, aq_vec = self->aq_vec[0];
3186 +- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
3187 +- aq_vec_stop(aq_vec);
3188 ++ for (i = 0U; self->aq_vecs > i; ++i)
3189 ++ aq_vec_stop(self->aq_vec[i]);
3190 +
3191 + aq_ptp_ring_stop(self);
3192 +
3193 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
3194 +index fe4d99abd5487..6e8bc67260311 100644
3195 +--- a/drivers/net/ethernet/broadcom/bgmac.c
3196 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
3197 +@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
3198 + }
3199 +
3200 + slot->skb = skb;
3201 +- ring->end += nr_frags + 1;
3202 + netdev_sent_queue(net_dev, skb->len);
3203 ++ ring->end += nr_frags + 1;
3204 +
3205 + wmb();
3206 +
3207 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
3208 +index 89d16c587bb7d..dbd2ede53f946 100644
3209 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
3210 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
3211 +@@ -361,6 +361,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
3212 + if (priv->internal_phy && !GENET_IS_V5(priv))
3213 + dev->phydev->irq = PHY_MAC_INTERRUPT;
3214 +
3215 ++ /* Indicate that the MAC is responsible for PHY PM */
3216 ++ dev->phydev->mac_managed_pm = true;
3217 ++
3218 + return 0;
3219 + }
3220 +
3221 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3222 +index 8b7a29e1e221b..5899139aec97a 100644
3223 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3224 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3225 +@@ -1389,8 +1389,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
3226 + buf_array[i] = addr;
3227 +
3228 + /* tracing point */
3229 +- trace_dpaa2_eth_buf_seed(priv->net_dev,
3230 +- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
3231 ++ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
3232 ++ DPAA2_ETH_RX_BUF_RAW_SIZE,
3233 + addr, priv->rx_buf_size,
3234 + bpid);
3235 + }
3236 +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
3237 +index d71eac7e19249..c5ae673005908 100644
3238 +--- a/drivers/net/ethernet/freescale/fec_ptp.c
3239 ++++ b/drivers/net/ethernet/freescale/fec_ptp.c
3240 +@@ -136,11 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
3241 + * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
3242 + * to current timer would be next second.
3243 + */
3244 +- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
3245 +- tempval |= FEC_T_CTRL_CAPTURE;
3246 +- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
3247 +-
3248 +- tempval = readl(fep->hwp + FEC_ATIME);
3249 ++ tempval = fep->cc.read(&fep->cc);
3250 + /* Convert the ptp local counter to 1588 timestamp */
3251 + ns = timecounter_cyc2time(&fep->tc, tempval);
3252 + ts = ns_to_timespec64(ns);
3253 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
3254 +index b07d55c99317e..536f9198bd47a 100644
3255 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
3256 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
3257 +@@ -383,7 +383,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3258 + set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
3259 + break;
3260 + default:
3261 +- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
3262 ++ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
3263 ++ set_bit(__I40E_DOWN_REQUESTED, pf->state);
3264 ++ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
3265 + break;
3266 + }
3267 +
3268 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
3269 +index 9fa3fa99b4c20..897b349cdaf1c 100644
3270 +--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
3271 ++++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
3272 +@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
3273 + static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
3274 + {
3275 + enum iavf_status ret_code = 0;
3276 ++ int i;
3277 +
3278 + if (hw->aq.asq.count > 0) {
3279 + /* queue already initialized */
3280 +@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
3281 + /* initialize base registers */
3282 + ret_code = iavf_config_asq_regs(hw);
3283 + if (ret_code)
3284 +- goto init_adminq_free_rings;
3285 ++ goto init_free_asq_bufs;
3286 +
3287 + /* success! */
3288 + hw->aq.asq.count = hw->aq.num_asq_entries;
3289 + goto init_adminq_exit;
3290 +
3291 ++init_free_asq_bufs:
3292 ++ for (i = 0; i < hw->aq.num_asq_entries; i++)
3293 ++ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
3294 ++ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
3295 ++
3296 + init_adminq_free_rings:
3297 + iavf_free_adminq_asq(hw);
3298 +
3299 +@@ -383,6 +389,7 @@ init_adminq_exit:
3300 + static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
3301 + {
3302 + enum iavf_status ret_code = 0;
3303 ++ int i;
3304 +
3305 + if (hw->aq.arq.count > 0) {
3306 + /* queue already initialized */
3307 +@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
3308 + /* initialize base registers */
3309 + ret_code = iavf_config_arq_regs(hw);
3310 + if (ret_code)
3311 +- goto init_adminq_free_rings;
3312 ++ goto init_free_arq_bufs;
3313 +
3314 + /* success! */
3315 + hw->aq.arq.count = hw->aq.num_arq_entries;
3316 + goto init_adminq_exit;
3317 +
3318 ++init_free_arq_bufs:
3319 ++ for (i = 0; i < hw->aq.num_arq_entries; i++)
3320 ++ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
3321 ++ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
3322 + init_adminq_free_rings:
3323 + iavf_free_adminq_arq(hw);
3324 +
3325 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
3326 +index e2349131a4286..db95786c3419f 100644
3327 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
3328 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
3329 +@@ -2414,12 +2414,15 @@ continue_reset:
3330 +
3331 + return;
3332 + reset_err:
3333 ++ if (running) {
3334 ++ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3335 ++ iavf_free_traffic_irqs(adapter);
3336 ++ }
3337 ++ iavf_disable_vf(adapter);
3338 ++
3339 + mutex_unlock(&adapter->client_lock);
3340 + mutex_unlock(&adapter->crit_lock);
3341 +- if (running)
3342 +- iavf_change_state(adapter, __IAVF_RUNNING);
3343 + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3344 +- iavf_close(netdev);
3345 + }
3346 +
3347 + /**
3348 +diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
3349 +index 3b6c1420aa7be..deb828e761fa5 100644
3350 +--- a/drivers/net/ethernet/intel/ice/ice_switch.c
3351 ++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
3352 +@@ -2614,7 +2614,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3353 + else
3354 + status = ice_set_vsi_promisc(hw, vsi_handle,
3355 + promisc_mask, vlan_id);
3356 +- if (status)
3357 ++ if (status && status != -EEXIST)
3358 + break;
3359 + }
3360 +
3361 +diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
3362 +index 2d3daf022651c..015b781441149 100644
3363 +--- a/drivers/net/ethernet/intel/igb/igb.h
3364 ++++ b/drivers/net/ethernet/intel/igb/igb.h
3365 +@@ -664,6 +664,8 @@ struct igb_adapter {
3366 + struct igb_mac_addr *mac_table;
3367 + struct vf_mac_filter vf_macs;
3368 + struct vf_mac_filter *vf_mac_list;
3369 ++ /* lock for VF resources */
3370 ++ spinlock_t vfs_lock;
3371 + };
3372 +
3373 + /* flags controlling PTP/1588 function */
3374 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
3375 +index db11a1c278f69..f19e648307398 100644
3376 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
3377 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
3378 +@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
3379 + struct net_device *netdev = pci_get_drvdata(pdev);
3380 + struct igb_adapter *adapter = netdev_priv(netdev);
3381 + struct e1000_hw *hw = &adapter->hw;
3382 ++ unsigned long flags;
3383 +
3384 + /* reclaim resources allocated to VFs */
3385 + if (adapter->vf_data) {
3386 +@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
3387 + pci_disable_sriov(pdev);
3388 + msleep(500);
3389 + }
3390 +-
3391 ++ spin_lock_irqsave(&adapter->vfs_lock, flags);
3392 + kfree(adapter->vf_mac_list);
3393 + adapter->vf_mac_list = NULL;
3394 + kfree(adapter->vf_data);
3395 + adapter->vf_data = NULL;
3396 + adapter->vfs_allocated_count = 0;
3397 ++ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3398 + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3399 + wrfl();
3400 + msleep(100);
3401 +@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
3402 + igb_release_hw_control(adapter);
3403 +
3404 + #ifdef CONFIG_PCI_IOV
3405 ++ rtnl_lock();
3406 + igb_disable_sriov(pdev);
3407 ++ rtnl_unlock();
3408 + #endif
3409 +
3410 + unregister_netdev(netdev);
3411 +@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
3412 +
3413 + spin_lock_init(&adapter->nfc_lock);
3414 + spin_lock_init(&adapter->stats64_lock);
3415 ++
3416 ++ /* init spinlock to avoid concurrency of VF resources */
3417 ++ spin_lock_init(&adapter->vfs_lock);
3418 + #ifdef CONFIG_PCI_IOV
3419 + switch (hw->mac.type) {
3420 + case e1000_82576:
3421 +@@ -7846,8 +7853,10 @@ unlock:
3422 + static void igb_msg_task(struct igb_adapter *adapter)
3423 + {
3424 + struct e1000_hw *hw = &adapter->hw;
3425 ++ unsigned long flags;
3426 + u32 vf;
3427 +
3428 ++ spin_lock_irqsave(&adapter->vfs_lock, flags);
3429 + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
3430 + /* process any reset requests */
3431 + if (!igb_check_for_rst(hw, vf))
3432 +@@ -7861,6 +7870,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
3433 + if (!igb_check_for_ack(hw, vf))
3434 + igb_rcv_ack_from_vf(adapter, vf);
3435 + }
3436 ++ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3437 + }
3438 +
3439 + /**
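Note on the igb hunks above: adapter->vfs_lock is introduced so that igb_disable_sriov(), which frees vf_data/vf_mac_list and zeroes vfs_allocated_count, cannot race igb_msg_task() walking the same VF arrays from the mailbox path; both sides now hold the spinlock (IRQ-save variant) around the VF state, and igb_remove() additionally takes the rtnl lock around the SR-IOV teardown. A userspace analogue of the pattern, using a pthread mutex in place of the irqsave spinlock (illustrative only):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int *vf_data;
static int vfs_count;

/* "message task": walk VF state only while holding the lock */
void msg_task(void)
{
	pthread_mutex_lock(&vfs_lock);
	for (int vf = 0; vf < vfs_count; vf++)
		(void)vf_data[vf];        /* process mailbox for this VF */
	pthread_mutex_unlock(&vfs_lock);
}

/* "disable sriov": free VF state under the same lock */
void disable_vfs(void)
{
	pthread_mutex_lock(&vfs_lock);
	free(vf_data);
	vf_data = NULL;
	vfs_count = 0;
	pthread_mutex_unlock(&vfs_lock);
}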
3440 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3441 +index 11ef46e72ddd9..bd33b90aaa67b 100644
3442 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3443 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3444 +@@ -2504,6 +2504,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
3445 + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
3446 + rvu_reset_lmt_map_tbl(rvu, pcifunc);
3447 + rvu_detach_rsrcs(rvu, NULL, pcifunc);
3448 ++ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
3449 ++ * entries, check and free the MCAM entries explicitly to avoid leak.
3450 ++ * Since LF is detached use LF number as -1.
3451 ++ */
3452 ++ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
3453 ++
3454 + mutex_unlock(&rvu->flr_lock);
3455 + }
3456 +
3457 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3458 +index c4a46b295d401..d1249da7a18fb 100644
3459 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3460 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3461 +@@ -1096,6 +1096,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
3462 +
3463 + void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
3464 + {
3465 ++ if (nixlf < 0)
3466 ++ return;
3467 ++
3468 + npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
3469 +
3470 + /* Delete multicast and promisc MCAM entries */
3471 +@@ -1107,6 +1110,9 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
3472 +
3473 + void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
3474 + {
3475 ++ if (nixlf < 0)
3476 ++ return;
3477 ++
3478 + /* Enables only broadcast match entry. Promisc/Allmulti are enabled
3479 + * in set_rx_mode mbox handler.
3480 + */
3481 +@@ -1650,7 +1656,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
3482 + * Firmware database method.
3483 + * Default KPU profile.
3484 + */
3485 +- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
3486 ++ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
3487 + dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
3488 + kpu_profile);
3489 + rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
3490 +@@ -1915,6 +1921,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
3491 +
3492 + static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
3493 + {
3494 ++ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
3495 + struct npc_mcam *mcam = &rvu->hw->mcam;
3496 + struct rvu_hwinfo *hw = rvu->hw;
3497 + u64 nibble_ena, rx_kex, tx_kex;
3498 +@@ -1927,15 +1934,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
3499 + mcam->counters.max--;
3500 + mcam->rx_miss_act_cntr = mcam->counters.max;
3501 +
3502 +- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
3503 +- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
3504 ++ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
3505 ++ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
3506 + nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
3507 +
3508 + nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
3509 + if (nibble_ena) {
3510 + tx_kex &= ~NPC_PARSE_NIBBLE;
3511 + tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
3512 +- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
3513 ++ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
3514 + }
3515 +
3516 + /* Configure RX interfaces */
3517 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3518 +index ca404d51d9f56..750aaa1676878 100644
3519 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3520 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3521 +@@ -445,7 +445,8 @@ do { \
3522 + NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
3523 + NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
3524 + NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
3525 +- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
3526 ++ /* SMAC follows the DMAC(which is 6 bytes) */
3527 ++ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
3528 + /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
3529 + NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
3530 + }
3531 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
3532 +index 78df173e6df24..7cf24dd5c8782 100644
3533 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
3534 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
3535 +@@ -631,6 +631,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
3536 + req->num_regs++;
3537 + req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
3538 + req->regval[1] = dwrr_val;
3539 ++ if (lvl == hw->txschq_link_cfg_lvl) {
3540 ++ req->num_regs++;
3541 ++ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
3542 ++ /* Enable this queue and backpressure */
3543 ++ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
3544 ++ }
3545 + } else if (lvl == NIX_TXSCH_LVL_TL2) {
3546 + parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
3547 + req->reg[0] = NIX_AF_TL2X_PARENT(schq);
3548 +@@ -640,11 +646,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
3549 + req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
3550 + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
3551 +
3552 +- req->num_regs++;
3553 +- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
3554 +- /* Enable this queue and backpressure */
3555 +- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
3556 +-
3557 ++ if (lvl == hw->txschq_link_cfg_lvl) {
3558 ++ req->num_regs++;
3559 ++ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
3560 ++ /* Enable this queue and backpressure */
3561 ++ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
3562 ++ }
3563 + } else if (lvl == NIX_TXSCH_LVL_TL1) {
3564 + /* Default config for TL1.
3565 + * For VF this is always ignored.
3566 +@@ -1563,6 +1570,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
3567 + for (schq = 0; schq < rsp->schq[lvl]; schq++)
3568 + pf->hw.txschq_list[lvl][schq] =
3569 + rsp->schq_list[lvl][schq];
3570 ++
3571 ++ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
3572 + }
3573 + EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
3574 +
3575 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3576 +index 637450de189c8..4ecd0ef05f3b4 100644
3577 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3578 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3579 +@@ -182,6 +182,7 @@ struct otx2_hw {
3580 + u16 sqb_size;
3581 +
3582 + /* NIX */
3583 ++ u8 txschq_link_cfg_lvl;
3584 + u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
3585 + u16 matchall_ipolicer;
3586 + u32 dwrr_mtu;
3587 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3588 +index cf03297c84710..d90c6dc41c9f4 100644
3589 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3590 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3591 +@@ -497,7 +497,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
3592 + int err;
3593 +
3594 + attr.ttl = tun_key->ttl;
3595 +- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
3596 ++ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
3597 + attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
3598 + attr.fl.fl6.saddr = tun_key->u.ipv6.src;
3599 +
3600 +@@ -611,7 +611,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
3601 +
3602 + attr.ttl = tun_key->ttl;
3603 +
3604 +- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
3605 ++ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
3606 + attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
3607 + attr.fl.fl6.saddr = tun_key->u.ipv6.src;
3608 +
3609 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3610 +index edfdd44de579c..35908a8c640a1 100644
3611 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3612 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3613 +@@ -1730,9 +1730,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3614 +
3615 + cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
3616 + cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
3617 +- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
3618 + mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3619 + unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3620 ++ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
3621 + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
3622 + mlxsw_sp->ports[local_port] = NULL;
3623 + mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
3624 +diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
3625 +index 49def6934cad1..54a91d2b33b53 100644
3626 +--- a/drivers/net/ethernet/moxa/moxart_ether.c
3627 ++++ b/drivers/net/ethernet/moxa/moxart_ether.c
3628 +@@ -77,7 +77,7 @@ static void moxart_mac_free_memory(struct net_device *ndev)
3629 + int i;
3630 +
3631 + for (i = 0; i < RX_DESC_NUM; i++)
3632 +- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
3633 ++ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
3634 + priv->rx_buf_size, DMA_FROM_DEVICE);
3635 +
3636 + if (priv->tx_desc_base)
3637 +@@ -147,11 +147,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
3638 + desc + RX_REG_OFFSET_DESC1);
3639 +
3640 + priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
3641 +- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
3642 ++ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
3643 + priv->rx_buf[i],
3644 + priv->rx_buf_size,
3645 + DMA_FROM_DEVICE);
3646 +- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
3647 ++ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
3648 + netdev_err(ndev, "DMA mapping error\n");
3649 +
3650 + moxart_desc_write(priv->rx_mapping[i],
3651 +@@ -240,7 +240,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
3652 + if (len > RX_BUF_SIZE)
3653 + len = RX_BUF_SIZE;
3654 +
3655 +- dma_sync_single_for_cpu(&ndev->dev,
3656 ++ dma_sync_single_for_cpu(&priv->pdev->dev,
3657 + priv->rx_mapping[rx_head],
3658 + priv->rx_buf_size, DMA_FROM_DEVICE);
3659 + skb = netdev_alloc_skb_ip_align(ndev, len);
3660 +@@ -294,7 +294,7 @@ static void moxart_tx_finished(struct net_device *ndev)
3661 + unsigned int tx_tail = priv->tx_tail;
3662 +
3663 + while (tx_tail != tx_head) {
3664 +- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
3665 ++ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
3666 + priv->tx_len[tx_tail], DMA_TO_DEVICE);
3667 +
3668 + ndev->stats.tx_packets++;
3669 +@@ -358,9 +358,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
3670 +
3671 + len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
3672 +
3673 +- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
3674 ++ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
3675 + len, DMA_TO_DEVICE);
3676 +- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
3677 ++ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
3678 + netdev_err(ndev, "DMA mapping error\n");
3679 + goto out_unlock;
3680 + }
3681 +@@ -379,7 +379,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
3682 + len = ETH_ZLEN;
3683 + }
3684 +
3685 +- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
3686 ++ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
3687 + priv->tx_buf_size, DMA_TO_DEVICE);
3688 +
3689 + txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
3690 +@@ -493,7 +493,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
3691 + priv->tx_buf_size = TX_BUF_SIZE;
3692 + priv->rx_buf_size = RX_BUF_SIZE;
3693 +
3694 +- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
3695 ++ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
3696 + TX_DESC_NUM, &priv->tx_base,
3697 + GFP_DMA | GFP_KERNEL);
3698 + if (!priv->tx_desc_base) {
3699 +@@ -501,7 +501,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
3700 + goto init_fail;
3701 + }
3702 +
3703 +- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
3704 ++ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
3705 + RX_DESC_NUM, &priv->rx_base,
3706 + GFP_DMA | GFP_KERNEL);
3707 + if (!priv->rx_desc_base) {
3708 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
3709 +index 8b614b0201e74..ae72cde713438 100644
3710 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
3711 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
3712 +@@ -1219,6 +1219,8 @@ nfp_port_get_module_info(struct net_device *netdev,
3713 + u8 data;
3714 +
3715 + port = nfp_port_from_netdev(netdev);
3716 ++ /* update port state to get latest interface */
3717 ++ set_bit(NFP_PORT_CHANGED, &port->flags);
3718 + eth_port = nfp_port_get_eth_port(port);
3719 + if (!eth_port)
3720 + return -EOPNOTSUPP;
3721 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
3722 +index 502fbbc082fb8..b32f1f5d841f4 100644
3723 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
3724 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
3725 +@@ -1098,6 +1098,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
3726 +
3727 + stmmac_dvr_remove(&pdev->dev);
3728 +
3729 ++ clk_disable_unprepare(priv->plat->stmmac_clk);
3730 + clk_unregister_fixed_rate(priv->plat->stmmac_clk);
3731 +
3732 + pcim_iounmap_regions(pdev, BIT(0));
3733 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
3734 +index 1ab94b5f9bbf4..605332f36d9df 100644
3735 +--- a/drivers/net/geneve.c
3736 ++++ b/drivers/net/geneve.c
3737 +@@ -774,7 +774,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
3738 + struct geneve_sock *gs4,
3739 + struct flowi4 *fl4,
3740 + const struct ip_tunnel_info *info,
3741 +- __be16 dport, __be16 sport)
3742 ++ __be16 dport, __be16 sport,
3743 ++ __u8 *full_tos)
3744 + {
3745 + bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
3746 + struct geneve_dev *geneve = netdev_priv(dev);
3747 +@@ -799,6 +800,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
3748 + use_cache = false;
3749 + }
3750 + fl4->flowi4_tos = RT_TOS(tos);
3751 ++ if (full_tos)
3752 ++ *full_tos = tos;
3753 +
3754 + dst_cache = (struct dst_cache *)&info->dst_cache;
3755 + if (use_cache) {
3756 +@@ -852,8 +855,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
3757 + use_cache = false;
3758 + }
3759 +
3760 +- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
3761 +- info->key.label);
3762 ++ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
3763 + dst_cache = (struct dst_cache *)&info->dst_cache;
3764 + if (use_cache) {
3765 + dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
3766 +@@ -887,6 +889,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
3767 + const struct ip_tunnel_key *key = &info->key;
3768 + struct rtable *rt;
3769 + struct flowi4 fl4;
3770 ++ __u8 full_tos;
3771 + __u8 tos, ttl;
3772 + __be16 df = 0;
3773 + __be16 sport;
3774 +@@ -897,7 +900,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
3775 +
3776 + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
3777 + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
3778 +- geneve->cfg.info.key.tp_dst, sport);
3779 ++ geneve->cfg.info.key.tp_dst, sport, &full_tos);
3780 + if (IS_ERR(rt))
3781 + return PTR_ERR(rt);
3782 +
3783 +@@ -941,7 +944,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
3784 +
3785 + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
3786 + } else {
3787 +- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
3788 ++ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
3789 + if (geneve->cfg.ttl_inherit)
3790 + ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
3791 + else
3792 +@@ -1123,7 +1126,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
3793 + 1, USHRT_MAX, true);
3794 +
3795 + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
3796 +- geneve->cfg.info.key.tp_dst, sport);
3797 ++ geneve->cfg.info.key.tp_dst, sport, NULL);
3798 + if (IS_ERR(rt))
3799 + return PTR_ERR(rt);
3800 +
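Note on the geneve (and the earlier mlx5 tc_tun) hunks above: RT_TOS() masks a TOS value down to the legacy 4-bit TOS field, discarding the upper DSCP bits and part of the ECN field, so ip_tunnel_ecn_encap() and ip6_make_flowinfo() were previously fed an already-stripped value; the fix carries the full 8-bit TOS ("full_tos") to those helpers and applies RT_TOS() only where the IPv4 routing key needs it. A small standalone sketch of what the mask drops (mask value taken from the uapi definition of IPTOS_TOS_MASK):

#include <stdio.h>
#include <stdint.h>

#define IPTOS_TOS_MASK 0x1e
#define RT_TOS(tos)    ((tos) & IPTOS_TOS_MASK)

int main(void)
{
	uint8_t tos = 0xa3;   /* example: DSCP bits set plus ECN = CE (0x3) */

	printf("full tos   : 0x%02x\n", tos);          /* 0xa3 */
	printf("RT_TOS(tos): 0x%02x\n", RT_TOS(tos));  /* 0x02: DSCP high bits and ECN bit 0 lost */
	return 0;
}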
3801 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
3802 +index 0d3f8fe8e42ce..834a68d758327 100644
3803 +--- a/drivers/net/phy/phy_device.c
3804 ++++ b/drivers/net/phy/phy_device.c
3805 +@@ -315,6 +315,12 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
3806 +
3807 + phydev->suspended_by_mdio_bus = 0;
3808 +
3809 ++ /* If we managed to get here with the PHY state machine in a state other
3810 ++ * than PHY_HALTED this is an indication that something went wrong and
3811 ++ * we should most likely be using MAC managed PM and we are not.
3812 ++ */
3813 ++ WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
3814 ++
3815 + ret = phy_init_hw(phydev);
3816 + if (ret < 0)
3817 + return ret;
3818 +diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
3819 +index 82d6094017113..2a2cb9d453e8e 100644
3820 +--- a/drivers/net/plip/plip.c
3821 ++++ b/drivers/net/plip/plip.c
3822 +@@ -1107,7 +1107,7 @@ plip_open(struct net_device *dev)
3823 + /* Any address will do - we take the first. We already
3824 + have the first two bytes filled with 0xfc, from
3825 + plip_init_dev(). */
3826 +- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
3827 ++ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
3828 + if (ifa != NULL) {
3829 + memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
3830 + }
3831 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
3832 +index 53cefad2a79d2..48fb7bdc0f0b1 100644
3833 +--- a/drivers/net/virtio_net.c
3834 ++++ b/drivers/net/virtio_net.c
3835 +@@ -1017,8 +1017,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
3836 + case XDP_TX:
3837 + stats->xdp_tx++;
3838 + xdpf = xdp_convert_buff_to_frame(&xdp);
3839 +- if (unlikely(!xdpf))
3840 ++ if (unlikely(!xdpf)) {
3841 ++ if (unlikely(xdp_page != page))
3842 ++ put_page(xdp_page);
3843 + goto err_xdp;
3844 ++ }
3845 + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
3846 + if (unlikely(!err)) {
3847 + xdp_return_frame_rx_napi(xdpf);
3848 +diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
3849 +index b7bf3f863d79b..5ee0afa621a95 100644
3850 +--- a/drivers/ntb/test/ntb_tool.c
3851 ++++ b/drivers/ntb/test/ntb_tool.c
3852 +@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
3853 + u64 bits;
3854 + int n;
3855 +
3856 ++ if (*offp)
3857 ++ return 0;
3858 ++
3859 + buf = kmalloc(size + 1, GFP_KERNEL);
3860 + if (!buf)
3861 + return -ENOMEM;
3862 +
3863 +- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
3864 +- if (ret < 0) {
3865 ++ if (copy_from_user(buf, ubuf, size)) {
3866 + kfree(buf);
3867 +- return ret;
3868 ++ return -EFAULT;
3869 + }
3870 +
3871 + buf[size] = 0;
3872 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
3873 +index f592e5f7f5f3d..889c5433c94d2 100644
3874 +--- a/drivers/nvme/target/tcp.c
3875 ++++ b/drivers/nvme/target/tcp.c
3876 +@@ -1834,7 +1834,8 @@ static int __init nvmet_tcp_init(void)
3877 + {
3878 + int ret;
3879 +
3880 +- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
3881 ++ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
3882 ++ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3883 + if (!nvmet_tcp_wq)
3884 + return -ENOMEM;
3885 +
3886 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
3887 +index 7cc2c54daad03..215f7510de9a7 100644
3888 +--- a/drivers/pci/controller/pci-aardvark.c
3889 ++++ b/drivers/pci/controller/pci-aardvark.c
3890 +@@ -8,6 +8,7 @@
3891 + * Author: Hezi Shahmoon <hezi.shahmoon@×××××××.com>
3892 + */
3893 +
3894 ++#include <linux/bitfield.h>
3895 + #include <linux/delay.h>
3896 + #include <linux/gpio/consumer.h>
3897 + #include <linux/interrupt.h>
3898 +@@ -857,14 +858,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
3899 +
3900 +
3901 + switch (reg) {
3902 +- case PCI_EXP_SLTCTL:
3903 +- *value = PCI_EXP_SLTSTA_PDS << 16;
3904 +- return PCI_BRIDGE_EMUL_HANDLED;
3905 +-
3906 + /*
3907 +- * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
3908 +- * to be handled here, because their values are stored in emulated
3909 +- * config space buffer, and we read them from there when needed.
3910 ++ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
3911 ++ * also supported, but do not need to be handled here, because their
3912 ++ * values are stored in emulated config space buffer, and we read them
3913 ++ * from there when needed.
3914 + */
3915 +
3916 + case PCI_EXP_LNKCAP: {
3917 +@@ -977,8 +975,25 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
3918 + /* Support interrupt A for MSI feature */
3919 + bridge->conf.intpin = PCI_INTERRUPT_INTA;
3920 +
3921 +- /* Aardvark HW provides PCIe Capability structure in version 2 */
3922 +- bridge->pcie_conf.cap = cpu_to_le16(2);
3923 ++ /*
3924 ++ * Aardvark HW provides PCIe Capability structure in version 2 and
3925 ++ * indicate slot support, which is emulated.
3926 ++ */
3927 ++ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
3928 ++
3929 ++ /*
3930 ++ * Set Presence Detect State bit permanently since there is no support
3931 ++ * for unplugging the card nor detecting whether it is plugged. (If a
3932 ++ * platform exists in the future that supports it, via a GPIO for
3933 ++ * example, it should be implemented via this bit.)
3934 ++ *
3935 ++ * Set physical slot number to 1 since there is only one port and zero
3936 ++ * value is reserved for ports within the same silicon as Root Port
3937 ++ * which is not our case.
3938 ++ */
3939 ++ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
3940 ++ 1));
3941 ++ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
3942 +
3943 + /* Indicates supports for Completion Retry Status */
3944 + bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
3945 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3946 +index 4893b1e824031..a531064233f98 100644
3947 +--- a/drivers/pci/quirks.c
3948 ++++ b/drivers/pci/quirks.c
3949 +@@ -4923,6 +4923,9 @@ static const struct pci_dev_acs_enabled {
3950 + { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
3951 + /* Broadcom multi-function device */
3952 + { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
3953 ++ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
3954 ++ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
3955 ++ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
3956 + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
3957 + /* Amazon Annapurna Labs */
3958 + { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
3959 +diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
3960 +index 578cfe07d07ab..53c9230c29078 100644
3961 +--- a/drivers/phy/samsung/phy-exynos-pcie.c
3962 ++++ b/drivers/phy/samsung/phy-exynos-pcie.c
3963 +@@ -51,6 +51,13 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
3964 + {
3965 + struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
3966 +
3967 ++ regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
3968 ++ BIT(0), 1);
3969 ++ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
3970 ++ PCIE_APP_REQ_EXIT_L1_MODE, 0);
3971 ++ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
3972 ++ PCIE_REFCLK_GATING_EN, 0);
3973 ++
3974 + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
3975 + PCIE_PHY_RESET, 1);
3976 + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
3977 +@@ -109,20 +116,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
3978 + return 0;
3979 + }
3980 +
3981 +-static int exynos5433_pcie_phy_power_on(struct phy *phy)
3982 +-{
3983 +- struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
3984 +-
3985 +- regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
3986 +- BIT(0), 1);
3987 +- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
3988 +- PCIE_APP_REQ_EXIT_L1_MODE, 0);
3989 +- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
3990 +- PCIE_REFCLK_GATING_EN, 0);
3991 +- return 0;
3992 +-}
3993 +-
3994 +-static int exynos5433_pcie_phy_power_off(struct phy *phy)
3995 ++static int exynos5433_pcie_phy_exit(struct phy *phy)
3996 + {
3997 + struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
3998 +
3999 +@@ -135,8 +129,7 @@ static int exynos5433_pcie_phy_power_off(struct phy *phy)
4000 +
4001 + static const struct phy_ops exynos5433_phy_ops = {
4002 + .init = exynos5433_pcie_phy_init,
4003 +- .power_on = exynos5433_pcie_phy_power_on,
4004 +- .power_off = exynos5433_pcie_phy_power_off,
4005 ++ .exit = exynos5433_pcie_phy_exit,
4006 + .owner = THIS_MODULE,
4007 + };
4008 +
4009 +diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
4010 +index 826d494f3cc66..48f55991ae8cc 100644
4011 +--- a/drivers/pinctrl/intel/pinctrl-intel.c
4012 ++++ b/drivers/pinctrl/intel/pinctrl-intel.c
4013 +@@ -1626,16 +1626,14 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
4014 +
4015 + const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
4016 + {
4017 ++ const struct intel_pinctrl_soc_data * const *table;
4018 + const struct intel_pinctrl_soc_data *data = NULL;
4019 +- const struct intel_pinctrl_soc_data **table;
4020 +- struct acpi_device *adev;
4021 +- unsigned int i;
4022 +
4023 +- adev = ACPI_COMPANION(&pdev->dev);
4024 +- if (adev) {
4025 +- const void *match = device_get_match_data(&pdev->dev);
4026 ++ table = device_get_match_data(&pdev->dev);
4027 ++ if (table) {
4028 ++ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
4029 ++ unsigned int i;
4030 +
4031 +- table = (const struct intel_pinctrl_soc_data **)match;
4032 + for (i = 0; table[i]; i++) {
4033 + if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
4034 + data = table[i];
4035 +@@ -1649,7 +1647,7 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
4036 + if (!id)
4037 + return ERR_PTR(-ENODEV);
4038 +
4039 +- table = (const struct intel_pinctrl_soc_data **)id->driver_data;
4040 ++ table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
4041 + data = table[pdev->id];
4042 + }
4043 +
4044 +diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4045 +index 4757bf964d3cd..6dd930a839ecc 100644
4046 +--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4047 ++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
4048 +@@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
4049 +
4050 + has_config = nmk_pinctrl_dt_get_config(np, &configs);
4051 + np_config = of_parse_phandle(np, "ste,config", 0);
4052 +- if (np_config)
4053 ++ if (np_config) {
4054 + has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
4055 ++ of_node_put(np_config);
4056 ++ }
4057 + if (has_config) {
4058 + const char *gpio_name;
4059 + const char *pin;
4060 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
4061 +index ecab9064a8458..5c4acf2308d4f 100644
4062 +--- a/drivers/pinctrl/pinctrl-amd.c
4063 ++++ b/drivers/pinctrl/pinctrl-amd.c
4064 +@@ -912,6 +912,7 @@ static int amd_gpio_suspend(struct device *dev)
4065 + {
4066 + struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
4067 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
4068 ++ unsigned long flags;
4069 + int i;
4070 +
4071 + for (i = 0; i < desc->npins; i++) {
4072 +@@ -920,7 +921,9 @@ static int amd_gpio_suspend(struct device *dev)
4073 + if (!amd_gpio_should_save(gpio_dev, pin))
4074 + continue;
4075 +
4076 +- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
4077 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
4078 ++ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
4079 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
4080 + }
4081 +
4082 + return 0;
4083 +@@ -930,6 +933,7 @@ static int amd_gpio_resume(struct device *dev)
4084 + {
4085 + struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
4086 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
4087 ++ unsigned long flags;
4088 + int i;
4089 +
4090 + for (i = 0; i < desc->npins; i++) {
4091 +@@ -938,7 +942,10 @@ static int amd_gpio_resume(struct device *dev)
4092 + if (!amd_gpio_should_save(gpio_dev, pin))
4093 + continue;
4094 +
4095 +- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
4096 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
4097 ++ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
4098 ++ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
4099 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
4100 + }
4101 +
4102 + return 0;
4103 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
4104 +index 396db12ae9048..bf68913ba8212 100644
4105 +--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
4106 ++++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
4107 +@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
4108 + PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
4109 + PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
4110 + PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
4111 +- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
4112 +- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
4113 ++ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
4114 ++ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
4115 + PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
4116 + PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
4117 + PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
4118 +diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
4119 +index af144e724bd9c..3bd7f9fedcc34 100644
4120 +--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
4121 ++++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
4122 +@@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = {
4123 + static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
4124 + { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
4125 + { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
4126 +- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
4127 ++ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
4128 + { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
4129 + { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
4130 + { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
4131 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
4132 +index c7d90c44e87aa..7b4b9f3d45558 100644
4133 +--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
4134 ++++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
4135 +@@ -107,6 +107,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
4136 + .npins = ARRAY_SIZE(sun50i_h6_r_pins),
4137 + .pin_base = PL_BASE,
4138 + .irq_banks = 2,
4139 ++ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
4140 + };
4141 +
4142 + static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
4143 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
4144 +index 1431ab21aca6f..30ca0fe5c31a4 100644
4145 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
4146 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
4147 +@@ -624,7 +624,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
4148 + unsigned pin,
4149 + struct regulator *supply)
4150 + {
4151 +- unsigned short bank = pin / PINS_PER_BANK;
4152 ++ unsigned short bank;
4153 + unsigned long flags;
4154 + u32 val, reg;
4155 + int uV;
4156 +@@ -640,6 +640,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
4157 + if (uV == 0)
4158 + return 0;
4159 +
4160 ++ pin -= pctl->desc->pin_base;
4161 ++ bank = pin / PINS_PER_BANK;
4162 ++
4163 + switch (pctl->desc->io_bias_cfg_variant) {
4164 + case BIAS_VOLTAGE_GRP_CONFIG:
4165 + /*
4166 +@@ -657,8 +660,6 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
4167 + else
4168 + val = 0xD; /* 3.3V */
4169 +
4170 +- pin -= pctl->desc->pin_base;
4171 +-
4172 + reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
4173 + reg &= ~IO_BIAS_MASK;
4174 + writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
4175 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
4176 +index ed2b4807328da..1575d603d3ff3 100644
4177 +--- a/drivers/platform/chrome/cros_ec_proto.c
4178 ++++ b/drivers/platform/chrome/cros_ec_proto.c
4179 +@@ -507,13 +507,13 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
4180 + ret = cros_ec_get_host_command_version_mask(ec_dev,
4181 + EC_CMD_GET_NEXT_EVENT,
4182 + &ver_mask);
4183 +- if (ret < 0 || ver_mask == 0)
4184 ++ if (ret < 0 || ver_mask == 0) {
4185 + ec_dev->mkbp_event_supported = 0;
4186 +- else
4187 ++ } else {
4188 + ec_dev->mkbp_event_supported = fls(ver_mask);
4189 +
4190 +- dev_dbg(ec_dev->dev, "MKBP support version %u\n",
4191 +- ec_dev->mkbp_event_supported - 1);
4192 ++ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
4193 ++ }
4194 +
4195 + /* Probe if host sleep v1 is supported for S0ix failure detection. */
4196 + ret = cros_ec_get_host_command_version_mask(ec_dev,
4197 +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
4198 +index 08b2e85dcd7d8..79bc86ba59b35 100644
4199 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c
4200 ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
4201 +@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
4202 + struct lpfc_sli4_hdw_queue *qp;
4203 + struct lpfc_multixri_pool *multixri_pool;
4204 +
4205 +- if (nbytes > 64)
4206 +- nbytes = 64;
4207 ++ if (nbytes > sizeof(mybuf) - 1)
4208 ++ nbytes = sizeof(mybuf) - 1;
4209 +
4210 + memset(mybuf, 0, sizeof(mybuf));
4211 +
4212 +@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
4213 + if (!phba->targetport)
4214 + return -ENXIO;
4215 +
4216 +- if (nbytes > 64)
4217 +- nbytes = 64;
4218 ++ if (nbytes > sizeof(mybuf) - 1)
4219 ++ nbytes = sizeof(mybuf) - 1;
4220 +
4221 + memset(mybuf, 0, sizeof(mybuf));
4222 +
4223 +@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
4224 + char mybuf[64];
4225 + char *pbuf;
4226 +
4227 +- if (nbytes > 64)
4228 +- nbytes = 64;
4229 ++ if (nbytes > sizeof(mybuf) - 1)
4230 ++ nbytes = sizeof(mybuf) - 1;
4231 +
4232 + memset(mybuf, 0, sizeof(mybuf));
4233 +
4234 +@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
4235 + char mybuf[64];
4236 + char *pbuf;
4237 +
4238 +- if (nbytes > 63)
4239 +- nbytes = 63;
4240 ++ if (nbytes > sizeof(mybuf) - 1)
4241 ++ nbytes = sizeof(mybuf) - 1;
4242 +
4243 + memset(mybuf, 0, sizeof(mybuf));
4244 +
4245 +@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
4246 + char *pbuf;
4247 + int i;
4248 +
4249 +- if (nbytes > 64)
4250 +- nbytes = 64;
4251 ++ if (nbytes > sizeof(mybuf) - 1)
4252 ++ nbytes = sizeof(mybuf) - 1;
4253 +
4254 + memset(mybuf, 0, sizeof(mybuf));
4255 +
4256 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
4257 +index fb69416c9623b..f594a006d04c6 100644
4258 +--- a/drivers/scsi/lpfc/lpfc_sli.c
4259 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
4260 +@@ -2012,10 +2012,12 @@ initpath:
4261 +
4262 + sync_buf->cmd_flag |= LPFC_IO_CMF;
4263 + ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
4264 +- if (ret_val)
4265 ++ if (ret_val) {
4266 + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
4267 + "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
4268 + ret_val);
4269 ++ __lpfc_sli_release_iocbq(phba, sync_buf);
4270 ++ }
4271 + out_unlock:
4272 + spin_unlock_irqrestore(&phba->hbalock, iflags);
4273 + return ret_val;
4274 +diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
4275 +index 4e53857605de8..a9ddb50d593cf 100644
4276 +--- a/drivers/scsi/ufs/ufs-mediatek.c
4277 ++++ b/drivers/scsi/ufs/ufs-mediatek.c
4278 +@@ -949,7 +949,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
4279 + * ufshcd_suspend() re-enabling regulators while vreg is still
4280 + * in low-power mode.
4281 + */
4282 +- ufs_mtk_vreg_set_lpm(hba, true);
4283 + err = ufs_mtk_mphy_power_on(hba, false);
4284 + if (err)
4285 + goto fail;
4286 +@@ -973,12 +972,13 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
4287 + {
4288 + int err;
4289 +
4290 ++ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
4291 ++ ufs_mtk_vreg_set_lpm(hba, false);
4292 ++
4293 + err = ufs_mtk_mphy_power_on(hba, true);
4294 + if (err)
4295 + goto fail;
4296 +
4297 +- ufs_mtk_vreg_set_lpm(hba, false);
4298 +-
4299 + if (ufshcd_is_link_hibern8(hba)) {
4300 + err = ufs_mtk_link_set_hpm(hba);
4301 + if (err)
4302 +@@ -1139,9 +1139,59 @@ static int ufs_mtk_remove(struct platform_device *pdev)
4303 + return 0;
4304 + }
4305 +
4306 ++#ifdef CONFIG_PM_SLEEP
4307 ++int ufs_mtk_system_suspend(struct device *dev)
4308 ++{
4309 ++ struct ufs_hba *hba = dev_get_drvdata(dev);
4310 ++ int ret;
4311 ++
4312 ++ ret = ufshcd_system_suspend(dev);
4313 ++ if (ret)
4314 ++ return ret;
4315 ++
4316 ++ ufs_mtk_vreg_set_lpm(hba, true);
4317 ++
4318 ++ return 0;
4319 ++}
4320 ++
4321 ++int ufs_mtk_system_resume(struct device *dev)
4322 ++{
4323 ++ struct ufs_hba *hba = dev_get_drvdata(dev);
4324 ++
4325 ++ ufs_mtk_vreg_set_lpm(hba, false);
4326 ++
4327 ++ return ufshcd_system_resume(dev);
4328 ++}
4329 ++#endif
4330 ++
4331 ++int ufs_mtk_runtime_suspend(struct device *dev)
4332 ++{
4333 ++ struct ufs_hba *hba = dev_get_drvdata(dev);
4334 ++ int ret = 0;
4335 ++
4336 ++ ret = ufshcd_runtime_suspend(dev);
4337 ++ if (ret)
4338 ++ return ret;
4339 ++
4340 ++ ufs_mtk_vreg_set_lpm(hba, true);
4341 ++
4342 ++ return 0;
4343 ++}
4344 ++
4345 ++int ufs_mtk_runtime_resume(struct device *dev)
4346 ++{
4347 ++ struct ufs_hba *hba = dev_get_drvdata(dev);
4348 ++
4349 ++ ufs_mtk_vreg_set_lpm(hba, false);
4350 ++
4351 ++ return ufshcd_runtime_resume(dev);
4352 ++}
4353 ++
4354 + static const struct dev_pm_ops ufs_mtk_pm_ops = {
4355 +- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
4356 +- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
4357 ++ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
4358 ++ ufs_mtk_system_resume)
4359 ++ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
4360 ++ ufs_mtk_runtime_resume, NULL)
4361 + .prepare = ufshcd_suspend_prepare,
4362 + .complete = ufshcd_resume_complete,
4363 + };
4364 +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
4365 +index 0bc7daa7afc83..e4cb52e1fe261 100644
4366 +--- a/drivers/spi/spi-meson-spicc.c
4367 ++++ b/drivers/spi/spi-meson-spicc.c
4368 +@@ -156,6 +156,7 @@ struct meson_spicc_device {
4369 + void __iomem *base;
4370 + struct clk *core;
4371 + struct clk *pclk;
4372 ++ struct clk_divider pow2_div;
4373 + struct clk *clk;
4374 + struct spi_message *message;
4375 + struct spi_transfer *xfer;
4376 +@@ -168,6 +169,8 @@ struct meson_spicc_device {
4377 + unsigned long xfer_remain;
4378 + };
4379 +
4380 ++#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
4381 ++
4382 + static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
4383 + {
4384 + u32 conf;
4385 +@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
4386 + {
4387 + struct meson_spicc_device *spicc = spi_master_get_devdata(master);
4388 + struct spi_device *spi = message->spi;
4389 +- u32 conf = 0;
4390 ++ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
4391 +
4392 + /* Store current message */
4393 + spicc->message = message;
4394 +@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
4395 + /* Select CS */
4396 + conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
4397 +
4398 +- /* Default Clock rate core/4 */
4399 +-
4400 + /* Default 8bit word */
4401 + conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
4402 +
4403 +@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
4404 + static int meson_spicc_unprepare_transfer(struct spi_master *master)
4405 + {
4406 + struct meson_spicc_device *spicc = spi_master_get_devdata(master);
4407 ++ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
4408 +
4409 + /* Disable all IRQs */
4410 + writel(0, spicc->base + SPICC_INTREG);
4411 +
4412 + device_reset_optional(&spicc->pdev->dev);
4413 +
4414 ++ /* Set default configuration, keeping datarate field */
4415 ++ writel_relaxed(conf, spicc->base + SPICC_CONREG);
4416 ++
4417 + return 0;
4418 + }
4419 +
4420 +@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
4421 + * Clk path for G12A series:
4422 + * pclk -> pow2 fixed div -> pow2 div -> mux -> out
4423 + * pclk -> enh fixed div -> enh div -> mux -> out
4424 ++ *
4425 ++ * The pow2 divider is tied to the controller HW state, and the
4426 ++ * divider is only valid when the controller is initialized.
4427 ++ *
4428 ++ * A set of clock ops is added to make sure we don't read/set this
4429 ++ * clock rate while the controller is in an unknown state.
4430 + */
4431 +
4432 +-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
4433 ++static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
4434 ++ unsigned long parent_rate)
4435 ++{
4436 ++ struct clk_divider *divider = to_clk_divider(hw);
4437 ++ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
4438 ++
4439 ++ if (!spicc->master->cur_msg || !spicc->master->busy)
4440 ++ return 0;
4441 ++
4442 ++ return clk_divider_ops.recalc_rate(hw, parent_rate);
4443 ++}
4444 ++
4445 ++static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
4446 ++ struct clk_rate_request *req)
4447 ++{
4448 ++ struct clk_divider *divider = to_clk_divider(hw);
4449 ++ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
4450 ++
4451 ++ if (!spicc->master->cur_msg || !spicc->master->busy)
4452 ++ return -EINVAL;
4453 ++
4454 ++ return clk_divider_ops.determine_rate(hw, req);
4455 ++}
4456 ++
4457 ++static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
4458 ++ unsigned long parent_rate)
4459 ++{
4460 ++ struct clk_divider *divider = to_clk_divider(hw);
4461 ++ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
4462 ++
4463 ++ if (!spicc->master->cur_msg || !spicc->master->busy)
4464 ++ return -EINVAL;
4465 ++
4466 ++ return clk_divider_ops.set_rate(hw, rate, parent_rate);
4467 ++}
4468 ++
4469 ++const struct clk_ops meson_spicc_pow2_clk_ops = {
4470 ++ .recalc_rate = meson_spicc_pow2_recalc_rate,
4471 ++ .determine_rate = meson_spicc_pow2_determine_rate,
4472 ++ .set_rate = meson_spicc_pow2_set_rate,
4473 ++};
4474 ++
4475 ++static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
4476 + {
4477 + struct device *dev = &spicc->pdev->dev;
4478 +- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
4479 +- struct clk_divider *pow2_div, *enh_div;
4480 +- struct clk_mux *mux;
4481 ++ struct clk_fixed_factor *pow2_fixed_div;
4482 + struct clk_init_data init;
4483 + struct clk *clk;
4484 + struct clk_parent_data parent_data[2];
4485 +@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
4486 + if (WARN_ON(IS_ERR(clk)))
4487 + return PTR_ERR(clk);
4488 +
4489 +- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
4490 +- if (!pow2_div)
4491 +- return -ENOMEM;
4492 +-
4493 + snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
4494 + init.name = name;
4495 +- init.ops = &clk_divider_ops;
4496 +- init.flags = CLK_SET_RATE_PARENT;
4497 ++ init.ops = &meson_spicc_pow2_clk_ops;
4498 ++ /*
4499 ++ * Set NOCACHE here to make sure we read the actual HW value
4500 ++ * since we reset the HW after each transfer.
4501 ++ */
4502 ++ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
4503 + parent_data[0].hw = &pow2_fixed_div->hw;
4504 + init.num_parents = 1;
4505 +
4506 +- pow2_div->shift = 16,
4507 +- pow2_div->width = 3,
4508 +- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
4509 +- pow2_div->reg = spicc->base + SPICC_CONREG;
4510 +- pow2_div->hw.init = &init;
4511 ++ spicc->pow2_div.shift = 16,
4512 ++ spicc->pow2_div.width = 3,
4513 ++ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
4514 ++ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
4515 ++ spicc->pow2_div.hw.init = &init;
4516 +
4517 +- clk = devm_clk_register(dev, &pow2_div->hw);
4518 +- if (WARN_ON(IS_ERR(clk)))
4519 +- return PTR_ERR(clk);
4520 ++ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
4521 ++ if (WARN_ON(IS_ERR(spicc->clk)))
4522 ++ return PTR_ERR(spicc->clk);
4523 +
4524 +- if (!spicc->data->has_enhance_clk_div) {
4525 +- spicc->clk = clk;
4526 +- return 0;
4527 +- }
4528 ++ return 0;
4529 ++}
4530 ++
4531 ++static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
4532 ++{
4533 ++ struct device *dev = &spicc->pdev->dev;
4534 ++ struct clk_fixed_factor *enh_fixed_div;
4535 ++ struct clk_divider *enh_div;
4536 ++ struct clk_mux *mux;
4537 ++ struct clk_init_data init;
4538 ++ struct clk *clk;
4539 ++ struct clk_parent_data parent_data[2];
4540 ++ char name[64];
4541 ++
4542 ++ memset(&init, 0, sizeof(init));
4543 ++ memset(&parent_data, 0, sizeof(parent_data));
4544 ++
4545 ++ init.parent_data = parent_data;
4546 +
4547 + /* algorithm for enh div: rate = freq / 2 / (N + 1) */
4548 +
4549 +@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
4550 + snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
4551 + init.name = name;
4552 + init.ops = &clk_mux_ops;
4553 +- parent_data[0].hw = &pow2_div->hw;
4554 ++ parent_data[0].hw = &spicc->pow2_div.hw;
4555 + parent_data[1].hw = &enh_div->hw;
4556 + init.num_parents = 2;
4557 + init.flags = CLK_SET_RATE_PARENT;
4558 +@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
4559 +
4560 + meson_spicc_oen_enable(spicc);
4561 +
4562 +- ret = meson_spicc_clk_init(spicc);
4563 ++ ret = meson_spicc_pow2_clk_init(spicc);
4564 + if (ret) {
4565 +- dev_err(&pdev->dev, "clock registration failed\n");
4566 ++ dev_err(&pdev->dev, "pow2 clock registration failed\n");
4567 + goto out_clk;
4568 + }
4569 +
4570 ++ if (spicc->data->has_enhance_clk_div) {
4571 ++ ret = meson_spicc_enh_clk_init(spicc);
4572 ++ if (ret) {
4573 ++ dev_err(&pdev->dev, "clock registration failed\n");
4574 ++ goto out_clk;
4575 ++ }
4576 ++ }
4577 ++
4578 + ret = devm_spi_register_master(&pdev->dev, master);
4579 + if (ret) {
4580 + dev_err(&pdev->dev, "spi master registration failed\n");
4581 +diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
4582 +index 6000853973c10..3cc9ef08455c2 100644
4583 +--- a/drivers/tty/serial/ucc_uart.c
4584 ++++ b/drivers/tty/serial/ucc_uart.c
4585 +@@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
4586 + /* No compatible property, so try the name. */
4587 + soc_string = np->name;
4588 +
4589 ++ of_node_put(np);
4590 ++
4591 + /* Extract the SOC number from the "PowerPC," string */
4592 + if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
4593 + return 0;
4594 +diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
4595 +index ae049eb28b93c..3f1ce89110776 100644
4596 +--- a/drivers/usb/cdns3/cdns3-gadget.c
4597 ++++ b/drivers/usb/cdns3/cdns3-gadget.c
4598 +@@ -220,7 +220,7 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
4599 +
4600 + if (!priv_ep->trb_pool) {
4601 + priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
4602 +- GFP_DMA32 | GFP_ATOMIC,
4603 ++ GFP_ATOMIC,
4604 + &priv_ep->trb_pool_dma);
4605 +
4606 + if (!priv_ep->trb_pool)
4607 +@@ -625,9 +625,9 @@ static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
4608 + trace_cdns3_wa2(priv_ep, "removes eldest request");
4609 +
4610 + kfree(priv_req->request.buf);
4611 ++ list_del_init(&priv_req->list);
4612 + cdns3_gadget_ep_free_request(&priv_ep->endpoint,
4613 + &priv_req->request);
4614 +- list_del_init(&priv_req->list);
4615 + --priv_ep->wa2_counter;
4616 +
4617 + if (!chain)
4618 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
4619 +index e1cebf581a4af..519bb82b00e80 100644
4620 +--- a/drivers/usb/dwc2/gadget.c
4621 ++++ b/drivers/usb/dwc2/gadget.c
4622 +@@ -3594,7 +3594,8 @@ void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
4623 + void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
4624 + {
4625 + /* remove the soft-disconnect and let's go */
4626 +- dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
4627 ++ if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
4628 ++ dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
4629 + }
4630 +
4631 + /**
4632 +diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
4633 +index 99dc9adf56efa..a64b842665b92 100644
4634 +--- a/drivers/usb/gadget/function/uvc_queue.c
4635 ++++ b/drivers/usb/gadget/function/uvc_queue.c
4636 +@@ -44,7 +44,8 @@ static int uvc_queue_setup(struct vb2_queue *vq,
4637 + {
4638 + struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
4639 + struct uvc_video *video = container_of(queue, struct uvc_video, queue);
4640 +- struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
4641 ++ unsigned int req_size;
4642 ++ unsigned int nreq;
4643 +
4644 + if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
4645 + *nbuffers = UVC_MAX_VIDEO_BUFFERS;
4646 +@@ -53,10 +54,16 @@ static int uvc_queue_setup(struct vb2_queue *vq,
4647 +
4648 + sizes[0] = video->imagesize;
4649 +
4650 +- if (cdev->gadget->speed < USB_SPEED_SUPER)
4651 +- video->uvc_num_requests = 4;
4652 +- else
4653 +- video->uvc_num_requests = 64;
4654 ++ req_size = video->ep->maxpacket
4655 ++ * max_t(unsigned int, video->ep->maxburst, 1)
4656 ++ * (video->ep->mult);
4657 ++
4658 ++ /* We divide by two, to increase the chance to run
4659 ++ * into fewer requests for smaller framesizes.
4660 ++ */
4661 ++ nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
4662 ++ nreq = clamp(nreq, 4U, 64U);
4663 ++ video->uvc_num_requests = nreq;
4664 +
4665 + return 0;
4666 + }
4667 +diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
4668 +index b4a763e5f70e1..e170e88abf3a0 100644
4669 +--- a/drivers/usb/gadget/function/uvc_video.c
4670 ++++ b/drivers/usb/gadget/function/uvc_video.c
4671 +@@ -225,7 +225,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
4672 + break;
4673 +
4674 + default:
4675 +- uvcg_info(&video->uvc->func,
4676 ++ uvcg_warn(&video->uvc->func,
4677 + "VS request completed with status %d.\n",
4678 + req->status);
4679 + uvcg_queue_cancel(queue, 0);
4680 +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
4681 +index 3279b4767424c..9e8b678f0548e 100644
4682 +--- a/drivers/usb/gadget/legacy/inode.c
4683 ++++ b/drivers/usb/gadget/legacy/inode.c
4684 +@@ -362,6 +362,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
4685 + spin_unlock_irq (&epdata->dev->lock);
4686 +
4687 + DBG (epdata->dev, "endpoint gone\n");
4688 ++ wait_for_completion(&done);
4689 + epdata->status = -ENODEV;
4690 + }
4691 + }
4692 +diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
4693 +index 45f7cceb6df31..98e46725999e9 100644
4694 +--- a/drivers/usb/host/ohci-ppc-of.c
4695 ++++ b/drivers/usb/host/ohci-ppc-of.c
4696 +@@ -169,6 +169,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
4697 + release_mem_region(res.start, 0x4);
4698 + } else
4699 + pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
4700 ++ of_node_put(np);
4701 + }
4702 +
4703 + irq_dispose_mapping(irq);
4704 +diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
4705 +index 24de64edb674b..2d77edefb4b30 100644
4706 +--- a/drivers/usb/renesas_usbhs/rza.c
4707 ++++ b/drivers/usb/renesas_usbhs/rza.c
4708 +@@ -23,6 +23,10 @@ static int usbhs_rza1_hardware_init(struct platform_device *pdev)
4709 + extal_clk = of_find_node_by_name(NULL, "extal");
4710 + of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
4711 + of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
4712 ++
4713 ++ of_node_put(usb_x1_clk);
4714 ++ of_node_put(extal_clk);
4715 ++
4716 + if (freq_usb == 0) {
4717 + if (freq_extal == 12000000) {
4718 + /* Select 12MHz XTAL */
4719 +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
4720 +index 3c034fe14ccb0..818e47fc08968 100644
4721 +--- a/drivers/vfio/vfio.c
4722 ++++ b/drivers/vfio/vfio.c
4723 +@@ -1850,6 +1850,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
4724 + buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
4725 + if (!buf) {
4726 + kfree(caps->buf);
4727 ++ caps->buf = NULL;
4728 + caps->size = 0;
4729 + return ERR_PTR(-ENOMEM);
4730 + }
4731 +diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
4732 +index 52cce0db8bd34..ad5ced4ef972d 100644
4733 +--- a/drivers/video/fbdev/i740fb.c
4734 ++++ b/drivers/video/fbdev/i740fb.c
4735 +@@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
4736 + u32 xres, right, hslen, left, xtotal;
4737 + u32 yres, lower, vslen, upper, ytotal;
4738 + u32 vxres, xoffset, vyres, yoffset;
4739 +- u32 bpp, base, dacspeed24, mem;
4740 ++ u32 bpp, base, dacspeed24, mem, freq;
4741 + u8 r7;
4742 + int i;
4743 +
4744 +@@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
4745 + par->atc[VGA_ATC_OVERSCAN] = 0;
4746 +
4747 + /* Calculate VCLK that most closely matches the requested dot clock */
4748 +- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
4749 ++ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
4750 ++ if (freq < I740_RFREQ_FIX) {
4751 ++ fb_dbg(info, "invalid pixclock\n");
4752 ++ freq = I740_RFREQ_FIX;
4753 ++ }
4754 ++ i740_calc_vclk(freq, par);
4755 +
4756 + /* Since we program the clocks ourselves, always use VCLK2. */
4757 + par->misc |= 0x0C;
4758 +diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
4759 +index 73eb34849eaba..4ccfd30c2a304 100644
4760 +--- a/drivers/virt/vboxguest/vboxguest_linux.c
4761 ++++ b/drivers/virt/vboxguest/vboxguest_linux.c
4762 +@@ -356,8 +356,8 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
4763 + goto err_vbg_core_exit;
4764 + }
4765 +
4766 +- ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
4767 +- DEVICE_NAME, gdev);
4768 ++ ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
4769 ++ gdev);
4770 + if (ret) {
4771 + vbg_err("vboxguest: Error requesting irq: %d\n", ret);
4772 + goto err_vbg_core_exit;
4773 +@@ -367,7 +367,7 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
4774 + if (ret) {
4775 + vbg_err("vboxguest: Error misc_register %s failed: %d\n",
4776 + DEVICE_NAME, ret);
4777 +- goto err_vbg_core_exit;
4778 ++ goto err_free_irq;
4779 + }
4780 +
4781 + ret = misc_register(&gdev->misc_device_user);
4782 +@@ -403,6 +403,8 @@ err_unregister_misc_device_user:
4783 + misc_deregister(&gdev->misc_device_user);
4784 + err_unregister_misc_device:
4785 + misc_deregister(&gdev->misc_device);
4786 ++err_free_irq:
4787 ++ free_irq(pci->irq, gdev);
4788 + err_vbg_core_exit:
4789 + vbg_core_exit(gdev);
4790 + err_disable_pcidev:
4791 +@@ -419,6 +421,7 @@ static void vbg_pci_remove(struct pci_dev *pci)
4792 + vbg_gdev = NULL;
4793 + mutex_unlock(&vbg_gdev_mutex);
4794 +
4795 ++ free_irq(pci->irq, gdev);
4796 + device_remove_file(gdev->dev, &dev_attr_host_features);
4797 + device_remove_file(gdev->dev, &dev_attr_host_version);
4798 + misc_deregister(&gdev->misc_device_user);
4799 +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
4800 +index 597af455a522b..0792fda49a15f 100644
4801 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
4802 ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
4803 +@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
4804 + {
4805 + struct xenbus_file_priv *u = filp->private_data;
4806 + struct read_buffer *rb;
4807 +- unsigned i;
4808 ++ ssize_t i;
4809 + int ret;
4810 +
4811 + mutex_lock(&u->reply_mutex);
4812 +@@ -148,7 +148,7 @@ again:
4813 + rb = list_entry(u->read_buffers.next, struct read_buffer, list);
4814 + i = 0;
4815 + while (i < len) {
4816 +- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
4817 ++ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
4818 +
4819 + ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
4820 +
4821 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
4822 +index 4b2282aa274e4..909cc00ef5ce3 100644
4823 +--- a/fs/btrfs/block-group.c
4824 ++++ b/fs/btrfs/block-group.c
4825 +@@ -1575,9 +1575,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
4826 + div64_u64(zone_unusable * 100, bg->length));
4827 + trace_btrfs_reclaim_block_group(bg);
4828 + ret = btrfs_relocate_chunk(fs_info, bg->start);
4829 +- if (ret)
4830 ++ if (ret) {
4831 ++ btrfs_dec_block_group_ro(bg);
4832 + btrfs_err(fs_info, "error relocating chunk %llu",
4833 + bg->start);
4834 ++ }
4835 +
4836 + next:
4837 + btrfs_put_block_group(bg);
4838 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
4839 +index 429a198f8937d..673e11fcf3fc9 100644
4840 +--- a/fs/btrfs/relocation.c
4841 ++++ b/fs/btrfs/relocation.c
4842 +@@ -3576,7 +3576,12 @@ int prepare_to_relocate(struct reloc_control *rc)
4843 + */
4844 + return PTR_ERR(trans);
4845 + }
4846 +- return btrfs_commit_transaction(trans);
4847 ++
4848 ++ ret = btrfs_commit_transaction(trans);
4849 ++ if (ret)
4850 ++ unset_reloc_control(rc);
4851 ++
4852 ++ return ret;
4853 + }
4854 +
4855 + static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
4856 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4857 +index bed6811476b09..e9e1aae89030a 100644
4858 +--- a/fs/btrfs/tree-log.c
4859 ++++ b/fs/btrfs/tree-log.c
4860 +@@ -1119,7 +1119,9 @@ again:
4861 + extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
4862 + inode_objectid, parent_objectid, 0,
4863 + 0);
4864 +- if (!IS_ERR_OR_NULL(extref)) {
4865 ++ if (IS_ERR(extref)) {
4866 ++ return PTR_ERR(extref);
4867 ++ } else if (extref) {
4868 + u32 item_size;
4869 + u32 cur_offset = 0;
4870 + unsigned long base;
4871 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
4872 +index d1faa9d2f1e8a..883bb91ee257e 100644
4873 +--- a/fs/ceph/caps.c
4874 ++++ b/fs/ceph/caps.c
4875 +@@ -3543,24 +3543,23 @@ static void handle_cap_grant(struct inode *inode,
4876 + fill_inline = true;
4877 + }
4878 +
4879 +- if (ci->i_auth_cap == cap &&
4880 +- le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
4881 +- if (newcaps & ~extra_info->issued)
4882 +- wake = true;
4883 ++ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
4884 ++ if (ci->i_auth_cap == cap) {
4885 ++ if (newcaps & ~extra_info->issued)
4886 ++ wake = true;
4887 ++
4888 ++ if (ci->i_requested_max_size > max_size ||
4889 ++ !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
4890 ++ /* re-request max_size if necessary */
4891 ++ ci->i_requested_max_size = 0;
4892 ++ wake = true;
4893 ++ }
4894 +
4895 +- if (ci->i_requested_max_size > max_size ||
4896 +- !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
4897 +- /* re-request max_size if necessary */
4898 +- ci->i_requested_max_size = 0;
4899 +- wake = true;
4900 ++ ceph_kick_flushing_inode_caps(session, ci);
4901 + }
4902 +-
4903 +- ceph_kick_flushing_inode_caps(session, ci);
4904 +- spin_unlock(&ci->i_ceph_lock);
4905 + up_read(&session->s_mdsc->snap_rwsem);
4906 +- } else {
4907 +- spin_unlock(&ci->i_ceph_lock);
4908 + }
4909 ++ spin_unlock(&ci->i_ceph_lock);
4910 +
4911 + if (fill_inline)
4912 + ceph_fill_inline_data(inode, NULL, extra_info->inline_data,
4913 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
4914 +index 912903de4de42..78d052dc17987 100644
4915 +--- a/fs/ceph/mds_client.c
4916 ++++ b/fs/ceph/mds_client.c
4917 +@@ -1196,14 +1196,17 @@ static int encode_supported_features(void **p, void *end)
4918 + if (count > 0) {
4919 + size_t i;
4920 + size_t size = FEATURE_BYTES(count);
4921 ++ unsigned long bit;
4922 +
4923 + if (WARN_ON_ONCE(*p + 4 + size > end))
4924 + return -ERANGE;
4925 +
4926 + ceph_encode_32(p, size);
4927 + memset(*p, 0, size);
4928 +- for (i = 0; i < count; i++)
4929 +- ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
4930 ++ for (i = 0; i < count; i++) {
4931 ++ bit = feature_bits[i];
4932 ++ ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
4933 ++ }
4934 + *p += size;
4935 + } else {
4936 + if (WARN_ON_ONCE(*p + 4 > end))
4937 +diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
4938 +index 97c7f7bfa55f3..2667350eb72cf 100644
4939 +--- a/fs/ceph/mds_client.h
4940 ++++ b/fs/ceph/mds_client.h
4941 +@@ -33,10 +33,6 @@ enum ceph_feature_type {
4942 + CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT,
4943 + };
4944 +
4945 +-/*
4946 +- * This will always have the highest feature bit value
4947 +- * as the last element of the array.
4948 +- */
4949 + #define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
4950 + 0, 1, 2, 3, 4, 5, 6, 7, \
4951 + CEPHFS_FEATURE_MIMIC, \
4952 +@@ -45,8 +41,6 @@ enum ceph_feature_type {
4953 + CEPHFS_FEATURE_MULTI_RECONNECT, \
4954 + CEPHFS_FEATURE_DELEG_INO, \
4955 + CEPHFS_FEATURE_METRIC_COLLECT, \
4956 +- \
4957 +- CEPHFS_FEATURE_MAX, \
4958 + }
4959 + #define CEPHFS_FEATURES_CLIENT_REQUIRED {}
4960 +
4961 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
4962 +index 5906f7f140eb1..699f676ded478 100644
4963 +--- a/fs/cifs/misc.c
4964 ++++ b/fs/cifs/misc.c
4965 +@@ -736,6 +736,8 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
4966 + list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
4967 + if (delayed_work_pending(&cfile->deferred)) {
4968 + if (cancel_delayed_work(&cfile->deferred)) {
4969 ++ cifs_del_deferred_close(cfile);
4970 ++
4971 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
4972 + if (tmp_list == NULL)
4973 + break;
4974 +@@ -767,6 +769,8 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
4975 + cfile = list_entry(tmp, struct cifsFileInfo, tlist);
4976 + if (delayed_work_pending(&cfile->deferred)) {
4977 + if (cancel_delayed_work(&cfile->deferred)) {
4978 ++ cifs_del_deferred_close(cfile);
4979 ++
4980 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
4981 + if (tmp_list == NULL)
4982 + break;
4983 +@@ -802,6 +806,8 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
4984 + if (strstr(full_path, path)) {
4985 + if (delayed_work_pending(&cfile->deferred)) {
4986 + if (cancel_delayed_work(&cfile->deferred)) {
4987 ++ cifs_del_deferred_close(cfile);
4988 ++
4989 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
4990 + if (tmp_list == NULL)
4991 + break;
4992 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
4993 +index 735aafee63be2..07895e9d537c8 100644
4994 +--- a/fs/cifs/smb2ops.c
4995 ++++ b/fs/cifs/smb2ops.c
4996 +@@ -1105,9 +1105,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
4997 + size_t name_len, value_len, user_name_len;
4998 +
4999 + while (src_size > 0) {
5000 +- name = &src->ea_data[0];
5001 + name_len = (size_t)src->ea_name_length;
5002 +- value = &src->ea_data[src->ea_name_length + 1];
5003 + value_len = (size_t)le16_to_cpu(src->ea_value_length);
5004 +
5005 + if (name_len == 0)
5006 +@@ -1119,6 +1117,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
5007 + goto out;
5008 + }
5009 +
5010 ++ name = &src->ea_data[0];
5011 ++ value = &src->ea_data[src->ea_name_length + 1];
5012 ++
5013 + if (ea_name) {
5014 + if (ea_name_len == name_len &&
5015 + memcmp(ea_name, name, name_len) == 0) {
5016 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
5017 +index 5821638cb8937..7d3ec39121f72 100644
5018 +--- a/fs/ext4/namei.c
5019 ++++ b/fs/ext4/namei.c
5020 +@@ -3090,11 +3090,8 @@ bool ext4_empty_dir(struct inode *inode)
5021 + de = (struct ext4_dir_entry_2 *) (bh->b_data +
5022 + (offset & (sb->s_blocksize - 1)));
5023 + if (ext4_check_dir_entry(inode, NULL, de, bh,
5024 +- bh->b_data, bh->b_size, offset)) {
5025 +- offset = (offset | (sb->s_blocksize - 1)) + 1;
5026 +- continue;
5027 +- }
5028 +- if (le32_to_cpu(de->inode)) {
5029 ++ bh->b_data, bh->b_size, offset) ||
5030 ++ le32_to_cpu(de->inode)) {
5031 + brelse(bh);
5032 + return false;
5033 + }
5034 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
5035 +index fa3c854125bbf..862cbbc01d6e7 100644
5036 +--- a/fs/ext4/resize.c
5037 ++++ b/fs/ext4/resize.c
5038 +@@ -1977,6 +1977,16 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
5039 + }
5040 + brelse(bh);
5041 +
5042 ++ /*
5043 ++ * For bigalloc, trim the requested size to the nearest cluster
5044 ++ * boundary to avoid creating an unusable filesystem. We do this
5045 ++ * silently, instead of returning an error, to avoid breaking
5046 ++ * callers that blindly resize the filesystem to the full size of
5047 ++ * the underlying block device.
5048 ++ */
5049 ++ if (ext4_has_feature_bigalloc(sb))
5050 ++ n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
5051 ++
5052 + retry:
5053 + o_blocks_count = ext4_blocks_count(es);
5054 +
5055 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
5056 +index 69c6bcaf5aae8..0e6e73bc42d4c 100644
5057 +--- a/fs/f2fs/node.c
5058 ++++ b/fs/f2fs/node.c
5059 +@@ -1291,7 +1291,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
5060 + dec_valid_node_count(sbi, dn->inode, !ofs);
5061 + goto fail;
5062 + }
5063 +- f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
5064 ++ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
5065 ++ err = -EFSCORRUPTED;
5066 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
5067 ++ goto fail;
5068 ++ }
5069 + #endif
5070 + new_ni.nid = dn->nid;
5071 + new_ni.ino = dn->inode->i_ino;
5072 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
5073 +index 841a978da0839..e98c90bd8ef6d 100644
5074 +--- a/fs/f2fs/segment.c
5075 ++++ b/fs/f2fs/segment.c
5076 +@@ -4537,6 +4537,12 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
5077 + return err;
5078 + seg_info_from_raw_sit(se, &sit);
5079 +
5080 ++ if (se->type >= NR_PERSISTENT_LOG) {
5081 ++ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5082 ++ se->type, start);
5083 ++ return -EFSCORRUPTED;
5084 ++ }
5085 ++
5086 + sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5087 +
5088 + if (f2fs_block_unit_discard(sbi)) {
5089 +@@ -4585,6 +4591,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
5090 + break;
5091 + seg_info_from_raw_sit(se, &sit);
5092 +
5093 ++ if (se->type >= NR_PERSISTENT_LOG) {
5094 ++ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
5095 ++ se->type, start);
5096 ++ err = -EFSCORRUPTED;
5097 ++ break;
5098 ++ }
5099 ++
5100 + sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
5101 +
5102 + if (f2fs_block_unit_discard(sbi)) {
5103 +diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
5104 +index f331866dd4182..ec6afd3c4bca6 100644
5105 +--- a/fs/nfs/nfs4idmap.c
5106 ++++ b/fs/nfs/nfs4idmap.c
5107 +@@ -561,22 +561,20 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
5108 + return true;
5109 + }
5110 +
5111 +-static void
5112 +-nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
5113 ++static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data,
5114 ++ int ret)
5115 + {
5116 +- struct key *authkey = idmap->idmap_upcall_data->authkey;
5117 +-
5118 +- kfree(idmap->idmap_upcall_data);
5119 +- idmap->idmap_upcall_data = NULL;
5120 +- complete_request_key(authkey, ret);
5121 +- key_put(authkey);
5122 ++ complete_request_key(data->authkey, ret);
5123 ++ key_put(data->authkey);
5124 ++ kfree(data);
5125 + }
5126 +
5127 +-static void
5128 +-nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
5129 ++static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap,
5130 ++ struct idmap_legacy_upcalldata *data,
5131 ++ int ret)
5132 + {
5133 +- if (idmap->idmap_upcall_data != NULL)
5134 +- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
5135 ++ if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data)
5136 ++ nfs_idmap_complete_pipe_upcall(data, ret);
5137 + }
5138 +
5139 + static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
5140 +@@ -613,7 +611,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
5141 +
5142 + ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
5143 + if (ret < 0)
5144 +- nfs_idmap_abort_pipe_upcall(idmap, ret);
5145 ++ nfs_idmap_abort_pipe_upcall(idmap, data, ret);
5146 +
5147 + return ret;
5148 + out2:
5149 +@@ -669,6 +667,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
5150 + struct request_key_auth *rka;
5151 + struct rpc_inode *rpci = RPC_I(file_inode(filp));
5152 + struct idmap *idmap = (struct idmap *)rpci->private;
5153 ++ struct idmap_legacy_upcalldata *data;
5154 + struct key *authkey;
5155 + struct idmap_msg im;
5156 + size_t namelen_in;
5157 +@@ -678,10 +677,11 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
5158 + * will have been woken up and someone else may now have used
5159 + * idmap_key_cons - so after this point we may no longer touch it.
5160 + */
5161 +- if (idmap->idmap_upcall_data == NULL)
5162 ++ data = xchg(&idmap->idmap_upcall_data, NULL);
5163 ++ if (data == NULL)
5164 + goto out_noupcall;
5165 +
5166 +- authkey = idmap->idmap_upcall_data->authkey;
5167 ++ authkey = data->authkey;
5168 + rka = get_request_key_auth(authkey);
5169 +
5170 + if (mlen != sizeof(im)) {
5171 +@@ -703,18 +703,17 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
5172 + if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
5173 + ret = -EINVAL;
5174 + goto out;
5175 +-}
5176 ++ }
5177 +
5178 +- ret = nfs_idmap_read_and_verify_message(&im,
5179 +- &idmap->idmap_upcall_data->idmap_msg,
5180 +- rka->target_key, authkey);
5181 ++ ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg,
5182 ++ rka->target_key, authkey);
5183 + if (ret >= 0) {
5184 + key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
5185 + ret = mlen;
5186 + }
5187 +
5188 + out:
5189 +- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
5190 ++ nfs_idmap_complete_pipe_upcall(data, ret);
5191 + out_noupcall:
5192 + return ret;
5193 + }
5194 +@@ -728,7 +727,7 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
5195 + struct idmap *idmap = data->idmap;
5196 +
5197 + if (msg->errno)
5198 +- nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
5199 ++ nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno);
5200 + }
5201 +
5202 + static void
5203 +@@ -736,8 +735,11 @@ idmap_release_pipe(struct inode *inode)
5204 + {
5205 + struct rpc_inode *rpci = RPC_I(inode);
5206 + struct idmap *idmap = (struct idmap *)rpci->private;
5207 ++ struct idmap_legacy_upcalldata *data;
5208 +
5209 +- nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
5210 ++ data = xchg(&idmap->idmap_upcall_data, NULL);
5211 ++ if (data)
5212 ++ nfs_idmap_complete_pipe_upcall(data, -EPIPE);
5213 + }
5214 +
5215 + int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
5216 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5217 +index cbb39aff8182e..a808763c52c19 100644
5218 +--- a/fs/nfs/nfs4proc.c
5219 ++++ b/fs/nfs/nfs4proc.c
5220 +@@ -787,10 +787,9 @@ static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
5221 + if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
5222 + slot->seq_nr_highest_sent = seqnr;
5223 + }
5224 +-static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
5225 +- u32 seqnr)
5226 ++static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
5227 + {
5228 +- slot->seq_nr_highest_sent = seqnr;
5229 ++ nfs4_slot_sequence_record_sent(slot, seqnr);
5230 + slot->seq_nr_last_acked = seqnr;
5231 + }
5232 +
5233 +@@ -857,7 +856,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
5234 + __func__,
5235 + slot->slot_nr,
5236 + slot->seq_nr);
5237 +- nfs4_slot_sequence_acked(slot, slot->seq_nr);
5238 + goto out_retry;
5239 + case -NFS4ERR_RETRY_UNCACHED_REP:
5240 + case -NFS4ERR_SEQ_FALSE_RETRY:
5241 +@@ -3108,12 +3106,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
5242 + }
5243 +
5244 + out:
5245 +- if (opendata->lgp) {
5246 +- nfs4_lgopen_release(opendata->lgp);
5247 +- opendata->lgp = NULL;
5248 +- }
5249 +- if (!opendata->cancelled)
5250 ++ if (!opendata->cancelled) {
5251 ++ if (opendata->lgp) {
5252 ++ nfs4_lgopen_release(opendata->lgp);
5253 ++ opendata->lgp = NULL;
5254 ++ }
5255 + nfs4_sequence_free_slot(&opendata->o_res.seq_res);
5256 ++ }
5257 + return ret;
5258 + }
5259 +
5260 +@@ -9410,6 +9409,9 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
5261 + rpc_delay(task, NFS4_POLL_RETRY_MAX);
5262 + fallthrough;
5263 + case -NFS4ERR_RETRY_UNCACHED_REP:
5264 ++ case -EACCES:
5265 ++ dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
5266 ++ __func__, task->tk_status, clp->cl_hostname);
5267 + return -EAGAIN;
5268 + case -NFS4ERR_BADSESSION:
5269 + case -NFS4ERR_DEADSESSION:
5270 +diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
5271 +index 49b7df6167785..614513460b8e0 100644
5272 +--- a/fs/ntfs3/fslog.c
5273 ++++ b/fs/ntfs3/fslog.c
5274 +@@ -5057,7 +5057,7 @@ undo_action_next:
5275 + goto add_allocated_vcns;
5276 +
5277 + vcn = le64_to_cpu(lrh->target_vcn);
5278 +- vcn &= ~(log->clst_per_page - 1);
5279 ++ vcn &= ~(u64)(log->clst_per_page - 1);
5280 +
5281 + add_allocated_vcns:
5282 + for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
5283 +diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
5284 +index 4de9acb169689..24b57c3cc625c 100644
5285 +--- a/fs/ntfs3/fsntfs.c
5286 ++++ b/fs/ntfs3/fsntfs.c
5287 +@@ -831,10 +831,15 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
5288 + {
5289 + int err;
5290 + struct super_block *sb = sbi->sb;
5291 +- u32 blocksize = sb->s_blocksize;
5292 ++ u32 blocksize;
5293 + sector_t block1, block2;
5294 + u32 bytes;
5295 +
5296 ++ if (!sb)
5297 ++ return -EINVAL;
5298 ++
5299 ++ blocksize = sb->s_blocksize;
5300 ++
5301 + if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
5302 + return 0;
5303 +
5304 +diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
5305 +index 6f81e3a49abfb..76ebea253fa25 100644
5306 +--- a/fs/ntfs3/index.c
5307 ++++ b/fs/ntfs3/index.c
5308 +@@ -1994,7 +1994,7 @@ static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
5309 + const struct NTFS_DE *e, bool trim)
5310 + {
5311 + int err;
5312 +- struct indx_node *n;
5313 ++ struct indx_node *n = NULL;
5314 + struct INDEX_HDR *hdr;
5315 + CLST vbn = de_get_vbn(e);
5316 + size_t i;
5317 +diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
5318 +index 879952254071f..b2cc1191be695 100644
5319 +--- a/fs/ntfs3/inode.c
5320 ++++ b/fs/ntfs3/inode.c
5321 +@@ -430,6 +430,7 @@ end_enum:
5322 + } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
5323 + fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
5324 + /* Records in $Extend are not a files or general directories. */
5325 ++ inode->i_op = &ntfs_file_inode_operations;
5326 + } else {
5327 + err = -EINVAL;
5328 + goto out;
5329 +diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
5330 +index 7f85ec83e196f..f3b88c7e35f73 100644
5331 +--- a/fs/ntfs3/super.c
5332 ++++ b/fs/ntfs3/super.c
5333 +@@ -30,6 +30,7 @@
5334 + #include <linux/fs_context.h>
5335 + #include <linux/fs_parser.h>
5336 + #include <linux/log2.h>
5337 ++#include <linux/minmax.h>
5338 + #include <linux/module.h>
5339 + #include <linux/nls.h>
5340 + #include <linux/seq_file.h>
5341 +@@ -390,7 +391,7 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
5342 + return -EINVAL;
5343 + }
5344 +
5345 +- memcpy(sbi->options, new_opts, sizeof(*new_opts));
5346 ++ swap(sbi->options, fc->fs_private);
5347 +
5348 + return 0;
5349 + }
5350 +@@ -901,6 +902,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
5351 + ref.high = 0;
5352 +
5353 + sbi->sb = sb;
5354 ++ sbi->options = fc->fs_private;
5355 ++ fc->fs_private = NULL;
5356 + sb->s_flags |= SB_NODIRATIME;
5357 + sb->s_magic = 0x7366746e; // "ntfs"
5358 + sb->s_op = &ntfs_sops;
5359 +@@ -1264,8 +1267,6 @@ load_root:
5360 + goto put_inode_out;
5361 + }
5362 +
5363 +- fc->fs_private = NULL;
5364 +-
5365 + return 0;
5366 +
5367 + put_inode_out:
5368 +@@ -1418,7 +1419,6 @@ static int ntfs_init_fs_context(struct fs_context *fc)
5369 + mutex_init(&sbi->compress.mtx_lzx);
5370 + #endif
5371 +
5372 +- sbi->options = opts;
5373 + fc->s_fs_info = sbi;
5374 + ok:
5375 + fc->fs_private = opts;
5376 +diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
5377 +index 0968565ff2ca0..872eb56bb1706 100644
5378 +--- a/fs/ntfs3/xattr.c
5379 ++++ b/fs/ntfs3/xattr.c
5380 +@@ -545,28 +545,23 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
5381 + {
5382 + const char *name;
5383 + size_t size, name_len;
5384 +- void *value = NULL;
5385 +- int err = 0;
5386 ++ void *value;
5387 ++ int err;
5388 + int flags;
5389 ++ umode_t mode;
5390 +
5391 + if (S_ISLNK(inode->i_mode))
5392 + return -EOPNOTSUPP;
5393 +
5394 ++ mode = inode->i_mode;
5395 + switch (type) {
5396 + case ACL_TYPE_ACCESS:
5397 + /* Do not change i_mode if we are in init_acl */
5398 + if (acl && !init_acl) {
5399 +- umode_t mode;
5400 +-
5401 + err = posix_acl_update_mode(mnt_userns, inode, &mode,
5402 + &acl);
5403 + if (err)
5404 +- goto out;
5405 +-
5406 +- if (inode->i_mode != mode) {
5407 +- inode->i_mode = mode;
5408 +- mark_inode_dirty(inode);
5409 +- }
5410 ++ return err;
5411 + }
5412 + name = XATTR_NAME_POSIX_ACL_ACCESS;
5413 + name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
5414 +@@ -602,8 +597,13 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
5415 + err = ntfs_set_ea(inode, name, name_len, value, size, flags);
5416 + if (err == -ENODATA && !size)
5417 + err = 0; /* Removing non existed xattr. */
5418 +- if (!err)
5419 ++ if (!err) {
5420 + set_cached_acl(inode, type, acl);
5421 ++ if (inode->i_mode != mode) {
5422 ++ inode->i_mode = mode;
5423 ++ mark_inode_dirty(inode);
5424 ++ }
5425 ++ }
5426 +
5427 + out:
5428 + kfree(value);
5429 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
5430 +index 7bb0a47cb6156..9837aaf9caf18 100644
5431 +--- a/fs/overlayfs/super.c
5432 ++++ b/fs/overlayfs/super.c
5433 +@@ -1413,11 +1413,12 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
5434 + */
5435 + err = ovl_do_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
5436 + if (err) {
5437 ++ pr_warn("failed to set xattr on upper\n");
5438 + ofs->noxattr = true;
5439 + if (ofs->config.index || ofs->config.metacopy) {
5440 + ofs->config.index = false;
5441 + ofs->config.metacopy = false;
5442 +- pr_warn("upper fs does not support xattr, falling back to index=off,metacopy=off.\n");
5443 ++ pr_warn("...falling back to index=off,metacopy=off.\n");
5444 + }
5445 + /*
5446 + * xattr support is required for persistent st_ino.
5447 +@@ -1425,8 +1426,10 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
5448 + */
5449 + if (ofs->config.xino == OVL_XINO_AUTO) {
5450 + ofs->config.xino = OVL_XINO_OFF;
5451 +- pr_warn("upper fs does not support xattr, falling back to xino=off.\n");
5452 ++ pr_warn("...falling back to xino=off.\n");
5453 + }
5454 ++ if (err == -EPERM && !ofs->config.userxattr)
5455 ++ pr_info("try mounting with 'userxattr' option\n");
5456 + err = 0;
5457 + } else {
5458 + ovl_do_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
5459 +diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
5460 +index 6a3ce0f6dc9e9..be9bcf8a1f991 100644
5461 +--- a/fs/xfs/xfs_filestream.c
5462 ++++ b/fs/xfs/xfs_filestream.c
5463 +@@ -128,11 +128,12 @@ xfs_filestream_pick_ag(
5464 + if (!pag->pagf_init) {
5465 + err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
5466 + if (err) {
5467 +- xfs_perag_put(pag);
5468 +- if (err != -EAGAIN)
5469 ++ if (err != -EAGAIN) {
5470 ++ xfs_perag_put(pag);
5471 + return err;
5472 ++ }
5473 + /* Couldn't lock the AGF, skip this AG. */
5474 +- continue;
5475 ++ goto next_ag;
5476 + }
5477 + }
5478 +
5479 +diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
5480 +index 710e857bb825f..5b5b68affe66d 100644
5481 +--- a/fs/xfs/xfs_fsops.c
5482 ++++ b/fs/xfs/xfs_fsops.c
5483 +@@ -430,46 +430,36 @@ xfs_reserve_blocks(
5484 + * If the request is larger than the current reservation, reserve the
5485 + * blocks before we update the reserve counters. Sample m_fdblocks and
5486 + * perform a partial reservation if the request exceeds free space.
5487 ++ *
5488 ++ * The code below estimates how many blocks it can request from
5489 ++ * fdblocks to stash in the reserve pool. This is a classic TOCTOU
5490 ++ * race since fdblocks updates are not always coordinated via
5491 ++ * m_sb_lock. Set the reserve size even if there's not enough free
5492 ++ * space to fill it because mod_fdblocks will refill an undersized
5493 ++ * reserve when it can.
5494 + */
5495 +- error = -ENOSPC;
5496 +- do {
5497 +- free = percpu_counter_sum(&mp->m_fdblocks) -
5498 ++ free = percpu_counter_sum(&mp->m_fdblocks) -
5499 + xfs_fdblocks_unavailable(mp);
5500 +- if (free <= 0)
5501 +- break;
5502 +-
5503 +- delta = request - mp->m_resblks;
5504 +- lcounter = free - delta;
5505 +- if (lcounter < 0)
5506 +- /* We can't satisfy the request, just get what we can */
5507 +- fdblks_delta = free;
5508 +- else
5509 +- fdblks_delta = delta;
5510 +-
5511 ++ delta = request - mp->m_resblks;
5512 ++ mp->m_resblks = request;
5513 ++ if (delta > 0 && free > 0) {
5514 + /*
5515 + * We'll either succeed in getting space from the free block
5516 +- * count or we'll get an ENOSPC. If we get a ENOSPC, it means
5517 +- * things changed while we were calculating fdblks_delta and so
5518 +- * we should try again to see if there is anything left to
5519 +- * reserve.
5520 ++ * count or we'll get an ENOSPC. Don't set the reserved flag
5521 ++ * here - we don't want to reserve the extra reserve blocks
5522 ++ * from the reserve.
5523 + *
5524 +- * Don't set the reserved flag here - we don't want to reserve
5525 +- * the extra reserve blocks from the reserve.....
5526 ++ * The desired reserve size can change after we drop the lock.
5527 ++ * Use mod_fdblocks to put the space into the reserve or into
5528 ++ * fdblocks as appropriate.
5529 + */
5530 ++ fdblks_delta = min(free, delta);
5531 + spin_unlock(&mp->m_sb_lock);
5532 + error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
5533 ++ if (!error)
5534 ++ xfs_mod_fdblocks(mp, fdblks_delta, 0);
5535 + spin_lock(&mp->m_sb_lock);
5536 +- } while (error == -ENOSPC);
5537 +-
5538 +- /*
5539 +- * Update the reserve counters if blocks have been successfully
5540 +- * allocated.
5541 +- */
5542 +- if (!error && fdblks_delta) {
5543 +- mp->m_resblks += fdblks_delta;
5544 +- mp->m_resblks_avail += fdblks_delta;
5545 + }
5546 +-
5547 + out:
5548 + if (outval) {
5549 + outval->resblks = mp->m_resblks;
5550 +diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
5551 +index f2210d927481b..5e44d7bbd8fca 100644
5552 +--- a/fs/xfs/xfs_icache.c
5553 ++++ b/fs/xfs/xfs_icache.c
5554 +@@ -1872,28 +1872,20 @@ xfs_inodegc_worker(
5555 + }
5556 +
5557 + /*
5558 +- * Force all currently queued inode inactivation work to run immediately, and
5559 +- * wait for the work to finish. Two pass - queue all the work first pass, wait
5560 +- * for it in a second pass.
5561 ++ * Force all currently queued inode inactivation work to run immediately and
5562 ++ * wait for the work to finish.
5563 + */
5564 + void
5565 + xfs_inodegc_flush(
5566 + struct xfs_mount *mp)
5567 + {
5568 +- struct xfs_inodegc *gc;
5569 +- int cpu;
5570 +-
5571 + if (!xfs_is_inodegc_enabled(mp))
5572 + return;
5573 +
5574 + trace_xfs_inodegc_flush(mp, __return_address);
5575 +
5576 + xfs_inodegc_queue_all(mp);
5577 +-
5578 +- for_each_online_cpu(cpu) {
5579 +- gc = per_cpu_ptr(mp->m_inodegc, cpu);
5580 +- flush_work(&gc->work);
5581 +- }
5582 ++ flush_workqueue(mp->m_inodegc_wq);
5583 + }
5584 +
5585 + /*
5586 +@@ -1904,18 +1896,12 @@ void
5587 + xfs_inodegc_stop(
5588 + struct xfs_mount *mp)
5589 + {
5590 +- struct xfs_inodegc *gc;
5591 +- int cpu;
5592 +-
5593 + if (!xfs_clear_inodegc_enabled(mp))
5594 + return;
5595 +
5596 + xfs_inodegc_queue_all(mp);
5597 ++ drain_workqueue(mp->m_inodegc_wq);
5598 +
5599 +- for_each_online_cpu(cpu) {
5600 +- gc = per_cpu_ptr(mp->m_inodegc, cpu);
5601 +- cancel_work_sync(&gc->work);
5602 +- }
5603 + trace_xfs_inodegc_stop(mp, __return_address);
5604 + }
5605 +
5606 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
5607 +index c19f3ca605af6..fb7a97cdf99f1 100644
5608 +--- a/fs/xfs/xfs_inode.c
5609 ++++ b/fs/xfs/xfs_inode.c
5610 +@@ -1223,7 +1223,7 @@ xfs_link(
5611 + {
5612 + xfs_mount_t *mp = tdp->i_mount;
5613 + xfs_trans_t *tp;
5614 +- int error;
5615 ++ int error, nospace_error = 0;
5616 + int resblks;
5617 +
5618 + trace_xfs_link(tdp, target_name);
5619 +@@ -1242,19 +1242,11 @@ xfs_link(
5620 + goto std_return;
5621 +
5622 + resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
5623 +- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
5624 +- if (error == -ENOSPC) {
5625 +- resblks = 0;
5626 +- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
5627 +- }
5628 ++ error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
5629 ++ &tp, &nospace_error);
5630 + if (error)
5631 + goto std_return;
5632 +
5633 +- xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
5634 +-
5635 +- xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
5636 +- xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
5637 +-
5638 + error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
5639 + XFS_IEXT_DIR_MANIP_CNT(mp));
5640 + if (error)
5641 +@@ -1312,6 +1304,8 @@ xfs_link(
5642 + error_return:
5643 + xfs_trans_cancel(tp);
5644 + std_return:
5645 ++ if (error == -ENOSPC && nospace_error)
5646 ++ error = nospace_error;
5647 + return error;
5648 + }
5649 +
5650 +@@ -2761,6 +2755,7 @@ xfs_remove(
5651 + xfs_mount_t *mp = dp->i_mount;
5652 + xfs_trans_t *tp = NULL;
5653 + int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
5654 ++ int dontcare;
5655 + int error = 0;
5656 + uint resblks;
5657 +
5658 +@@ -2778,31 +2773,24 @@ xfs_remove(
5659 + goto std_return;
5660 +
5661 + /*
5662 +- * We try to get the real space reservation first,
5663 +- * allowing for directory btree deletion(s) implying
5664 +- * possible bmap insert(s). If we can't get the space
5665 +- * reservation then we use 0 instead, and avoid the bmap
5666 +- * btree insert(s) in the directory code by, if the bmap
5667 +- * insert tries to happen, instead trimming the LAST
5668 +- * block from the directory.
5669 ++ * We try to get the real space reservation first, allowing for
5670 ++ * directory btree deletion(s) implying possible bmap insert(s). If we
5671 ++ * can't get the space reservation then we use 0 instead, and avoid the
5672 ++ * bmap btree insert(s) in the directory code by, if the bmap insert
5673 ++ * tries to happen, instead trimming the LAST block from the directory.
5674 ++ *
5675 ++ * Ignore EDQUOT and ENOSPC being returned via nospace_error because
5676 ++ * the directory code can handle a reservationless update and we don't
5677 ++ * want to prevent a user from trying to free space by deleting things.
5678 + */
5679 + resblks = XFS_REMOVE_SPACE_RES(mp);
5680 +- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
5681 +- if (error == -ENOSPC) {
5682 +- resblks = 0;
5683 +- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
5684 +- &tp);
5685 +- }
5686 ++ error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
5687 ++ &tp, &dontcare);
5688 + if (error) {
5689 + ASSERT(error != -ENOSPC);
5690 + goto std_return;
5691 + }
5692 +
5693 +- xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
5694 +-
5695 +- xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
5696 +- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
5697 +-
5698 + /*
5699 + * If we're removing a directory perform some additional validation.
5700 + */
5701 +@@ -3115,7 +3103,8 @@ xfs_rename(
5702 + bool new_parent = (src_dp != target_dp);
5703 + bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
5704 + int spaceres;
5705 +- int error;
5706 ++ bool retried = false;
5707 ++ int error, nospace_error = 0;
5708 +
5709 + trace_xfs_rename(src_dp, target_dp, src_name, target_name);
5710 +
5711 +@@ -3139,9 +3128,12 @@ xfs_rename(
5712 + xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
5713 + inodes, &num_inodes);
5714 +
5715 ++retry:
5716 ++ nospace_error = 0;
5717 + spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
5718 + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
5719 + if (error == -ENOSPC) {
5720 ++ nospace_error = error;
5721 + spaceres = 0;
5722 + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
5723 + &tp);
5724 +@@ -3195,6 +3187,31 @@ xfs_rename(
5725 + target_dp, target_name, target_ip,
5726 + spaceres);
5727 +
5728 ++ /*
5729 ++ * Try to reserve quota to handle an expansion of the target directory.
5730 ++ * We'll allow the rename to continue in reservationless mode if we hit
5731 ++ * a space usage constraint. If we trigger reservationless mode, save
5732 ++ * the errno if there isn't any free space in the target directory.
5733 ++ */
5734 ++ if (spaceres != 0) {
5735 ++ error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
5736 ++ 0, false);
5737 ++ if (error == -EDQUOT || error == -ENOSPC) {
5738 ++ if (!retried) {
5739 ++ xfs_trans_cancel(tp);
5740 ++ xfs_blockgc_free_quota(target_dp, 0);
5741 ++ retried = true;
5742 ++ goto retry;
5743 ++ }
5744 ++
5745 ++ nospace_error = error;
5746 ++ spaceres = 0;
5747 ++ error = 0;
5748 ++ }
5749 ++ if (error)
5750 ++ goto out_trans_cancel;
5751 ++ }
5752 ++
5753 + /*
5754 + * Check for expected errors before we dirty the transaction
5755 + * so we can return an error without a transaction abort.
5756 +@@ -3441,6 +3458,8 @@ out_trans_cancel:
5757 + out_release_wip:
5758 + if (wip)
5759 + xfs_irele(wip);
5760 ++ if (error == -ENOSPC && nospace_error)
5761 ++ error = nospace_error;
5762 + return error;
5763 + }
5764 +
5765 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
5766 +index fba52e75e98b6..bcc3c18c8080b 100644
5767 +--- a/fs/xfs/xfs_ioctl.c
5768 ++++ b/fs/xfs/xfs_ioctl.c
5769 +@@ -1545,7 +1545,7 @@ xfs_ioc_getbmap(
5770 +
5771 + if (bmx.bmv_count < 2)
5772 + return -EINVAL;
5773 +- if (bmx.bmv_count > ULONG_MAX / recsize)
5774 ++ if (bmx.bmv_count >= INT_MAX / recsize)
5775 + return -ENOMEM;
5776 +
5777 + buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL);
5778 +diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
5779 +index 67dec11e34c7e..95c183072e7a2 100644
5780 +--- a/fs/xfs/xfs_trans.c
5781 ++++ b/fs/xfs/xfs_trans.c
5782 +@@ -1201,3 +1201,89 @@ out_cancel:
5783 + xfs_trans_cancel(tp);
5784 + return error;
5785 + }
5786 ++
5787 ++/*
5788 ++ * Allocate a transaction, lock and join the directory and child inodes to it,
5789 ++ * and reserve quota for a directory update. If there isn't sufficient space,
5790 ++ * @dblocks will be set to zero for a reservationless directory update and
5791 ++ * @nospace_error will be set to a negative errno describing the space
5792 ++ * constraint we hit.
5793 ++ *
5794 ++ * The caller must ensure that the on-disk dquots attached to this inode have
5795 ++ * already been allocated and initialized. The ILOCKs will be dropped when the
5796 ++ * transaction is committed or cancelled.
5797 ++ */
5798 ++int
5799 ++xfs_trans_alloc_dir(
5800 ++ struct xfs_inode *dp,
5801 ++ struct xfs_trans_res *resv,
5802 ++ struct xfs_inode *ip,
5803 ++ unsigned int *dblocks,
5804 ++ struct xfs_trans **tpp,
5805 ++ int *nospace_error)
5806 ++{
5807 ++ struct xfs_trans *tp;
5808 ++ struct xfs_mount *mp = ip->i_mount;
5809 ++ unsigned int resblks;
5810 ++ bool retried = false;
5811 ++ int error;
5812 ++
5813 ++retry:
5814 ++ *nospace_error = 0;
5815 ++ resblks = *dblocks;
5816 ++ error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
5817 ++ if (error == -ENOSPC) {
5818 ++ *nospace_error = error;
5819 ++ resblks = 0;
5820 ++ error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
5821 ++ }
5822 ++ if (error)
5823 ++ return error;
5824 ++
5825 ++ xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
5826 ++
5827 ++ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
5828 ++ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
5829 ++
5830 ++ error = xfs_qm_dqattach_locked(dp, false);
5831 ++ if (error) {
5832 ++ /* Caller should have allocated the dquots! */
5833 ++ ASSERT(error != -ENOENT);
5834 ++ goto out_cancel;
5835 ++ }
5836 ++
5837 ++ error = xfs_qm_dqattach_locked(ip, false);
5838 ++ if (error) {
5839 ++ /* Caller should have allocated the dquots! */
5840 ++ ASSERT(error != -ENOENT);
5841 ++ goto out_cancel;
5842 ++ }
5843 ++
5844 ++ if (resblks == 0)
5845 ++ goto done;
5846 ++
5847 ++ error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
5848 ++ if (error == -EDQUOT || error == -ENOSPC) {
5849 ++ if (!retried) {
5850 ++ xfs_trans_cancel(tp);
5851 ++ xfs_blockgc_free_quota(dp, 0);
5852 ++ retried = true;
5853 ++ goto retry;
5854 ++ }
5855 ++
5856 ++ *nospace_error = error;
5857 ++ resblks = 0;
5858 ++ error = 0;
5859 ++ }
5860 ++ if (error)
5861 ++ goto out_cancel;
5862 ++
5863 ++done:
5864 ++ *tpp = tp;
5865 ++ *dblocks = resblks;
5866 ++ return 0;
5867 ++
5868 ++out_cancel:
5869 ++ xfs_trans_cancel(tp);
5870 ++ return error;
5871 ++}
5872 +diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
5873 +index 50da47f23a077..faba74d4c7026 100644
5874 +--- a/fs/xfs/xfs_trans.h
5875 ++++ b/fs/xfs/xfs_trans.h
5876 +@@ -265,6 +265,9 @@ int xfs_trans_alloc_icreate(struct xfs_mount *mp, struct xfs_trans_res *resv,
5877 + int xfs_trans_alloc_ichange(struct xfs_inode *ip, struct xfs_dquot *udqp,
5878 + struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, bool force,
5879 + struct xfs_trans **tpp);
5880 ++int xfs_trans_alloc_dir(struct xfs_inode *dp, struct xfs_trans_res *resv,
5881 ++ struct xfs_inode *ip, unsigned int *dblocks,
5882 ++ struct xfs_trans **tpp, int *nospace_error);
5883 +
5884 + static inline void
5885 + xfs_trans_set_context(
5886 +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
5887 +index 3872ce6714119..955c457e585a3 100644
5888 +--- a/fs/xfs/xfs_trans_dquot.c
5889 ++++ b/fs/xfs/xfs_trans_dquot.c
5890 +@@ -603,7 +603,6 @@ xfs_dqresv_check(
5891 + return QUOTA_NL_ISOFTLONGWARN;
5892 + }
5893 +
5894 +- res->warnings++;
5895 + return QUOTA_NL_ISOFTWARN;
5896 + }
5897 +
5898 +diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
5899 +index 3096f086b5a32..71ab4ba9c25d1 100644
5900 +--- a/include/asm-generic/bitops/atomic.h
5901 ++++ b/include/asm-generic/bitops/atomic.h
5902 +@@ -39,9 +39,6 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
5903 + unsigned long mask = BIT_MASK(nr);
5904 +
5905 + p += BIT_WORD(nr);
5906 +- if (READ_ONCE(*p) & mask)
5907 +- return 1;
5908 +-
5909 + old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
5910 + return !!(old & mask);
5911 + }
5912 +@@ -53,9 +50,6 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
5913 + unsigned long mask = BIT_MASK(nr);
5914 +
5915 + p += BIT_WORD(nr);
5916 +- if (!(READ_ONCE(*p) & mask))
5917 +- return 0;
5918 +-
5919 + old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
5920 + return !!(old & mask);
5921 + }
5922 +diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
5923 +index 546e27fc6d462..ee28d2b0a3091 100644
5924 +--- a/include/linux/bpfptr.h
5925 ++++ b/include/linux/bpfptr.h
5926 +@@ -48,7 +48,9 @@ static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
5927 + static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
5928 + size_t offset, size_t size)
5929 + {
5930 +- return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
5931 ++ if (!bpfptr_is_kernel(src))
5932 ++ return copy_from_user(dst, src.user + offset, size);
5933 ++ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
5934 + }
5935 +
5936 + static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
5937 +@@ -77,7 +79,9 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
5938 +
5939 + static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
5940 + {
5941 +- return strncpy_from_sockptr(dst, (sockptr_t) src, count);
5942 ++ if (bpfptr_is_kernel(src))
5943 ++ return strncpy_from_kernel_nofault(dst, src.kernel, count);
5944 ++ return strncpy_from_user(dst, src.user, count);
5945 + }
5946 +
5947 + #endif /* _LINUX_BPFPTR_H */
5948 +diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
5949 +index 86af6f0a00a2a..ca98aeadcc804 100644
5950 +--- a/include/linux/io-pgtable.h
5951 ++++ b/include/linux/io-pgtable.h
5952 +@@ -74,17 +74,22 @@ struct io_pgtable_cfg {
5953 + * to support up to 35 bits PA where the bit32, bit33 and bit34 are
5954 + * encoded in the bit9, bit4 and bit5 of the PTE respectively.
5955 + *
5956 ++ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
5957 ++ * extend the translation table base support up to 35 bits PA, the
5958 ++ * encoding format is same with IO_PGTABLE_QUIRK_ARM_MTK_EXT.
5959 ++ *
5960 + * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
5961 + * for use in the upper half of a split address space.
5962 + *
5963 + * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
5964 + * attributes set in the TCR for a non-coherent page-table walker.
5965 + */
5966 +- #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
5967 +- #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
5968 +- #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
5969 +- #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
5970 +- #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
5971 ++ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
5972 ++ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
5973 ++ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
5974 ++ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
5975 ++ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
5976 ++ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
5977 + unsigned long quirks;
5978 + unsigned long pgsize_bitmap;
5979 + unsigned int ias;
5980 +diff --git a/include/linux/nmi.h b/include/linux/nmi.h
5981 +index 750c7f395ca90..f700ff2df074e 100644
5982 +--- a/include/linux/nmi.h
5983 ++++ b/include/linux/nmi.h
5984 +@@ -122,6 +122,8 @@ int watchdog_nmi_probe(void);
5985 + int watchdog_nmi_enable(unsigned int cpu);
5986 + void watchdog_nmi_disable(unsigned int cpu);
5987 +
5988 ++void lockup_detector_reconfigure(void);
5989 ++
5990 + /**
5991 + * touch_nmi_watchdog - restart NMI watchdog timeout.
5992 + *
5993 +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
5994 +index 4417f667c757e..3a2c714d6b629 100644
5995 +--- a/include/linux/sunrpc/xdr.h
5996 ++++ b/include/linux/sunrpc/xdr.h
5997 +@@ -405,8 +405,8 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
5998 + */
5999 + static inline __be32 *xdr_encode_bool(__be32 *p, u32 n)
6000 + {
6001 +- *p = n ? xdr_one : xdr_zero;
6002 +- return p++;
6003 ++ *p++ = n ? xdr_one : xdr_zero;
6004 ++ return p;
6005 + }
6006 +
6007 + /**
6008 +diff --git a/include/linux/uacce.h b/include/linux/uacce.h
6009 +index 48e319f402751..9ce88c28b0a87 100644
6010 +--- a/include/linux/uacce.h
6011 ++++ b/include/linux/uacce.h
6012 +@@ -70,6 +70,7 @@ enum uacce_q_state {
6013 + * @wait: wait queue head
6014 + * @list: index into uacce queues list
6015 + * @qfrs: pointer of qfr regions
6016 ++ * @mutex: protects queue state
6017 + * @state: queue state machine
6018 + * @pasid: pasid associated to the mm
6019 + * @handle: iommu_sva handle returned by iommu_sva_bind_device()
6020 +@@ -80,6 +81,7 @@ struct uacce_queue {
6021 + wait_queue_head_t wait;
6022 + struct list_head list;
6023 + struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
6024 ++ struct mutex mutex;
6025 + enum uacce_q_state state;
6026 + u32 pasid;
6027 + struct iommu_sva *handle;
6028 +@@ -97,9 +99,9 @@ struct uacce_queue {
6029 + * @dev_id: id of the uacce device
6030 + * @cdev: cdev of the uacce
6031 + * @dev: dev of the uacce
6032 ++ * @mutex: protects uacce operation
6033 + * @priv: private pointer of the uacce
6034 + * @queues: list of queues
6035 +- * @queues_lock: lock for queues list
6036 + * @inode: core vfs
6037 + */
6038 + struct uacce_device {
6039 +@@ -113,9 +115,9 @@ struct uacce_device {
6040 + u32 dev_id;
6041 + struct cdev *cdev;
6042 + struct device dev;
6043 ++ struct mutex mutex;
6044 + void *priv;
6045 + struct list_head queues;
6046 +- struct mutex queues_lock;
6047 + struct inode *inode;
6048 + };
6049 +
6050 +diff --git a/include/sound/control.h b/include/sound/control.h
6051 +index 985c51a8fb748..a1fc7e0a47d95 100644
6052 +--- a/include/sound/control.h
6053 ++++ b/include/sound/control.h
6054 +@@ -109,7 +109,7 @@ struct snd_ctl_file {
6055 + int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
6056 + wait_queue_head_t change_sleep;
6057 + spinlock_t read_lock;
6058 +- struct fasync_struct *fasync;
6059 ++ struct snd_fasync *fasync;
6060 + int subscribed; /* read interface is activated */
6061 + struct list_head events; /* waiting events for read */
6062 + };
6063 +diff --git a/include/sound/core.h b/include/sound/core.h
6064 +index 6d4cc49584c63..39cee40ac22e0 100644
6065 +--- a/include/sound/core.h
6066 ++++ b/include/sound/core.h
6067 +@@ -501,4 +501,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device,
6068 + }
6069 + #endif
6070 +
6071 ++/* async signal helpers */
6072 ++struct snd_fasync;
6073 ++
6074 ++int snd_fasync_helper(int fd, struct file *file, int on,
6075 ++ struct snd_fasync **fasyncp);
6076 ++void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll);
6077 ++void snd_fasync_free(struct snd_fasync *fasync);
6078 ++
6079 + #endif /* __SOUND_CORE_H */
6080 +diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
6081 +index 447def5405444..88014cd31b28a 100644
6082 +--- a/kernel/bpf/arraymap.c
6083 ++++ b/kernel/bpf/arraymap.c
6084 +@@ -620,6 +620,11 @@ static int bpf_iter_init_array_map(void *priv_data,
6085 + seq_info->percpu_value_buf = value_buf;
6086 + }
6087 +
6088 ++ /* bpf_iter_attach_map() acquires a map uref, and the uref may be
6089 ++ * released before or in the middle of iterating map elements, so
6090 ++ * acquire an extra map uref for iterator.
6091 ++ */
6092 ++ bpf_map_inc_with_uref(map);
6093 + seq_info->map = map;
6094 + return 0;
6095 + }
6096 +@@ -628,6 +633,7 @@ static void bpf_iter_fini_array_map(void *priv_data)
6097 + {
6098 + struct bpf_iter_seq_array_map_info *seq_info = priv_data;
6099 +
6100 ++ bpf_map_put_with_uref(seq_info->map);
6101 + kfree(seq_info->percpu_value_buf);
6102 + }
6103 +
6104 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
6105 +index 32471ba027086..47eebb88695ee 100644
6106 +--- a/kernel/bpf/hashtab.c
6107 ++++ b/kernel/bpf/hashtab.c
6108 +@@ -291,12 +291,8 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
6109 + struct htab_elem *l;
6110 +
6111 + if (node) {
6112 +- u32 key_size = htab->map.key_size;
6113 +-
6114 + l = container_of(node, struct htab_elem, lru_node);
6115 +- memcpy(l->key, key, key_size);
6116 +- check_and_init_map_value(&htab->map,
6117 +- l->key + round_up(key_size, 8));
6118 ++ memcpy(l->key, key, htab->map.key_size);
6119 + return l;
6120 + }
6121 +
6122 +@@ -2023,6 +2019,7 @@ static int bpf_iter_init_hash_map(void *priv_data,
6123 + seq_info->percpu_value_buf = value_buf;
6124 + }
6125 +
6126 ++ bpf_map_inc_with_uref(map);
6127 + seq_info->map = map;
6128 + seq_info->htab = container_of(map, struct bpf_htab, map);
6129 + return 0;
6130 +@@ -2032,6 +2029,7 @@ static void bpf_iter_fini_hash_map(void *priv_data)
6131 + {
6132 + struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
6133 +
6134 ++ bpf_map_put_with_uref(seq_info->map);
6135 + kfree(seq_info->percpu_value_buf);
6136 + }
6137 +
6138 +diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
6139 +index 928867f527e70..32688357c6da1 100644
6140 +--- a/kernel/trace/trace_eprobe.c
6141 ++++ b/kernel/trace/trace_eprobe.c
6142 +@@ -226,6 +226,7 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
6143 + struct probe_arg *parg = &ep->tp.args[i];
6144 + struct ftrace_event_field *field;
6145 + struct list_head *head;
6146 ++ int ret = -ENOENT;
6147 +
6148 + head = trace_get_fields(ep->event);
6149 + list_for_each_entry(field, head, link) {
6150 +@@ -235,9 +236,20 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
6151 + return 0;
6152 + }
6153 + }
6154 ++
6155 ++ /*
6156 ++ * Argument not found on event. But allow for comm and COMM
6157 ++ * to be used to get the current->comm.
6158 ++ */
6159 ++ if (strcmp(parg->code->data, "COMM") == 0 ||
6160 ++ strcmp(parg->code->data, "comm") == 0) {
6161 ++ parg->code->op = FETCH_OP_COMM;
6162 ++ ret = 0;
6163 ++ }
6164 ++
6165 + kfree(parg->code->data);
6166 + parg->code->data = NULL;
6167 +- return -ENOENT;
6168 ++ return ret;
6169 + }
6170 +
6171 + static int eprobe_event_define_fields(struct trace_event_call *event_call)
6172 +@@ -308,6 +320,24 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
6173 +
6174 + addr = rec + field->offset;
6175 +
6176 ++ if (is_string_field(field)) {
6177 ++ switch (field->filter_type) {
6178 ++ case FILTER_DYN_STRING:
6179 ++ val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
6180 ++ break;
6181 ++ case FILTER_STATIC_STRING:
6182 ++ val = (unsigned long)addr;
6183 ++ break;
6184 ++ case FILTER_PTR_STRING:
6185 ++ val = (unsigned long)(*(char *)addr);
6186 ++ break;
6187 ++ default:
6188 ++ WARN_ON_ONCE(1);
6189 ++ return 0;
6190 ++ }
6191 ++ return val;
6192 ++ }
6193 ++
6194 + switch (field->size) {
6195 + case 1:
6196 + if (field->is_signed)
6197 +@@ -339,16 +369,38 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
6198 +
6199 + static int get_eprobe_size(struct trace_probe *tp, void *rec)
6200 + {
6201 ++ struct fetch_insn *code;
6202 + struct probe_arg *arg;
6203 + int i, len, ret = 0;
6204 +
6205 + for (i = 0; i < tp->nr_args; i++) {
6206 + arg = tp->args + i;
6207 +- if (unlikely(arg->dynamic)) {
6208 ++ if (arg->dynamic) {
6209 + unsigned long val;
6210 +
6211 +- val = get_event_field(arg->code, rec);
6212 +- len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
6213 ++ code = arg->code;
6214 ++ retry:
6215 ++ switch (code->op) {
6216 ++ case FETCH_OP_TP_ARG:
6217 ++ val = get_event_field(code, rec);
6218 ++ break;
6219 ++ case FETCH_OP_IMM:
6220 ++ val = code->immediate;
6221 ++ break;
6222 ++ case FETCH_OP_COMM:
6223 ++ val = (unsigned long)current->comm;
6224 ++ break;
6225 ++ case FETCH_OP_DATA:
6226 ++ val = (unsigned long)code->data;
6227 ++ break;
6228 ++ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
6229 ++ code++;
6230 ++ goto retry;
6231 ++ default:
6232 ++ continue;
6233 ++ }
6234 ++ code++;
6235 ++ len = process_fetch_insn_bottom(code, val, NULL, NULL);
6236 + if (len > 0)
6237 + ret += len;
6238 + }
6239 +@@ -366,8 +418,28 @@ process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
6240 + {
6241 + unsigned long val;
6242 +
6243 +- val = get_event_field(code, rec);
6244 +- return process_fetch_insn_bottom(code + 1, val, dest, base);
6245 ++ retry:
6246 ++ switch (code->op) {
6247 ++ case FETCH_OP_TP_ARG:
6248 ++ val = get_event_field(code, rec);
6249 ++ break;
6250 ++ case FETCH_OP_IMM:
6251 ++ val = code->immediate;
6252 ++ break;
6253 ++ case FETCH_OP_COMM:
6254 ++ val = (unsigned long)current->comm;
6255 ++ break;
6256 ++ case FETCH_OP_DATA:
6257 ++ val = (unsigned long)code->data;
6258 ++ break;
6259 ++ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
6260 ++ code++;
6261 ++ goto retry;
6262 ++ default:
6263 ++ return -EILSEQ;
6264 ++ }
6265 ++ code++;
6266 ++ return process_fetch_insn_bottom(code, val, dest, base);
6267 + }
6268 + NOKPROBE_SYMBOL(process_fetch_insn)
6269 +
6270 +@@ -849,6 +921,10 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
6271 + if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
6272 + ret = trace_eprobe_tp_arg_update(ep, i);
6273 +
6274 ++ /* Handle symbols "@" */
6275 ++ if (!ret)
6276 ++ ret = traceprobe_update_arg(&ep->tp.args[i]);
6277 ++
6278 + return ret;
6279 + }
6280 +
6281 +diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
6282 +index fba8cb77a73af..083f648e32650 100644
6283 +--- a/kernel/trace/trace_event_perf.c
6284 ++++ b/kernel/trace/trace_event_perf.c
6285 +@@ -157,7 +157,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
6286 + int i;
6287 +
6288 + if (--tp_event->perf_refcount > 0)
6289 +- goto out;
6290 ++ return;
6291 +
6292 + tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
6293 +
6294 +@@ -176,8 +176,6 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
6295 + perf_trace_buf[i] = NULL;
6296 + }
6297 + }
6298 +-out:
6299 +- trace_event_put_ref(tp_event);
6300 + }
6301 +
6302 + static int perf_trace_event_open(struct perf_event *p_event)
6303 +@@ -241,6 +239,7 @@ void perf_trace_destroy(struct perf_event *p_event)
6304 + mutex_lock(&event_mutex);
6305 + perf_trace_event_close(p_event);
6306 + perf_trace_event_unreg(p_event);
6307 ++ trace_event_put_ref(p_event->tp_event);
6308 + mutex_unlock(&event_mutex);
6309 + }
6310 +
6311 +@@ -292,6 +291,7 @@ void perf_kprobe_destroy(struct perf_event *p_event)
6312 + mutex_lock(&event_mutex);
6313 + perf_trace_event_close(p_event);
6314 + perf_trace_event_unreg(p_event);
6315 ++ trace_event_put_ref(p_event->tp_event);
6316 + mutex_unlock(&event_mutex);
6317 +
6318 + destroy_local_trace_kprobe(p_event->tp_event);
6319 +@@ -347,6 +347,7 @@ void perf_uprobe_destroy(struct perf_event *p_event)
6320 + mutex_lock(&event_mutex);
6321 + perf_trace_event_close(p_event);
6322 + perf_trace_event_unreg(p_event);
6323 ++ trace_event_put_ref(p_event->tp_event);
6324 + mutex_unlock(&event_mutex);
6325 + destroy_local_trace_uprobe(p_event->tp_event);
6326 + }
6327 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
6328 +index c4f654efb77af..c84c94334a606 100644
6329 +--- a/kernel/trace/trace_events.c
6330 ++++ b/kernel/trace/trace_events.c
6331 +@@ -176,6 +176,7 @@ static int trace_define_generic_fields(void)
6332 +
6333 + __generic_field(int, CPU, FILTER_CPU);
6334 + __generic_field(int, cpu, FILTER_CPU);
6335 ++ __generic_field(int, common_cpu, FILTER_CPU);
6336 + __generic_field(char *, COMM, FILTER_COMM);
6337 + __generic_field(char *, comm, FILTER_COMM);
6338 +
6339 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
6340 +index bb4605b60de79..2bbe4a7c6a2b6 100644
6341 +--- a/kernel/trace/trace_probe.c
6342 ++++ b/kernel/trace/trace_probe.c
6343 +@@ -279,7 +279,14 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
6344 + int ret = 0;
6345 + int len;
6346 +
6347 +- if (strcmp(arg, "retval") == 0) {
6348 ++ if (flags & TPARG_FL_TPOINT) {
6349 ++ if (code->data)
6350 ++ return -EFAULT;
6351 ++ code->data = kstrdup(arg, GFP_KERNEL);
6352 ++ if (!code->data)
6353 ++ return -ENOMEM;
6354 ++ code->op = FETCH_OP_TP_ARG;
6355 ++ } else if (strcmp(arg, "retval") == 0) {
6356 + if (flags & TPARG_FL_RETURN) {
6357 + code->op = FETCH_OP_RETVAL;
6358 + } else {
6359 +@@ -303,7 +310,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
6360 + }
6361 + } else
6362 + goto inval_var;
6363 +- } else if (strcmp(arg, "comm") == 0) {
6364 ++ } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
6365 + code->op = FETCH_OP_COMM;
6366 + #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
6367 + } else if (((flags & TPARG_FL_MASK) ==
6368 +@@ -319,13 +326,6 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
6369 + code->op = FETCH_OP_ARG;
6370 + code->param = (unsigned int)param - 1;
6371 + #endif
6372 +- } else if (flags & TPARG_FL_TPOINT) {
6373 +- if (code->data)
6374 +- return -EFAULT;
6375 +- code->data = kstrdup(arg, GFP_KERNEL);
6376 +- if (!code->data)
6377 +- return -ENOMEM;
6378 +- code->op = FETCH_OP_TP_ARG;
6379 + } else
6380 + goto inval_var;
6381 +
6382 +@@ -380,6 +380,11 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
6383 + break;
6384 +
6385 + case '%': /* named register */
6386 ++ if (flags & TPARG_FL_TPOINT) {
6387 ++ /* eprobes do not handle registers */
6388 ++ trace_probe_log_err(offs, BAD_VAR);
6389 ++ break;
6390 ++ }
6391 + ret = regs_query_register_offset(arg + 1);
6392 + if (ret >= 0) {
6393 + code->op = FETCH_OP_REG;
6394 +@@ -613,9 +618,11 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
6395 +
6396 + /*
6397 + * Since $comm and immediate string can not be dereferenced,
6398 +- * we can find those by strcmp.
6399 ++ * we can find those by strcmp. But ignore for eprobes.
6400 + */
6401 +- if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
6402 ++ if (!(flags & TPARG_FL_TPOINT) &&
6403 ++ (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
6404 ++ strncmp(arg, "\\\"", 2) == 0)) {
6405 + /* The type of $comm must be "string", and not an array. */
6406 + if (parg->count || (t && strcmp(t, "string")))
6407 + goto out;
6408 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
6409 +index ad912511a0c08..1cfa269bd4488 100644
6410 +--- a/kernel/watchdog.c
6411 ++++ b/kernel/watchdog.c
6412 +@@ -537,7 +537,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
6413 + return 0;
6414 + }
6415 +
6416 +-static void lockup_detector_reconfigure(void)
6417 ++static void __lockup_detector_reconfigure(void)
6418 + {
6419 + cpus_read_lock();
6420 + watchdog_nmi_stop();
6421 +@@ -557,6 +557,13 @@ static void lockup_detector_reconfigure(void)
6422 + __lockup_detector_cleanup();
6423 + }
6424 +
6425 ++void lockup_detector_reconfigure(void)
6426 ++{
6427 ++ mutex_lock(&watchdog_mutex);
6428 ++ __lockup_detector_reconfigure();
6429 ++ mutex_unlock(&watchdog_mutex);
6430 ++}
6431 ++
6432 + /*
6433 + * Create the watchdog infrastructure and configure the detector(s).
6434 + */
6435 +@@ -573,13 +580,13 @@ static __init void lockup_detector_setup(void)
6436 + return;
6437 +
6438 + mutex_lock(&watchdog_mutex);
6439 +- lockup_detector_reconfigure();
6440 ++ __lockup_detector_reconfigure();
6441 + softlockup_initialized = true;
6442 + mutex_unlock(&watchdog_mutex);
6443 + }
6444 +
6445 + #else /* CONFIG_SOFTLOCKUP_DETECTOR */
6446 +-static void lockup_detector_reconfigure(void)
6447 ++static void __lockup_detector_reconfigure(void)
6448 + {
6449 + cpus_read_lock();
6450 + watchdog_nmi_stop();
6451 +@@ -587,9 +594,13 @@ static void lockup_detector_reconfigure(void)
6452 + watchdog_nmi_start();
6453 + cpus_read_unlock();
6454 + }
6455 ++void lockup_detector_reconfigure(void)
6456 ++{
6457 ++ __lockup_detector_reconfigure();
6458 ++}
6459 + static inline void lockup_detector_setup(void)
6460 + {
6461 +- lockup_detector_reconfigure();
6462 ++ __lockup_detector_reconfigure();
6463 + }
6464 + #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
6465 +
6466 +@@ -629,7 +640,7 @@ static void proc_watchdog_update(void)
6467 + {
6468 + /* Remove impossible cpus to keep sysctl output clean. */
6469 + cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
6470 +- lockup_detector_reconfigure();
6471 ++ __lockup_detector_reconfigure();
6472 + }
6473 +
6474 + /*
6475 +diff --git a/lib/list_debug.c b/lib/list_debug.c
6476 +index 5d5424b51b746..413daa72a3d83 100644
6477 +--- a/lib/list_debug.c
6478 ++++ b/lib/list_debug.c
6479 +@@ -20,7 +20,11 @@
6480 + bool __list_add_valid(struct list_head *new, struct list_head *prev,
6481 + struct list_head *next)
6482 + {
6483 +- if (CHECK_DATA_CORRUPTION(next->prev != prev,
6484 ++ if (CHECK_DATA_CORRUPTION(prev == NULL,
6485 ++ "list_add corruption. prev is NULL.\n") ||
6486 ++ CHECK_DATA_CORRUPTION(next == NULL,
6487 ++ "list_add corruption. next is NULL.\n") ||
6488 ++ CHECK_DATA_CORRUPTION(next->prev != prev,
6489 + "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
6490 + prev, next->prev, next) ||
6491 + CHECK_DATA_CORRUPTION(prev->next != next,
6492 +@@ -42,7 +46,11 @@ bool __list_del_entry_valid(struct list_head *entry)
6493 + prev = entry->prev;
6494 + next = entry->next;
6495 +
6496 +- if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
6497 ++ if (CHECK_DATA_CORRUPTION(next == NULL,
6498 ++ "list_del corruption, %px->next is NULL\n", entry) ||
6499 ++ CHECK_DATA_CORRUPTION(prev == NULL,
6500 ++ "list_del corruption, %px->prev is NULL\n", entry) ||
6501 ++ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
6502 + "list_del corruption, %px->next is LIST_POISON1 (%px)\n",
6503 + entry, LIST_POISON1) ||
6504 + CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
6505 +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
6506 +index 6dff4510687a4..41016aff21c5e 100644
6507 +--- a/net/can/j1939/socket.c
6508 ++++ b/net/can/j1939/socket.c
6509 +@@ -178,7 +178,10 @@ activate_next:
6510 + if (!first)
6511 + return;
6512 +
6513 +- if (WARN_ON_ONCE(j1939_session_activate(first))) {
6514 ++ if (j1939_session_activate(first)) {
6515 ++ netdev_warn_once(first->priv->ndev,
6516 ++ "%s: 0x%p: Identical session is already activated.\n",
6517 ++ __func__, first);
6518 + first->err = -EBUSY;
6519 + goto activate_next;
6520 + } else {
6521 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
6522 +index 307ee1174a6e2..d7d86c944d76d 100644
6523 +--- a/net/can/j1939/transport.c
6524 ++++ b/net/can/j1939/transport.c
6525 +@@ -260,6 +260,8 @@ static void __j1939_session_drop(struct j1939_session *session)
6526 +
6527 + static void j1939_session_destroy(struct j1939_session *session)
6528 + {
6529 ++ struct sk_buff *skb;
6530 ++
6531 + if (session->transmission) {
6532 + if (session->err)
6533 + j1939_sk_errqueue(session, J1939_ERRQUEUE_TX_ABORT);
6534 +@@ -274,7 +276,11 @@ static void j1939_session_destroy(struct j1939_session *session)
6535 + WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
6536 + WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
6537 +
6538 +- skb_queue_purge(&session->skb_queue);
6539 ++ while ((skb = skb_dequeue(&session->skb_queue)) != NULL) {
6540 ++ /* drop ref taken in j1939_session_skb_queue() */
6541 ++ skb_unref(skb);
6542 ++ kfree_skb(skb);
6543 ++ }
6544 + __j1939_session_drop(session);
6545 + j1939_priv_put(session->priv);
6546 + kfree(session);
6547 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
6548 +index ea61dfe19c869..d2745c54737e3 100644
6549 +--- a/net/core/bpf_sk_storage.c
6550 ++++ b/net/core/bpf_sk_storage.c
6551 +@@ -865,10 +865,18 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
6552 + {
6553 + struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
6554 +
6555 ++ bpf_map_inc_with_uref(aux->map);
6556 + seq_info->map = aux->map;
6557 + return 0;
6558 + }
6559 +
6560 ++static void bpf_iter_fini_sk_storage_map(void *priv_data)
6561 ++{
6562 ++ struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
6563 ++
6564 ++ bpf_map_put_with_uref(seq_info->map);
6565 ++}
6566 ++
6567 + static int bpf_iter_attach_map(struct bpf_prog *prog,
6568 + union bpf_iter_link_info *linfo,
6569 + struct bpf_iter_aux_info *aux)
6570 +@@ -886,7 +894,7 @@ static int bpf_iter_attach_map(struct bpf_prog *prog,
6571 + if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
6572 + goto put_map;
6573 +
6574 +- if (prog->aux->max_rdonly_access > map->value_size) {
6575 ++ if (prog->aux->max_rdwr_access > map->value_size) {
6576 + err = -EACCES;
6577 + goto put_map;
6578 + }
6579 +@@ -914,7 +922,7 @@ static const struct seq_operations bpf_sk_storage_map_seq_ops = {
6580 + static const struct bpf_iter_seq_info iter_seq_info = {
6581 + .seq_ops = &bpf_sk_storage_map_seq_ops,
6582 + .init_seq_private = bpf_iter_init_sk_storage_map,
6583 +- .fini_seq_private = NULL,
6584 ++ .fini_seq_private = bpf_iter_fini_sk_storage_map,
6585 + .seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
6586 + };
6587 +
6588 +diff --git a/net/core/devlink.c b/net/core/devlink.c
6589 +index db76c55e1a6d7..b4d7a7f749c18 100644
6590 +--- a/net/core/devlink.c
6591 ++++ b/net/core/devlink.c
6592 +@@ -4413,7 +4413,7 @@ static int devlink_param_get(struct devlink *devlink,
6593 + const struct devlink_param *param,
6594 + struct devlink_param_gset_ctx *ctx)
6595 + {
6596 +- if (!param->get)
6597 ++ if (!param->get || devlink->reload_failed)
6598 + return -EOPNOTSUPP;
6599 + return param->get(devlink, param->id, ctx);
6600 + }
6601 +@@ -4422,7 +4422,7 @@ static int devlink_param_set(struct devlink *devlink,
6602 + const struct devlink_param *param,
6603 + struct devlink_param_gset_ctx *ctx)
6604 + {
6605 +- if (!param->set)
6606 ++ if (!param->set || devlink->reload_failed)
6607 + return -EOPNOTSUPP;
6608 + return param->set(devlink, param->id, ctx);
6609 + }
6610 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
6611 +index 6351b6af7aca9..795b3acfb9fd2 100644
6612 +--- a/net/core/sock_map.c
6613 ++++ b/net/core/sock_map.c
6614 +@@ -789,13 +789,22 @@ static int sock_map_init_seq_private(void *priv_data,
6615 + {
6616 + struct sock_map_seq_info *info = priv_data;
6617 +
6618 ++ bpf_map_inc_with_uref(aux->map);
6619 + info->map = aux->map;
6620 + return 0;
6621 + }
6622 +
6623 ++static void sock_map_fini_seq_private(void *priv_data)
6624 ++{
6625 ++ struct sock_map_seq_info *info = priv_data;
6626 ++
6627 ++ bpf_map_put_with_uref(info->map);
6628 ++}
6629 ++
6630 + static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
6631 + .seq_ops = &sock_map_seq_ops,
6632 + .init_seq_private = sock_map_init_seq_private,
6633 ++ .fini_seq_private = sock_map_fini_seq_private,
6634 + .seq_priv_size = sizeof(struct sock_map_seq_info),
6635 + };
6636 +
6637 +@@ -1376,18 +1385,27 @@ static const struct seq_operations sock_hash_seq_ops = {
6638 + };
6639 +
6640 + static int sock_hash_init_seq_private(void *priv_data,
6641 +- struct bpf_iter_aux_info *aux)
6642 ++ struct bpf_iter_aux_info *aux)
6643 + {
6644 + struct sock_hash_seq_info *info = priv_data;
6645 +
6646 ++ bpf_map_inc_with_uref(aux->map);
6647 + info->map = aux->map;
6648 + info->htab = container_of(aux->map, struct bpf_shtab, map);
6649 + return 0;
6650 + }
6651 +
6652 ++static void sock_hash_fini_seq_private(void *priv_data)
6653 ++{
6654 ++ struct sock_hash_seq_info *info = priv_data;
6655 ++
6656 ++ bpf_map_put_with_uref(info->map);
6657 ++}
6658 ++
6659 + static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
6660 + .seq_ops = &sock_hash_seq_ops,
6661 + .init_seq_private = sock_hash_init_seq_private,
6662 ++ .fini_seq_private = sock_hash_fini_seq_private,
6663 + .seq_priv_size = sizeof(struct sock_hash_seq_info),
6664 + };
6665 +
6666 +diff --git a/net/dsa/port.c b/net/dsa/port.c
6667 +index 63e88de963936..a21015d6bd365 100644
6668 +--- a/net/dsa/port.c
6669 ++++ b/net/dsa/port.c
6670 +@@ -111,11 +111,14 @@ int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
6671 + static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
6672 + bool do_fast_age)
6673 + {
6674 ++ struct dsa_switch *ds = dp->ds;
6675 + int err;
6676 +
6677 + err = dsa_port_set_state(dp, state, do_fast_age);
6678 +- if (err)
6679 +- pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
6680 ++ if (err && err != -EOPNOTSUPP) {
6681 ++ dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
6682 ++ dp->index, state, ERR_PTR(err));
6683 ++ }
6684 + }
6685 +
6686 + int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
6687 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
6688 +index 04c3cb4c5fec4..7951ade74d142 100644
6689 +--- a/net/ipv6/ip6_output.c
6690 ++++ b/net/ipv6/ip6_output.c
6691 +@@ -1289,8 +1289,7 @@ struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
6692 + fl6.daddr = info->key.u.ipv6.dst;
6693 + fl6.saddr = info->key.u.ipv6.src;
6694 + prio = info->key.tos;
6695 +- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(prio),
6696 +- info->key.label);
6697 ++ fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label);
6698 +
6699 + dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
6700 + NULL);
6701 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
6702 +index 4b098521a44cd..8108e9a941d02 100644
6703 +--- a/net/ipv6/ndisc.c
6704 ++++ b/net/ipv6/ndisc.c
6705 +@@ -1317,6 +1317,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
6706 + if (!rt && lifetime) {
6707 + ND_PRINTK(3, info, "RA: adding default router\n");
6708 +
6709 ++ if (neigh)
6710 ++ neigh_release(neigh);
6711 ++
6712 + rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
6713 + skb->dev, pref, defrtr_usr_metric);
6714 + if (!rt) {
6715 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
6716 +index 13d14fcc23712..2f22a172a27e1 100644
6717 +--- a/net/netfilter/nf_tables_api.c
6718 ++++ b/net/netfilter/nf_tables_api.c
6719 +@@ -837,7 +837,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
6720 +
6721 + rcu_read_lock();
6722 + nft_net = nft_pernet(net);
6723 +- cb->seq = nft_net->base_seq;
6724 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6725 +
6726 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6727 + if (family != NFPROTO_UNSPEC && family != table->family)
6728 +@@ -1626,7 +1626,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
6729 +
6730 + rcu_read_lock();
6731 + nft_net = nft_pernet(net);
6732 +- cb->seq = nft_net->base_seq;
6733 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6734 +
6735 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6736 + if (family != NFPROTO_UNSPEC && family != table->family)
6737 +@@ -3054,7 +3054,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
6738 +
6739 + rcu_read_lock();
6740 + nft_net = nft_pernet(net);
6741 +- cb->seq = nft_net->base_seq;
6742 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6743 +
6744 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6745 + if (family != NFPROTO_UNSPEC && family != table->family)
6746 +@@ -3810,7 +3810,7 @@ cont:
6747 + list_for_each_entry(i, &ctx->table->sets, list) {
6748 + int tmp;
6749 +
6750 +- if (!nft_is_active_next(ctx->net, set))
6751 ++ if (!nft_is_active_next(ctx->net, i))
6752 + continue;
6753 + if (!sscanf(i->name, name, &tmp))
6754 + continue;
6755 +@@ -4036,7 +4036,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
6756 +
6757 + rcu_read_lock();
6758 + nft_net = nft_pernet(net);
6759 +- cb->seq = nft_net->base_seq;
6760 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6761 +
6762 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6763 + if (ctx->family != NFPROTO_UNSPEC &&
6764 +@@ -4354,6 +4354,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
6765 + err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
6766 + if (err < 0)
6767 + return err;
6768 ++
6769 ++ if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
6770 ++ return -EINVAL;
6771 ++ } else if (flags & NFT_SET_CONCAT) {
6772 ++ return -EINVAL;
6773 + }
6774 +
6775 + if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS])
6776 +@@ -4964,6 +4969,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
6777 +
6778 + rcu_read_lock();
6779 + nft_net = nft_pernet(net);
6780 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6781 ++
6782 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6783 + if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
6784 + dump_ctx->ctx.family != table->family)
6785 +@@ -5099,6 +5106,9 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
6786 + if (!(set->flags & NFT_SET_INTERVAL) &&
6787 + *flags & NFT_SET_ELEM_INTERVAL_END)
6788 + return -EINVAL;
6789 ++ if ((*flags & (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) ==
6790 ++ (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL))
6791 ++ return -EINVAL;
6792 +
6793 + return 0;
6794 + }
6795 +@@ -5477,7 +5487,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
6796 +
6797 + err = nft_expr_clone(expr, set->exprs[i]);
6798 + if (err < 0) {
6799 +- nft_expr_destroy(ctx, expr);
6800 ++ kfree(expr);
6801 + goto err_expr;
6802 + }
6803 + expr_array[i] = expr;
6804 +@@ -5709,6 +5719,24 @@ static void nft_setelem_remove(const struct net *net,
6805 + set->ops->remove(net, set, elem);
6806 + }
6807 +
6808 ++static bool nft_setelem_valid_key_end(const struct nft_set *set,
6809 ++ struct nlattr **nla, u32 flags)
6810 ++{
6811 ++ if ((set->flags & (NFT_SET_CONCAT | NFT_SET_INTERVAL)) ==
6812 ++ (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
6813 ++ if (flags & NFT_SET_ELEM_INTERVAL_END)
6814 ++ return false;
6815 ++ if (!nla[NFTA_SET_ELEM_KEY_END] &&
6816 ++ !(flags & NFT_SET_ELEM_CATCHALL))
6817 ++ return false;
6818 ++ } else {
6819 ++ if (nla[NFTA_SET_ELEM_KEY_END])
6820 ++ return false;
6821 ++ }
6822 ++
6823 ++ return true;
6824 ++}
6825 ++
6826 + static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
6827 + const struct nlattr *attr, u32 nlmsg_flags)
6828 + {
6829 +@@ -5759,6 +5787,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
6830 + return -EINVAL;
6831 + }
6832 +
6833 ++ if (set->flags & NFT_SET_OBJECT) {
6834 ++ if (!nla[NFTA_SET_ELEM_OBJREF] &&
6835 ++ !(flags & NFT_SET_ELEM_INTERVAL_END))
6836 ++ return -EINVAL;
6837 ++ } else {
6838 ++ if (nla[NFTA_SET_ELEM_OBJREF])
6839 ++ return -EINVAL;
6840 ++ }
6841 ++
6842 ++ if (!nft_setelem_valid_key_end(set, nla, flags))
6843 ++ return -EINVAL;
6844 ++
6845 + if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
6846 + (nla[NFTA_SET_ELEM_DATA] ||
6847 + nla[NFTA_SET_ELEM_OBJREF] ||
6848 +@@ -5766,6 +5806,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
6849 + nla[NFTA_SET_ELEM_EXPIRATION] ||
6850 + nla[NFTA_SET_ELEM_USERDATA] ||
6851 + nla[NFTA_SET_ELEM_EXPR] ||
6852 ++ nla[NFTA_SET_ELEM_KEY_END] ||
6853 + nla[NFTA_SET_ELEM_EXPRESSIONS]))
6854 + return -EINVAL;
6855 +
6856 +@@ -5896,10 +5937,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
6857 + }
6858 +
6859 + if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
6860 +- if (!(set->flags & NFT_SET_OBJECT)) {
6861 +- err = -EINVAL;
6862 +- goto err_parse_key_end;
6863 +- }
6864 + obj = nft_obj_lookup(ctx->net, ctx->table,
6865 + nla[NFTA_SET_ELEM_OBJREF],
6866 + set->objtype, genmask);
6867 +@@ -6184,6 +6221,9 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
6868 + if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
6869 + return -EINVAL;
6870 +
6871 ++ if (!nft_setelem_valid_key_end(set, nla, flags))
6872 ++ return -EINVAL;
6873 ++
6874 + nft_set_ext_prepare(&tmpl);
6875 +
6876 + if (flags != 0) {
6877 +@@ -6796,7 +6836,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
6878 +
6879 + rcu_read_lock();
6880 + nft_net = nft_pernet(net);
6881 +- cb->seq = nft_net->base_seq;
6882 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6883 +
6884 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6885 + if (family != NFPROTO_UNSPEC && family != table->family)
6886 +@@ -7728,7 +7768,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
6887 +
6888 + rcu_read_lock();
6889 + nft_net = nft_pernet(net);
6890 +- cb->seq = nft_net->base_seq;
6891 ++ cb->seq = READ_ONCE(nft_net->base_seq);
6892 +
6893 + list_for_each_entry_rcu(table, &nft_net->tables, list) {
6894 + if (family != NFPROTO_UNSPEC && family != table->family)
6895 +@@ -8612,6 +8652,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6896 + struct nft_trans_elem *te;
6897 + struct nft_chain *chain;
6898 + struct nft_table *table;
6899 ++ unsigned int base_seq;
6900 + LIST_HEAD(adl);
6901 + int err;
6902 +
6903 +@@ -8661,9 +8702,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6904 + * Bump generation counter, invalidate any dump in progress.
6905 + * Cannot fail after this point.
6906 + */
6907 +- while (++nft_net->base_seq == 0)
6908 ++ base_seq = READ_ONCE(nft_net->base_seq);
6909 ++ while (++base_seq == 0)
6910 + ;
6911 +
6912 ++ WRITE_ONCE(nft_net->base_seq, base_seq);
6913 ++
6914 + /* step 3. Start new generation, rules_gen_X now in use. */
6915 + net->nft.gencursor = nft_gencursor_next(net);
6916 +
6917 +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
6918 +index 1afca2a6c2ac1..57010927e20a8 100644
6919 +--- a/net/netlink/genetlink.c
6920 ++++ b/net/netlink/genetlink.c
6921 +@@ -1174,13 +1174,17 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
6922 + op.policy,
6923 + op.maxattr);
6924 + if (err)
6925 +- return err;
6926 ++ goto err_free_state;
6927 + }
6928 + }
6929 +
6930 + if (!ctx->state)
6931 + return -ENODATA;
6932 + return 0;
6933 ++
6934 ++err_free_state:
6935 ++ netlink_policy_dump_free(ctx->state);
6936 ++ return err;
6937 + }
6938 +
6939 + static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
6940 +diff --git a/net/netlink/policy.c b/net/netlink/policy.c
6941 +index 8d7c900e27f4c..87e3de0fde896 100644
6942 +--- a/net/netlink/policy.c
6943 ++++ b/net/netlink/policy.c
6944 +@@ -144,7 +144,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
6945 +
6946 + err = add_policy(&state, policy, maxtype);
6947 + if (err)
6948 +- return err;
6949 ++ goto err_try_undo;
6950 +
6951 + for (policy_idx = 0;
6952 + policy_idx < state->n_alloc && state->policies[policy_idx].policy;
6953 +@@ -164,7 +164,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
6954 + policy[type].nested_policy,
6955 + policy[type].len);
6956 + if (err)
6957 +- return err;
6958 ++ goto err_try_undo;
6959 + break;
6960 + default:
6961 + break;
6962 +@@ -174,6 +174,16 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
6963 +
6964 + *pstate = state;
6965 + return 0;
6966 ++
6967 ++err_try_undo:
6968 ++ /* Try to preserve reasonable unwind semantics - if we're starting from
6969 ++ * scratch clean up fully, otherwise record what we got and caller will.
6970 ++ */
6971 ++ if (!*pstate)
6972 ++ netlink_policy_dump_free(state);
6973 ++ else
6974 ++ *pstate = state;
6975 ++ return err;
6976 + }
6977 +
6978 + static bool
6979 +diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
6980 +index fa611678af052..49e7cab43d24c 100644
6981 +--- a/net/qrtr/mhi.c
6982 ++++ b/net/qrtr/mhi.c
6983 +@@ -78,11 +78,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
6984 + struct qrtr_mhi_dev *qdev;
6985 + int rc;
6986 +
6987 +- /* start channels */
6988 +- rc = mhi_prepare_for_transfer(mhi_dev);
6989 +- if (rc)
6990 +- return rc;
6991 +-
6992 + qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
6993 + if (!qdev)
6994 + return -ENOMEM;
6995 +@@ -96,6 +91,13 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
6996 + if (rc)
6997 + return rc;
6998 +
6999 ++ /* start channels */
7000 ++ rc = mhi_prepare_for_transfer(mhi_dev);
7001 ++ if (rc) {
7002 ++ qrtr_endpoint_unregister(&qdev->ep);
7003 ++ return rc;
7004 ++ }
7005 ++
7006 + dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
7007 +
7008 + return 0;
7009 +diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
7010 +index 6fdedd9dbbc28..cfbf0e129cba5 100644
7011 +--- a/net/rds/ib_recv.c
7012 ++++ b/net/rds/ib_recv.c
7013 +@@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn)
7014 + static void release_refill(struct rds_connection *conn)
7015 + {
7016 + clear_bit(RDS_RECV_REFILL, &conn->c_flags);
7017 ++ smp_mb__after_atomic();
7018 +
7019 + /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
7020 + * hot path and finding waiters is very rare. We don't want to walk
7021 +diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
7022 +index a9f0d17fdb0d6..1bae32c482846 100644
7023 +--- a/net/sunrpc/auth.c
7024 ++++ b/net/sunrpc/auth.c
7025 +@@ -445,7 +445,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
7026 + * Enforce a 60 second garbage collection moratorium
7027 + * Note that the cred_unused list must be time-ordered.
7028 + */
7029 +- if (!time_in_range(cred->cr_expire, expired, jiffies))
7030 ++ if (time_in_range(cred->cr_expire, expired, jiffies))
7031 + continue;
7032 + if (!rpcauth_unhash_cred(cred))
7033 + continue;
7034 +diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
7035 +index 22a2c235abf1b..77e347a45344c 100644
7036 +--- a/net/sunrpc/backchannel_rqst.c
7037 ++++ b/net/sunrpc/backchannel_rqst.c
7038 +@@ -64,6 +64,17 @@ static void xprt_free_allocation(struct rpc_rqst *req)
7039 + kfree(req);
7040 + }
7041 +
7042 ++static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
7043 ++{
7044 ++ buf->head[0].iov_len = PAGE_SIZE;
7045 ++ buf->tail[0].iov_len = 0;
7046 ++ buf->pages = NULL;
7047 ++ buf->page_len = 0;
7048 ++ buf->flags = 0;
7049 ++ buf->len = 0;
7050 ++ buf->buflen = PAGE_SIZE;
7051 ++}
7052 ++
7053 + static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
7054 + {
7055 + struct page *page;
7056 +@@ -292,6 +303,9 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
7057 + */
7058 + spin_lock_bh(&xprt->bc_pa_lock);
7059 + if (xprt_need_to_requeue(xprt)) {
7060 ++ xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
7061 ++ xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
7062 ++ req->rq_rcv_buf.len = PAGE_SIZE;
7063 + list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
7064 + xprt->bc_alloc_count++;
7065 + atomic_inc(&xprt->bc_slot_count);
7066 +diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
7067 +index 326a31422a3c1..a7020b1f3ec72 100644
7068 +--- a/net/sunrpc/sysfs.c
7069 ++++ b/net/sunrpc/sysfs.c
7070 +@@ -282,8 +282,10 @@ static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj,
7071 + int offline = 0, online = 0, remove = 0;
7072 + struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
7073 +
7074 +- if (!xprt)
7075 +- return 0;
7076 ++ if (!xprt || !xps) {
7077 ++ count = 0;
7078 ++ goto out_put;
7079 ++ }
7080 +
7081 + if (!strncmp(buf, "offline", 7))
7082 + offline = 1;
7083 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
7084 +index 5df530e89e5ac..5d46036f3ad74 100644
7085 +--- a/net/vmw_vsock/af_vsock.c
7086 ++++ b/net/vmw_vsock/af_vsock.c
7087 +@@ -1285,6 +1285,7 @@ static void vsock_connect_timeout(struct work_struct *work)
7088 + if (sk->sk_state == TCP_SYN_SENT &&
7089 + (sk->sk_shutdown != SHUTDOWN_MASK)) {
7090 + sk->sk_state = TCP_CLOSE;
7091 ++ sk->sk_socket->state = SS_UNCONNECTED;
7092 + sk->sk_err = ETIMEDOUT;
7093 + sk_error_report(sk);
7094 + vsock_transport_cancel_pkt(vsk);
7095 +@@ -1390,7 +1391,14 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
7096 + * timeout fires.
7097 + */
7098 + sock_hold(sk);
7099 +- schedule_delayed_work(&vsk->connect_work, timeout);
7100 ++
7101 ++ /* If the timeout function is already scheduled,
7102 ++ * reschedule it, then ungrab the socket refcount to
7103 ++ * keep it balanced.
7104 ++ */
7105 ++ if (mod_delayed_work(system_wq, &vsk->connect_work,
7106 ++ timeout))
7107 ++ sock_put(sk);
7108 +
7109 + /* Skip ahead to preserve error code set above. */
7110 + goto out_wait;
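Two things change in the vsock connect path: a timed-out connect now also drops sock->state back to SS_UNCONNECTED, matching the TCP_CLOSE it already sets on sk_state, so the socket is not left looking half-connected; and the timeout is armed with mod_delayed_work() instead of schedule_delayed_work(). mod_delayed_work() returns true when the work was already pending, in which case the reference taken when it was first armed is still outstanding and the sock_hold() just taken must be dropped, as the new comment says. The same refcount-balanced arming pattern in generic form; struct peer and the peer_*() helpers are hypothetical, not vsock code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct peer {
	struct kref ref;
	struct delayed_work timeout_work;
};

static void peer_release(struct kref *ref)
{
	kfree(container_of(ref, struct peer, ref));
}

static void peer_arm_timeout(struct peer *peer, unsigned long delay)
{
	kref_get(&peer->ref);		/* reference owned by the queued work */
	if (mod_delayed_work(system_wq, &peer->timeout_work, delay))
		kref_put(&peer->ref, peer_release);	/* already pending: the
							 * earlier arming still
							 * holds its reference */
}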
7111 +diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
7112 +index 4aad284800355..36814be80264a 100644
7113 +--- a/scripts/Makefile.gcc-plugins
7114 ++++ b/scripts/Makefile.gcc-plugins
7115 +@@ -6,7 +6,7 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
7116 + gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \
7117 + += -DLATENT_ENTROPY_PLUGIN
7118 + ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
7119 +- DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
7120 ++ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable -ULATENT_ENTROPY_PLUGIN
7121 + endif
7122 + export DISABLE_LATENT_ENTROPY_PLUGIN
7123 +
7124 +diff --git a/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h b/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h
7125 +new file mode 100644
7126 +index 0000000000000..e69de29bb2d1d
7127 +diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
7128 +index b2483149bbe55..7db8258434355 100755
7129 +--- a/scripts/dummy-tools/gcc
7130 ++++ b/scripts/dummy-tools/gcc
7131 +@@ -96,12 +96,8 @@ fi
7132 +
7133 + # To set GCC_PLUGINS
7134 + if arg_contain -print-file-name=plugin "$@"; then
7135 +- plugin_dir=$(mktemp -d)
7136 +-
7137 +- mkdir -p $plugin_dir/include
7138 +- touch $plugin_dir/include/plugin-version.h
7139 +-
7140 +- echo $plugin_dir
7141 ++ # Use $0 to find the in-tree dummy directory
7142 ++ echo "$(dirname "$(readlink -f "$0")")/dummy-plugin-dir"
7143 + exit 0
7144 + fi
7145 +
7146 +diff --git a/scripts/module.lds.S b/scripts/module.lds.S
7147 +index 1d0e1e4dc3d2a..3a3aa2354ed86 100644
7148 +--- a/scripts/module.lds.S
7149 ++++ b/scripts/module.lds.S
7150 +@@ -27,6 +27,8 @@ SECTIONS {
7151 + .ctors 0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
7152 + .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }
7153 +
7154 ++ .altinstructions 0 : ALIGN(8) { KEEP(*(.altinstructions)) }
7155 ++ __bug_table 0 : ALIGN(8) { KEEP(*(__bug_table)) }
7156 + __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
7157 +
7158 + __patchable_function_entries : { *(__patchable_function_entries) }
7159 +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
7160 +index 2ee3b3d29f10b..a891705b1d577 100644
7161 +--- a/security/apparmor/apparmorfs.c
7162 ++++ b/security/apparmor/apparmorfs.c
7163 +@@ -401,7 +401,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
7164 +
7165 + data->size = copy_size;
7166 + if (copy_from_user(data->data, userbuf, copy_size)) {
7167 +- kvfree(data);
7168 ++ aa_put_loaddata(data);
7169 + return ERR_PTR(-EFAULT);
7170 + }
7171 +
7172 +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
7173 +index f7e97c7e80f3d..704b0c895605a 100644
7174 +--- a/security/apparmor/audit.c
7175 ++++ b/security/apparmor/audit.c
7176 +@@ -137,7 +137,7 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
7177 + }
7178 + if (AUDIT_MODE(profile) == AUDIT_QUIET ||
7179 + (type == AUDIT_APPARMOR_DENIED &&
7180 +- AUDIT_MODE(profile) == AUDIT_QUIET))
7181 ++ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
7182 + return aad(sa)->error;
7183 +
7184 + if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
7185 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
7186 +index 583680f6cd811..a7b3d8e58ed83 100644
7187 +--- a/security/apparmor/domain.c
7188 ++++ b/security/apparmor/domain.c
7189 +@@ -467,7 +467,7 @@ restart:
7190 + * xattrs, or a longer match
7191 + */
7192 + candidate = profile;
7193 +- candidate_len = profile->xmatch_len;
7194 ++ candidate_len = max(count, profile->xmatch_len);
7195 + candidate_xattrs = ret;
7196 + conflict = false;
7197 + }
7198 +diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
7199 +index 7d27db740bc2f..ac5054899f6f4 100644
7200 +--- a/security/apparmor/include/lib.h
7201 ++++ b/security/apparmor/include/lib.h
7202 +@@ -22,6 +22,11 @@
7203 + */
7204 +
7205 + #define DEBUG_ON (aa_g_debug)
7206 ++/*
7207 ++ * split individual debug cases out in preparation for finer grained
7208 ++ * debug controls in the future.
7209 ++ */
7210 ++#define AA_DEBUG_LABEL DEBUG_ON
7211 + #define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args)
7212 + #define AA_DEBUG(fmt, args...) \
7213 + do { \
7214 +diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
7215 +index b5b4b8190e654..b5aa4231af682 100644
7216 +--- a/security/apparmor/include/policy.h
7217 ++++ b/security/apparmor/include/policy.h
7218 +@@ -135,7 +135,7 @@ struct aa_profile {
7219 +
7220 + const char *attach;
7221 + struct aa_dfa *xmatch;
7222 +- int xmatch_len;
7223 ++ unsigned int xmatch_len;
7224 + enum audit_mode audit;
7225 + long mode;
7226 + u32 path_flags;
7227 +diff --git a/security/apparmor/label.c b/security/apparmor/label.c
7228 +index 6222fdfebe4e5..66bc4704f8044 100644
7229 +--- a/security/apparmor/label.c
7230 ++++ b/security/apparmor/label.c
7231 +@@ -1632,9 +1632,9 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
7232 + AA_BUG(!str && size != 0);
7233 + AA_BUG(!label);
7234 +
7235 +- if (flags & FLAG_ABS_ROOT) {
7236 ++ if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
7237 + ns = root_ns;
7238 +- len = snprintf(str, size, "=");
7239 ++ len = snprintf(str, size, "_");
7240 + update_for_len(total, len, size, str);
7241 + } else if (!ns) {
7242 + ns = labels_ns(label);
7243 +@@ -1745,7 +1745,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
7244 + if (!use_label_hname(ns, label, flags) ||
7245 + display_mode(ns, label, flags)) {
7246 + len = aa_label_asxprint(&name, ns, label, flags, gfp);
7247 +- if (len == -1) {
7248 ++ if (len < 0) {
7249 + AA_DEBUG("label print error");
7250 + return;
7251 + }
7252 +@@ -1773,7 +1773,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
7253 + int len;
7254 +
7255 + len = aa_label_asxprint(&str, ns, label, flags, gfp);
7256 +- if (len == -1) {
7257 ++ if (len < 0) {
7258 + AA_DEBUG("label print error");
7259 + return;
7260 + }
7261 +@@ -1796,7 +1796,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
7262 + int len;
7263 +
7264 + len = aa_label_asxprint(&str, ns, label, flags, gfp);
7265 +- if (len == -1) {
7266 ++ if (len < 0) {
7267 + AA_DEBUG("label print error");
7268 + return;
7269 + }
7270 +@@ -1896,7 +1896,8 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
7271 + AA_BUG(!str);
7272 +
7273 + str = skipn_spaces(str, n);
7274 +- if (str == NULL || (*str == '=' && base != &root_ns->unconfined->label))
7275 ++ if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
7276 ++ base != &root_ns->unconfined->label))
7277 + return ERR_PTR(-EINVAL);
7278 +
7279 + len = label_count_strn_entries(str, end - str);
7280 +diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
7281 +index aa6fcfde30514..f7bb47daf2ad6 100644
7282 +--- a/security/apparmor/mount.c
7283 ++++ b/security/apparmor/mount.c
7284 +@@ -229,7 +229,8 @@ static const char * const mnt_info_table[] = {
7285 + "failed srcname match",
7286 + "failed type match",
7287 + "failed flags match",
7288 +- "failed data match"
7289 ++ "failed data match",
7290 ++ "failed perms check"
7291 + };
7292 +
7293 + /*
7294 +@@ -284,8 +285,8 @@ static int do_match_mnt(struct aa_dfa *dfa, unsigned int start,
7295 + return 0;
7296 + }
7297 +
7298 +- /* failed at end of flags match */
7299 +- return 4;
7300 ++ /* failed at perms check, don't confuse with flags match */
7301 ++ return 6;
7302 + }
7303 +
7304 +
7305 +@@ -718,6 +719,7 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
7306 + aa_put_label(target);
7307 + goto out;
7308 + }
7309 ++ aa_put_label(target);
7310 + } else
7311 + /* already audited error */
7312 + error = PTR_ERR(target);
7313 +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
7314 +index 4e1f96b216a8b..03c9609ca407b 100644
7315 +--- a/security/apparmor/policy_unpack.c
7316 ++++ b/security/apparmor/policy_unpack.c
7317 +@@ -746,16 +746,18 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
7318 + profile->label.flags |= FLAG_HAT;
7319 + if (!unpack_u32(e, &tmp, NULL))
7320 + goto fail;
7321 +- if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
7322 ++ if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
7323 + profile->mode = APPARMOR_COMPLAIN;
7324 +- else if (tmp == PACKED_MODE_ENFORCE)
7325 ++ } else if (tmp == PACKED_MODE_ENFORCE) {
7326 + profile->mode = APPARMOR_ENFORCE;
7327 +- else if (tmp == PACKED_MODE_KILL)
7328 ++ } else if (tmp == PACKED_MODE_KILL) {
7329 + profile->mode = APPARMOR_KILL;
7330 +- else if (tmp == PACKED_MODE_UNCONFINED)
7331 ++ } else if (tmp == PACKED_MODE_UNCONFINED) {
7332 + profile->mode = APPARMOR_UNCONFINED;
7333 +- else
7334 ++ profile->label.flags |= FLAG_UNCONFINED;
7335 ++ } else {
7336 + goto fail;
7337 ++ }
7338 + if (!unpack_u32(e, &tmp, NULL))
7339 + goto fail;
7340 + if (tmp)
7341 +diff --git a/sound/core/control.c b/sound/core/control.c
7342 +index a25c0d64d104f..f66fe4be30d35 100644
7343 +--- a/sound/core/control.c
7344 ++++ b/sound/core/control.c
7345 +@@ -127,6 +127,7 @@ static int snd_ctl_release(struct inode *inode, struct file *file)
7346 + if (control->vd[idx].owner == ctl)
7347 + control->vd[idx].owner = NULL;
7348 + up_write(&card->controls_rwsem);
7349 ++ snd_fasync_free(ctl->fasync);
7350 + snd_ctl_empty_read_queue(ctl);
7351 + put_pid(ctl->pid);
7352 + kfree(ctl);
7353 +@@ -181,7 +182,7 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
7354 + _found:
7355 + wake_up(&ctl->change_sleep);
7356 + spin_unlock(&ctl->read_lock);
7357 +- kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
7358 ++ snd_kill_fasync(ctl->fasync, SIGIO, POLL_IN);
7359 + }
7360 + read_unlock_irqrestore(&card->ctl_files_rwlock, flags);
7361 + }
7362 +@@ -2002,7 +2003,7 @@ static int snd_ctl_fasync(int fd, struct file * file, int on)
7363 + struct snd_ctl_file *ctl;
7364 +
7365 + ctl = file->private_data;
7366 +- return fasync_helper(fd, file, on, &ctl->fasync);
7367 ++ return snd_fasync_helper(fd, file, on, &ctl->fasync);
7368 + }
7369 +
7370 + /* return the preferred subdevice number if already assigned;
7371 +@@ -2170,7 +2171,7 @@ static int snd_ctl_dev_disconnect(struct snd_device *device)
7372 + read_lock_irqsave(&card->ctl_files_rwlock, flags);
7373 + list_for_each_entry(ctl, &card->ctl_files, list) {
7374 + wake_up(&ctl->change_sleep);
7375 +- kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
7376 ++ snd_kill_fasync(ctl->fasync, SIGIO, POLL_ERR);
7377 + }
7378 + read_unlock_irqrestore(&card->ctl_files_rwlock, flags);
7379 +
7380 +diff --git a/sound/core/info.c b/sound/core/info.c
7381 +index a451b24199c3e..9f6714e29bbc3 100644
7382 +--- a/sound/core/info.c
7383 ++++ b/sound/core/info.c
7384 +@@ -111,9 +111,9 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig)
7385 + entry = data->entry;
7386 + mutex_lock(&entry->access);
7387 + if (entry->c.ops->llseek) {
7388 +- offset = entry->c.ops->llseek(entry,
7389 +- data->file_private_data,
7390 +- file, offset, orig);
7391 ++ ret = entry->c.ops->llseek(entry,
7392 ++ data->file_private_data,
7393 ++ file, offset, orig);
7394 + goto out;
7395 + }
7396 +
7397 +diff --git a/sound/core/misc.c b/sound/core/misc.c
7398 +index 50e4aaa6270d1..d32a19976a2b9 100644
7399 +--- a/sound/core/misc.c
7400 ++++ b/sound/core/misc.c
7401 +@@ -10,6 +10,7 @@
7402 + #include <linux/time.h>
7403 + #include <linux/slab.h>
7404 + #include <linux/ioport.h>
7405 ++#include <linux/fs.h>
7406 + #include <sound/core.h>
7407 +
7408 + #ifdef CONFIG_SND_DEBUG
7409 +@@ -145,3 +146,96 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
7410 + }
7411 + EXPORT_SYMBOL(snd_pci_quirk_lookup);
7412 + #endif
7413 ++
7414 ++/*
7415 ++ * Deferred async signal helpers
7416 ++ *
7417 ++ * Below are a few helper functions to wrap the async signal handling
7418 ++ * in the deferred work. The main purpose is to avoid the messy deadlock
7419 ++ * around tasklist_lock and co at the kill_fasync() invocation.
7420 ++ * fasync_helper() and kill_fasync() are replaced with snd_fasync_helper()
7421 ++ * and snd_kill_fasync(), respectively. In addition, snd_fasync_free() has
7422 ++ * to be called at releasing the relevant file object.
7423 ++ */
7424 ++struct snd_fasync {
7425 ++ struct fasync_struct *fasync;
7426 ++ int signal;
7427 ++ int poll;
7428 ++ int on;
7429 ++ struct list_head list;
7430 ++};
7431 ++
7432 ++static DEFINE_SPINLOCK(snd_fasync_lock);
7433 ++static LIST_HEAD(snd_fasync_list);
7434 ++
7435 ++static void snd_fasync_work_fn(struct work_struct *work)
7436 ++{
7437 ++ struct snd_fasync *fasync;
7438 ++
7439 ++ spin_lock_irq(&snd_fasync_lock);
7440 ++ while (!list_empty(&snd_fasync_list)) {
7441 ++ fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
7442 ++ list_del_init(&fasync->list);
7443 ++ spin_unlock_irq(&snd_fasync_lock);
7444 ++ if (fasync->on)
7445 ++ kill_fasync(&fasync->fasync, fasync->signal, fasync->poll);
7446 ++ spin_lock_irq(&snd_fasync_lock);
7447 ++ }
7448 ++ spin_unlock_irq(&snd_fasync_lock);
7449 ++}
7450 ++
7451 ++static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn);
7452 ++
7453 ++int snd_fasync_helper(int fd, struct file *file, int on,
7454 ++ struct snd_fasync **fasyncp)
7455 ++{
7456 ++ struct snd_fasync *fasync = NULL;
7457 ++
7458 ++ if (on) {
7459 ++ fasync = kzalloc(sizeof(*fasync), GFP_KERNEL);
7460 ++ if (!fasync)
7461 ++ return -ENOMEM;
7462 ++ INIT_LIST_HEAD(&fasync->list);
7463 ++ }
7464 ++
7465 ++ spin_lock_irq(&snd_fasync_lock);
7466 ++ if (*fasyncp) {
7467 ++ kfree(fasync);
7468 ++ fasync = *fasyncp;
7469 ++ } else {
7470 ++ if (!fasync) {
7471 ++ spin_unlock_irq(&snd_fasync_lock);
7472 ++ return 0;
7473 ++ }
7474 ++ *fasyncp = fasync;
7475 ++ }
7476 ++ fasync->on = on;
7477 ++ spin_unlock_irq(&snd_fasync_lock);
7478 ++ return fasync_helper(fd, file, on, &fasync->fasync);
7479 ++}
7480 ++EXPORT_SYMBOL_GPL(snd_fasync_helper);
7481 ++
7482 ++void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll)
7483 ++{
7484 ++ unsigned long flags;
7485 ++
7486 ++ if (!fasync || !fasync->on)
7487 ++ return;
7488 ++ spin_lock_irqsave(&snd_fasync_lock, flags);
7489 ++ fasync->signal = signal;
7490 ++ fasync->poll = poll;
7491 ++ list_move(&fasync->list, &snd_fasync_list);
7492 ++ schedule_work(&snd_fasync_work);
7493 ++ spin_unlock_irqrestore(&snd_fasync_lock, flags);
7494 ++}
7495 ++EXPORT_SYMBOL_GPL(snd_kill_fasync);
7496 ++
7497 ++void snd_fasync_free(struct snd_fasync *fasync)
7498 ++{
7499 ++ if (!fasync)
7500 ++ return;
7501 ++ fasync->on = 0;
7502 ++ flush_work(&snd_fasync_work);
7503 ++ kfree(fasync);
7504 ++}
7505 ++EXPORT_SYMBOL_GPL(snd_fasync_free);
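The comment block above describes the new deferred-fasync helpers; converting a driver to them is mechanical and is exactly what the control.c hunk earlier and the timer.c hunk below do. A condensed sketch of the three touch points, assuming the matching declarations added to include/sound/core.h elsewhere in this patch; struct my_dev and the my_*() functions are hypothetical, not ALSA code:

#include <linux/fs.h>
#include <linux/slab.h>
#include <sound/core.h>

struct my_dev {
	struct snd_fasync *fasync;	/* was: struct fasync_struct *fasync */
};

static int my_fasync(int fd, struct file *file, int on)
{
	struct my_dev *dev = file->private_data;

	return snd_fasync_helper(fd, file, on, &dev->fasync);
}

static void my_notify(struct my_dev *dev)
{
	/* usable from atomic context: the real kill_fasync() runs from a work item */
	snd_kill_fasync(dev->fasync, SIGIO, POLL_IN);
}

static int my_release(struct inode *inode, struct file *file)
{
	struct my_dev *dev = file->private_data;

	snd_fasync_free(dev->fasync);	/* the wrapper is no longer freed for us */
	kfree(dev);
	return 0;
}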
7506 +diff --git a/sound/core/timer.c b/sound/core/timer.c
7507 +index b3214baa89193..e08a37c23add8 100644
7508 +--- a/sound/core/timer.c
7509 ++++ b/sound/core/timer.c
7510 +@@ -83,7 +83,7 @@ struct snd_timer_user {
7511 + unsigned int filter;
7512 + struct timespec64 tstamp; /* trigger tstamp */
7513 + wait_queue_head_t qchange_sleep;
7514 +- struct fasync_struct *fasync;
7515 ++ struct snd_fasync *fasync;
7516 + struct mutex ioctl_lock;
7517 + };
7518 +
7519 +@@ -1345,7 +1345,7 @@ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
7520 + }
7521 + __wake:
7522 + spin_unlock(&tu->qlock);
7523 +- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
7524 ++ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
7525 + wake_up(&tu->qchange_sleep);
7526 + }
7527 +
7528 +@@ -1383,7 +1383,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
7529 + spin_lock_irqsave(&tu->qlock, flags);
7530 + snd_timer_user_append_to_tqueue(tu, &r1);
7531 + spin_unlock_irqrestore(&tu->qlock, flags);
7532 +- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
7533 ++ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
7534 + wake_up(&tu->qchange_sleep);
7535 + }
7536 +
7537 +@@ -1453,7 +1453,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
7538 + spin_unlock(&tu->qlock);
7539 + if (append == 0)
7540 + return;
7541 +- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
7542 ++ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
7543 + wake_up(&tu->qchange_sleep);
7544 + }
7545 +
7546 +@@ -1521,6 +1521,7 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
7547 + snd_timer_instance_free(tu->timeri);
7548 + }
7549 + mutex_unlock(&tu->ioctl_lock);
7550 ++ snd_fasync_free(tu->fasync);
7551 + kfree(tu->queue);
7552 + kfree(tu->tqueue);
7553 + kfree(tu);
7554 +@@ -2135,7 +2136,7 @@ static int snd_timer_user_fasync(int fd, struct file * file, int on)
7555 + struct snd_timer_user *tu;
7556 +
7557 + tu = file->private_data;
7558 +- return fasync_helper(fd, file, on, &tu->fasync);
7559 ++ return snd_fasync_helper(fd, file, on, &tu->fasync);
7560 + }
7561 +
7562 + static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7563 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7564 +index f7ab620d0e300..600ba91f77031 100644
7565 +--- a/sound/pci/hda/patch_realtek.c
7566 ++++ b/sound/pci/hda/patch_realtek.c
7567 +@@ -9034,6 +9034,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7568 + SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7569 + SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7570 + SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7571 ++ SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7572 + SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7573 + SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7574 + SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
7575 +diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
7576 +index c5ea3b115966b..b8cda6b14b49d 100644
7577 +--- a/sound/soc/codecs/tas2770.c
7578 ++++ b/sound/soc/codecs/tas2770.c
7579 +@@ -46,34 +46,22 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
7580 + usleep_range(1000, 2000);
7581 + }
7582 +
7583 +-static int tas2770_set_bias_level(struct snd_soc_component *component,
7584 +- enum snd_soc_bias_level level)
7585 ++static int tas2770_update_pwr_ctrl(struct tas2770_priv *tas2770)
7586 + {
7587 +- struct tas2770_priv *tas2770 =
7588 +- snd_soc_component_get_drvdata(component);
7589 ++ struct snd_soc_component *component = tas2770->component;
7590 ++ unsigned int val;
7591 ++ int ret;
7592 +
7593 +- switch (level) {
7594 +- case SND_SOC_BIAS_ON:
7595 +- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7596 +- TAS2770_PWR_CTRL_MASK,
7597 +- TAS2770_PWR_CTRL_ACTIVE);
7598 +- break;
7599 +- case SND_SOC_BIAS_STANDBY:
7600 +- case SND_SOC_BIAS_PREPARE:
7601 +- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7602 +- TAS2770_PWR_CTRL_MASK,
7603 +- TAS2770_PWR_CTRL_MUTE);
7604 +- break;
7605 +- case SND_SOC_BIAS_OFF:
7606 +- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7607 +- TAS2770_PWR_CTRL_MASK,
7608 +- TAS2770_PWR_CTRL_SHUTDOWN);
7609 +- break;
7610 ++ if (tas2770->dac_powered)
7611 ++ val = tas2770->unmuted ?
7612 ++ TAS2770_PWR_CTRL_ACTIVE : TAS2770_PWR_CTRL_MUTE;
7613 ++ else
7614 ++ val = TAS2770_PWR_CTRL_SHUTDOWN;
7615 +
7616 +- default:
7617 +- dev_err(tas2770->dev, "wrong power level setting %d\n", level);
7618 +- return -EINVAL;
7619 +- }
7620 ++ ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7621 ++ TAS2770_PWR_CTRL_MASK, val);
7622 ++ if (ret < 0)
7623 ++ return ret;
7624 +
7625 + return 0;
7626 + }
7627 +@@ -114,9 +102,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
7628 + gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
7629 + usleep_range(1000, 2000);
7630 + } else {
7631 +- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7632 +- TAS2770_PWR_CTRL_MASK,
7633 +- TAS2770_PWR_CTRL_ACTIVE);
7634 ++ ret = tas2770_update_pwr_ctrl(tas2770);
7635 + if (ret < 0)
7636 + return ret;
7637 + }
7638 +@@ -152,24 +138,19 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
7639 +
7640 + switch (event) {
7641 + case SND_SOC_DAPM_POST_PMU:
7642 +- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7643 +- TAS2770_PWR_CTRL_MASK,
7644 +- TAS2770_PWR_CTRL_MUTE);
7645 ++ tas2770->dac_powered = 1;
7646 ++ ret = tas2770_update_pwr_ctrl(tas2770);
7647 + break;
7648 + case SND_SOC_DAPM_PRE_PMD:
7649 +- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7650 +- TAS2770_PWR_CTRL_MASK,
7651 +- TAS2770_PWR_CTRL_SHUTDOWN);
7652 ++ tas2770->dac_powered = 0;
7653 ++ ret = tas2770_update_pwr_ctrl(tas2770);
7654 + break;
7655 + default:
7656 + dev_err(tas2770->dev, "Not supported evevt\n");
7657 + return -EINVAL;
7658 + }
7659 +
7660 +- if (ret < 0)
7661 +- return ret;
7662 +-
7663 +- return 0;
7664 ++ return ret;
7665 + }
7666 +
7667 + static const struct snd_kcontrol_new isense_switch =
7668 +@@ -203,21 +184,11 @@ static const struct snd_soc_dapm_route tas2770_audio_map[] = {
7669 + static int tas2770_mute(struct snd_soc_dai *dai, int mute, int direction)
7670 + {
7671 + struct snd_soc_component *component = dai->component;
7672 +- int ret;
7673 +-
7674 +- if (mute)
7675 +- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7676 +- TAS2770_PWR_CTRL_MASK,
7677 +- TAS2770_PWR_CTRL_MUTE);
7678 +- else
7679 +- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
7680 +- TAS2770_PWR_CTRL_MASK,
7681 +- TAS2770_PWR_CTRL_ACTIVE);
7682 +-
7683 +- if (ret < 0)
7684 +- return ret;
7685 ++ struct tas2770_priv *tas2770 =
7686 ++ snd_soc_component_get_drvdata(component);
7687 +
7688 +- return 0;
7689 ++ tas2770->unmuted = !mute;
7690 ++ return tas2770_update_pwr_ctrl(tas2770);
7691 + }
7692 +
7693 + static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
7694 +@@ -337,7 +308,7 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7695 + struct snd_soc_component *component = dai->component;
7696 + struct tas2770_priv *tas2770 =
7697 + snd_soc_component_get_drvdata(component);
7698 +- u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
7699 ++ u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0;
7700 + int ret;
7701 +
7702 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
7703 +@@ -349,9 +320,15 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7704 + }
7705 +
7706 + switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
7707 ++ case SND_SOC_DAIFMT_NB_IF:
7708 ++ invert_fpol = 1;
7709 ++ fallthrough;
7710 + case SND_SOC_DAIFMT_NB_NF:
7711 + asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
7712 + break;
7713 ++ case SND_SOC_DAIFMT_IB_IF:
7714 ++ invert_fpol = 1;
7715 ++ fallthrough;
7716 + case SND_SOC_DAIFMT_IB_NF:
7717 + asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING;
7718 + break;
7719 +@@ -369,15 +346,19 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7720 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
7721 + case SND_SOC_DAIFMT_I2S:
7722 + tdm_rx_start_slot = 1;
7723 ++ fpol_preinv = 0;
7724 + break;
7725 + case SND_SOC_DAIFMT_DSP_A:
7726 + tdm_rx_start_slot = 0;
7727 ++ fpol_preinv = 1;
7728 + break;
7729 + case SND_SOC_DAIFMT_DSP_B:
7730 + tdm_rx_start_slot = 1;
7731 ++ fpol_preinv = 1;
7732 + break;
7733 + case SND_SOC_DAIFMT_LEFT_J:
7734 + tdm_rx_start_slot = 0;
7735 ++ fpol_preinv = 1;
7736 + break;
7737 + default:
7738 + dev_err(tas2770->dev,
7739 +@@ -391,6 +372,14 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7740 + if (ret < 0)
7741 + return ret;
7742 +
7743 ++ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG0,
7744 ++ TAS2770_TDM_CFG_REG0_FPOL_MASK,
7745 ++ (fpol_preinv ^ invert_fpol)
7746 ++ ? TAS2770_TDM_CFG_REG0_FPOL_RSING
7747 ++ : TAS2770_TDM_CFG_REG0_FPOL_FALING);
7748 ++ if (ret < 0)
7749 ++ return ret;
7750 ++
7751 + return 0;
7752 + }
7753 +
7754 +@@ -489,7 +478,7 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
7755 + .id = 0,
7756 + .playback = {
7757 + .stream_name = "ASI1 Playback",
7758 +- .channels_min = 2,
7759 ++ .channels_min = 1,
7760 + .channels_max = 2,
7761 + .rates = TAS2770_RATES,
7762 + .formats = TAS2770_FORMATS,
7763 +@@ -537,7 +526,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
7764 + .probe = tas2770_codec_probe,
7765 + .suspend = tas2770_codec_suspend,
7766 + .resume = tas2770_codec_resume,
7767 +- .set_bias_level = tas2770_set_bias_level,
7768 + .controls = tas2770_snd_controls,
7769 + .num_controls = ARRAY_SIZE(tas2770_snd_controls),
7770 + .dapm_widgets = tas2770_dapm_widgets,
7771 +diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h
7772 +index d156666bcc552..f75f40781ab13 100644
7773 +--- a/sound/soc/codecs/tas2770.h
7774 ++++ b/sound/soc/codecs/tas2770.h
7775 +@@ -41,6 +41,9 @@
7776 + #define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ 0x6
7777 + #define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ 0x8
7778 + #define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ 0xa
7779 ++#define TAS2770_TDM_CFG_REG0_FPOL_MASK BIT(0)
7780 ++#define TAS2770_TDM_CFG_REG0_FPOL_RSING 0
7781 ++#define TAS2770_TDM_CFG_REG0_FPOL_FALING 1
7782 + /* TDM Configuration Reg1 */
7783 + #define TAS2770_TDM_CFG_REG1 TAS2770_REG(0X0, 0x0B)
7784 + #define TAS2770_TDM_CFG_REG1_MASK GENMASK(5, 1)
7785 +@@ -135,6 +138,8 @@ struct tas2770_priv {
7786 + struct device *dev;
7787 + int v_sense_slot;
7788 + int i_sense_slot;
7789 ++ bool dac_powered;
7790 ++ bool unmuted;
7791 + };
7792 +
7793 + #endif /* __TAS2770__ */
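The tas2770 rework replaces the set_bias_level and mute callbacks, which each wrote TAS2770_PWR_CTRL directly and could undo one another depending on call order, with two tracked booleans and one helper that derives the register value; it also programs frame-sync polarity explicitly (fpol_preinv XOR invert_fpol) and lowers channels_min to 1 for mono playback. The power mapping condenses to the sketch below, a restatement of tas2770_update_pwr_ctrl() for readability, not new driver code:

static unsigned int tas2770_pwr_val(bool dac_powered, bool unmuted)
{
	if (!dac_powered)
		return TAS2770_PWR_CTRL_SHUTDOWN;	/* DAPM widget powered down */
	return unmuted ? TAS2770_PWR_CTRL_ACTIVE	/* powered and unmuted */
		       : TAS2770_PWR_CTRL_MUTE;		/* powered but muted */
}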
7794 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
7795 +index d39c7d52ecfd4..9f4a629b032b8 100644
7796 +--- a/sound/soc/codecs/tlv320aic32x4.c
7797 ++++ b/sound/soc/codecs/tlv320aic32x4.c
7798 +@@ -49,6 +49,8 @@ struct aic32x4_priv {
7799 + struct aic32x4_setup_data *setup;
7800 + struct device *dev;
7801 + enum aic32x4_type type;
7802 ++
7803 ++ unsigned int fmt;
7804 + };
7805 +
7806 + static int aic32x4_reset_adc(struct snd_soc_dapm_widget *w,
7807 +@@ -611,6 +613,7 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
7808 + static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
7809 + {
7810 + struct snd_soc_component *component = codec_dai->component;
7811 ++ struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
7812 + u8 iface_reg_1 = 0;
7813 + u8 iface_reg_2 = 0;
7814 + u8 iface_reg_3 = 0;
7815 +@@ -654,6 +657,8 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
7816 + return -EINVAL;
7817 + }
7818 +
7819 ++ aic32x4->fmt = fmt;
7820 ++
7821 + snd_soc_component_update_bits(component, AIC32X4_IFACE1,
7822 + AIC32X4_IFACE1_DATATYPE_MASK |
7823 + AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
7824 +@@ -758,6 +763,10 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
7825 + return -EINVAL;
7826 + }
7827 +
7828 ++ /* PCM over I2S is always 2-channel */
7829 ++ if ((aic32x4->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
7830 ++ channels = 2;
7831 ++
7832 + madc = DIV_ROUND_UP((32 * adc_resource_class), aosr);
7833 + max_dosr = (AIC32X4_MAX_DOSR_FREQ / sample_rate / dosr_increment) *
7834 + dosr_increment;
7835 +diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
7836 +index 4b8a63e336c77..d7f4646ee029c 100644
7837 +--- a/sound/soc/sh/rcar/ssiu.c
7838 ++++ b/sound/soc/sh/rcar/ssiu.c
7839 +@@ -67,6 +67,8 @@ static void rsnd_ssiu_busif_err_irq_ctrl(struct rsnd_mod *mod, int enable)
7840 + shift = 1;
7841 + offset = 1;
7842 + break;
7843 ++ default:
7844 ++ return;
7845 + }
7846 +
7847 + for (i = 0; i < 4; i++) {
7848 +diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
7849 +index a51a928ea40a7..5f780ef9581a9 100644
7850 +--- a/sound/soc/sof/debug.c
7851 ++++ b/sound/soc/sof/debug.c
7852 +@@ -668,9 +668,9 @@ static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_s
7853 + }
7854 +
7855 + for (i = 0, len = 0; i < reply->num_elems; i++) {
7856 +- ret = snprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
7857 +- reply->elems[i].zone, reply->elems[i].id,
7858 +- reply->elems[i].used, reply->elems[i].free);
7859 ++ ret = scnprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
7860 ++ reply->elems[i].zone, reply->elems[i].id,
7861 ++ reply->elems[i].used, reply->elems[i].free);
7862 + if (ret < 0)
7863 + goto error;
7864 + len += ret;
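This hunk, and the hda.c one further down, switch from snprintf() to scnprintf() when accumulating into a fixed buffer: snprintf() returns the length that would have been written had the buffer been big enough, so len += ret can walk past the end of the buffer once truncation starts, while scnprintf() returns only what was actually stored. A user-space illustration of the difference; scnprintf() itself is kernel-only, so a small equivalent wrapper is used here:

#include <stdarg.h>
#include <stdio.h>

/* behaves like the kernel's scnprintf(): returns characters actually stored */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[16];
	int len = 0;

	len += scnprintf_like(buf + len, sizeof(buf) - len, "zone %d.%d", 0, 1);
	len += scnprintf_like(buf + len, sizeof(buf) - len, " used %#8x", 0x4000);
	/* with snprintf() the second call would push len past sizeof(buf) */
	printf("len=%d buf='%s'\n", len, buf);
	return 0;
}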
7865 +diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
7866 +index c7ed2b3d6abca..0a42034c4655e 100644
7867 +--- a/sound/soc/sof/intel/apl.c
7868 ++++ b/sound/soc/sof/intel/apl.c
7869 +@@ -139,6 +139,7 @@ const struct sof_intel_dsp_desc apl_chip_info = {
7870 + .ipc_ack = HDA_DSP_REG_HIPCIE,
7871 + .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
7872 + .ipc_ctl = HDA_DSP_REG_HIPCCTL,
7873 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
7874 + .rom_init_timeout = 150,
7875 + .ssp_count = APL_SSP_COUNT,
7876 + .ssp_base_offset = APL_SSP_BASE_OFFSET,
7877 +diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
7878 +index e115e12a856fd..a63b235763ede 100644
7879 +--- a/sound/soc/sof/intel/cnl.c
7880 ++++ b/sound/soc/sof/intel/cnl.c
7881 +@@ -344,6 +344,7 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
7882 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
7883 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
7884 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
7885 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
7886 + .rom_init_timeout = 300,
7887 + .ssp_count = CNL_SSP_COUNT,
7888 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
7889 +@@ -363,6 +364,7 @@ const struct sof_intel_dsp_desc jsl_chip_info = {
7890 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
7891 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
7892 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
7893 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
7894 + .rom_init_timeout = 300,
7895 + .ssp_count = ICL_SSP_COUNT,
7896 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
7897 +diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
7898 +index ee09393d42cb0..439cb33d2a710 100644
7899 +--- a/sound/soc/sof/intel/hda-loader.c
7900 ++++ b/sound/soc/sof/intel/hda-loader.c
7901 +@@ -163,7 +163,7 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag)
7902 +
7903 + /* step 7: wait for ROM init */
7904 + ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
7905 +- HDA_DSP_SRAM_REG_ROM_STATUS, status,
7906 ++ chip->rom_status_reg, status,
7907 + ((status & HDA_DSP_ROM_STS_MASK)
7908 + == HDA_DSP_ROM_INIT),
7909 + HDA_DSP_REG_POLL_INTERVAL_US,
7910 +@@ -174,8 +174,8 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag)
7911 +
7912 + if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
7913 + dev_err(sdev->dev,
7914 +- "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
7915 +- __func__);
7916 ++ "%s: timeout with rom_status_reg (%#x) read\n",
7917 ++ __func__, chip->rom_status_reg);
7918 +
7919 + err:
7920 + flags = SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
7921 +@@ -251,6 +251,8 @@ static int cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
7922 +
7923 + static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
7924 + {
7925 ++ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
7926 ++ const struct sof_intel_dsp_desc *chip = hda->desc;
7927 + unsigned int reg;
7928 + int ret, status;
7929 +
7930 +@@ -261,7 +263,7 @@ static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
7931 + }
7932 +
7933 + status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
7934 +- HDA_DSP_SRAM_REG_ROM_STATUS, reg,
7935 ++ chip->rom_status_reg, reg,
7936 + ((reg & HDA_DSP_ROM_STS_MASK)
7937 + == HDA_DSP_ROM_FW_ENTERED),
7938 + HDA_DSP_REG_POLL_INTERVAL_US,
7939 +@@ -274,8 +276,8 @@ static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
7940 +
7941 + if (status < 0) {
7942 + dev_err(sdev->dev,
7943 +- "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
7944 +- __func__);
7945 ++ "%s: timeout with rom_status_reg (%#x) read\n",
7946 ++ __func__, chip->rom_status_reg);
7947 + }
7948 +
7949 + ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
7950 +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
7951 +index ddf70902e53c4..35cbef171f4a3 100644
7952 +--- a/sound/soc/sof/intel/hda.c
7953 ++++ b/sound/soc/sof/intel/hda.c
7954 +@@ -353,11 +353,13 @@ static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
7955 +
7956 + static void hda_dsp_get_status(struct snd_sof_dev *sdev)
7957 + {
7958 ++ const struct sof_intel_dsp_desc *chip;
7959 + u32 status;
7960 + int i;
7961 +
7962 ++ chip = get_chip_info(sdev->pdata);
7963 + status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
7964 +- HDA_DSP_SRAM_REG_ROM_STATUS);
7965 ++ chip->rom_status_reg);
7966 +
7967 + for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
7968 + if (status == hda_dsp_rom_msg[i].code) {
7969 +@@ -402,14 +404,16 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
7970 + /* dump the first 8 dwords representing the extended ROM status */
7971 + static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, u32 flags)
7972 + {
7973 ++ const struct sof_intel_dsp_desc *chip;
7974 + char msg[128];
7975 + int len = 0;
7976 + u32 value;
7977 + int i;
7978 +
7979 ++ chip = get_chip_info(sdev->pdata);
7980 + for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
7981 +- value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_STATUS + i * 0x4);
7982 +- len += snprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
7983 ++ value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
7984 ++ len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
7985 + }
7986 +
7987 + sof_dev_dbg_or_err(sdev->dev, flags & SOF_DBG_DUMP_FORCE_ERR_LEVEL,
7988 +diff --git a/sound/soc/sof/intel/icl.c b/sound/soc/sof/intel/icl.c
7989 +index ee095b8f2d01c..4065c4d3912a5 100644
7990 +--- a/sound/soc/sof/intel/icl.c
7991 ++++ b/sound/soc/sof/intel/icl.c
7992 +@@ -139,6 +139,7 @@ const struct sof_intel_dsp_desc icl_chip_info = {
7993 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
7994 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
7995 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
7996 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
7997 + .rom_init_timeout = 300,
7998 + .ssp_count = ICL_SSP_COUNT,
7999 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
8000 +diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
8001 +index e9f7d4d7fcce8..96707758ebc51 100644
8002 +--- a/sound/soc/sof/intel/shim.h
8003 ++++ b/sound/soc/sof/intel/shim.h
8004 +@@ -161,6 +161,7 @@ struct sof_intel_dsp_desc {
8005 + int ipc_ack;
8006 + int ipc_ack_mask;
8007 + int ipc_ctl;
8008 ++ int rom_status_reg;
8009 + int rom_init_timeout;
8010 + int ssp_count; /* ssp count of the platform */
8011 + int ssp_base_offset; /* base address of the SSPs */
8012 +diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
8013 +index 199d41a7dc9bf..aba52d8628aa4 100644
8014 +--- a/sound/soc/sof/intel/tgl.c
8015 ++++ b/sound/soc/sof/intel/tgl.c
8016 +@@ -134,6 +134,7 @@ const struct sof_intel_dsp_desc tgl_chip_info = {
8017 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
8018 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
8019 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
8020 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
8021 + .rom_init_timeout = 300,
8022 + .ssp_count = ICL_SSP_COUNT,
8023 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
8024 +@@ -153,6 +154,7 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
8025 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
8026 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
8027 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
8028 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
8029 + .rom_init_timeout = 300,
8030 + .ssp_count = ICL_SSP_COUNT,
8031 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
8032 +@@ -172,6 +174,7 @@ const struct sof_intel_dsp_desc ehl_chip_info = {
8033 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
8034 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
8035 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
8036 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
8037 + .rom_init_timeout = 300,
8038 + .ssp_count = ICL_SSP_COUNT,
8039 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
8040 +@@ -191,6 +194,7 @@ const struct sof_intel_dsp_desc adls_chip_info = {
8041 + .ipc_ack = CNL_DSP_REG_HIPCIDA,
8042 + .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
8043 + .ipc_ctl = CNL_DSP_REG_HIPCCTL,
8044 ++ .rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
8045 + .rom_init_timeout = 300,
8046 + .ssp_count = ICL_SSP_COUNT,
8047 + .ssp_base_offset = CNL_SSP_BASE_OFFSET,
8048 +diff --git a/sound/usb/card.c b/sound/usb/card.c
8049 +index 1764b9302d467..ff5f8de1bc540 100644
8050 +--- a/sound/usb/card.c
8051 ++++ b/sound/usb/card.c
8052 +@@ -387,6 +387,14 @@ static const struct usb_audio_device_name usb_audio_names[] = {
8053 + DEVICE_NAME(0x05e1, 0x0408, "Syntek", "STK1160"),
8054 + DEVICE_NAME(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
8055 +
8056 ++ /* ASUS ROG Zenith II: this machine has also two devices, one for
8057 ++ * the front headphone and another for the rest
8058 ++ */
8059 ++ PROFILE_NAME(0x0b05, 0x1915, "ASUS", "Zenith II Front Headphone",
8060 ++ "Zenith-II-Front-Headphone"),
8061 ++ PROFILE_NAME(0x0b05, 0x1916, "ASUS", "Zenith II Main Audio",
8062 ++ "Zenith-II-Main-Audio"),
8063 ++
8064 + /* ASUS ROG Strix */
8065 + PROFILE_NAME(0x0b05, 0x1917,
8066 + "Realtek", "ALC1220-VB-DT", "Realtek-ALC1220-VB-Desktop"),
8067 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
8068 +index 997425ef0a294..3f8f6056ff6a5 100644
8069 +--- a/sound/usb/mixer_maps.c
8070 ++++ b/sound/usb/mixer_maps.c
8071 +@@ -366,13 +366,28 @@ static const struct usbmix_name_map corsair_virtuoso_map[] = {
8072 + { 0 }
8073 + };
8074 +
8075 +-/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX
8076 +- * response for Input Gain Pad (id=19, control=12) and the connector status
8077 +- * for SPDIF terminal (id=18). Skip them.
8078 +- */
8079 +-static const struct usbmix_name_map asus_rog_map[] = {
8080 +- { 18, NULL }, /* OT, connector control */
8081 +- { 19, NULL, 12 }, /* FU, Input Gain Pad */
8082 ++/* ASUS ROG Zenith II with Realtek ALC1220-VB */
8083 ++static const struct usbmix_name_map asus_zenith_ii_map[] = {
8084 ++ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
8085 ++ { 16, "Speaker" }, /* OT */
8086 ++ { 22, "Speaker Playback" }, /* FU */
8087 ++ { 7, "Line" }, /* IT */
8088 ++ { 19, "Line Capture" }, /* FU */
8089 ++ { 8, "Mic" }, /* IT */
8090 ++ { 20, "Mic Capture" }, /* FU */
8091 ++ { 9, "Front Mic" }, /* IT */
8092 ++ { 21, "Front Mic Capture" }, /* FU */
8093 ++ { 17, "IEC958" }, /* OT */
8094 ++ { 23, "IEC958 Playback" }, /* FU */
8095 ++ {}
8096 ++};
8097 ++
8098 ++static const struct usbmix_connector_map asus_zenith_ii_connector_map[] = {
8099 ++ { 10, 16 }, /* (Back) Speaker */
8100 ++ { 11, 17 }, /* SPDIF */
8101 ++ { 13, 7 }, /* Line */
8102 ++ { 14, 8 }, /* Mic */
8103 ++ { 15, 9 }, /* Front Mic */
8104 + {}
8105 + };
8106 +
8107 +@@ -568,9 +583,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
8108 + .map = trx40_mobo_map,
8109 + .connector_map = trx40_mobo_connector_map,
8110 + },
8111 +- { /* ASUS ROG Zenith II */
8112 ++ { /* ASUS ROG Zenith II (main audio) */
8113 + .id = USB_ID(0x0b05, 0x1916),
8114 +- .map = asus_rog_map,
8115 ++ .map = asus_zenith_ii_map,
8116 ++ .connector_map = asus_zenith_ii_connector_map,
8117 + },
8118 + { /* ASUS ROG Strix */
8119 + .id = USB_ID(0x0b05, 0x1917),
8120 +diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
8121 +index a98174e0569c8..bc34a5bbb5049 100644
8122 +--- a/tools/build/feature/test-libcrypto.c
8123 ++++ b/tools/build/feature/test-libcrypto.c
8124 +@@ -1,16 +1,23 @@
8125 + // SPDX-License-Identifier: GPL-2.0
8126 ++#include <openssl/evp.h>
8127 + #include <openssl/sha.h>
8128 + #include <openssl/md5.h>
8129 +
8130 + int main(void)
8131 + {
8132 +- MD5_CTX context;
8133 ++ EVP_MD_CTX *mdctx;
8134 + unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
8135 + unsigned char dat[] = "12345";
8136 ++ unsigned int digest_len;
8137 +
8138 +- MD5_Init(&context);
8139 +- MD5_Update(&context, &dat[0], sizeof(dat));
8140 +- MD5_Final(&md[0], &context);
8141 ++ mdctx = EVP_MD_CTX_new();
8142 ++ if (!mdctx)
8143 ++ return 0;
8144 ++
8145 ++ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
8146 ++ EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
8147 ++ EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
8148 ++ EVP_MD_CTX_free(mdctx);
8149 +
8150 + SHA1(&dat[0], sizeof(dat), &md[0]);
8151 +
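The probe drops the MD5_Init()/MD5_Update()/MD5_Final() trio, which OpenSSL 3.0 deprecates, in favour of the EVP interface, so feature detection keeps working on newer distributions. The same digest can also be produced with OpenSSL's one-shot helper; the sketch below is an alternative form, and md5_oneshot() is our name, not part of the probe:

#include <openssl/evp.h>
#include <openssl/md5.h>

static int md5_oneshot(const unsigned char *dat, size_t len,
		       unsigned char md[MD5_DIGEST_LENGTH])
{
	unsigned int digest_len = 0;

	/* EVP_Digest() wraps the new/init/update/final/free sequence;
	 * it returns 1 on success and 0 on failure
	 */
	return EVP_Digest(dat, len, md, &digest_len, EVP_md5(), NULL);
}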
8152 +diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
8153 +index 62c0ec21aaa86..72abf5d86f712 100644
8154 +--- a/tools/perf/tests/switch-tracking.c
8155 ++++ b/tools/perf/tests/switch-tracking.c
8156 +@@ -324,6 +324,7 @@ out_free_nodes:
8157 + int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
8158 + {
8159 + const char *sched_switch = "sched:sched_switch";
8160 ++ const char *cycles = "cycles:u";
8161 + struct switch_tracking switch_tracking = { .tids = NULL, };
8162 + struct record_opts opts = {
8163 + .mmap_pages = UINT_MAX,
8164 +@@ -372,12 +373,19 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
8165 + cpu_clocks_evsel = evlist__last(evlist);
8166 +
8167 + /* Second event */
8168 +- if (perf_pmu__has_hybrid())
8169 +- err = parse_events(evlist, "cpu_core/cycles/u", NULL);
8170 +- else
8171 +- err = parse_events(evlist, "cycles:u", NULL);
8172 ++ if (perf_pmu__has_hybrid()) {
8173 ++ cycles = "cpu_core/cycles/u";
8174 ++ err = parse_events(evlist, cycles, NULL);
8175 ++ if (err) {
8176 ++ cycles = "cpu_atom/cycles/u";
8177 ++ pr_debug("Trying %s\n", cycles);
8178 ++ err = parse_events(evlist, cycles, NULL);
8179 ++ }
8180 ++ } else {
8181 ++ err = parse_events(evlist, cycles, NULL);
8182 ++ }
8183 + if (err) {
8184 +- pr_debug("Failed to parse event cycles:u\n");
8185 ++ pr_debug("Failed to parse event %s\n", cycles);
8186 + goto out_err;
8187 + }
8188 +
8189 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
8190 +index 3bfe099d86438..b93a36ffeb9e3 100644
8191 +--- a/tools/perf/util/parse-events.c
8192 ++++ b/tools/perf/util/parse-events.c
8193 +@@ -196,9 +196,12 @@ static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
8194 + void parse_events__handle_error(struct parse_events_error *err, int idx,
8195 + char *str, char *help)
8196 + {
8197 +- if (WARN(!str, "WARNING: failed to provide error string\n")) {
8198 +- free(help);
8199 +- return;
8200 ++ if (WARN(!str, "WARNING: failed to provide error string\n"))
8201 ++ goto out_free;
8202 ++ if (!err) {
8203 ++ /* Assume caller does not want message printed */
8204 ++ pr_debug("event syntax error: %s\n", str);
8205 ++ goto out_free;
8206 + }
8207 + switch (err->num_errors) {
8208 + case 0:
8209 +@@ -224,6 +227,11 @@ void parse_events__handle_error(struct parse_events_error *err, int idx,
8210 + break;
8211 + }
8212 + err->num_errors++;
8213 ++ return;
8214 ++
8215 ++out_free:
8216 ++ free(str);
8217 ++ free(help);
8218 + }
8219 +
8220 + struct tracepoint_path *tracepoint_id_to_path(u64 config)
8221 +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
8222 +index a834918a0a0d3..68844c48f688a 100644
8223 +--- a/tools/perf/util/probe-event.c
8224 ++++ b/tools/perf/util/probe-event.c
8225 +@@ -1775,8 +1775,10 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
8226 + if (!pev->event && pev->point.function && pev->point.line
8227 + && !pev->point.lazy_line && !pev->point.offset) {
8228 + if (asprintf(&pev->event, "%s_L%d", pev->point.function,
8229 +- pev->point.line) < 0)
8230 +- return -ENOMEM;
8231 ++ pev->point.line) < 0) {
8232 ++ ret = -ENOMEM;
8233 ++ goto out;
8234 ++ }
8235 + }
8236 +
8237 + /* Copy arguments and ensure return probe has no C argument */
8238 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
8239 +index fa928b431555c..7c02509c71d0a 100644
8240 +--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
8241 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
8242 +@@ -21,7 +21,6 @@ check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
8243 + check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
8244 +
8245 + check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
8246 +-check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
8247 + check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
8248 + check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
8249 +
8250 +diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
8251 +index a15d21dc035a6..56eb83d1a3bdd 100755
8252 +--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
8253 ++++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
8254 +@@ -181,37 +181,43 @@ ping_ipv6()
8255 +
8256 + send_src_ipv4()
8257 + {
8258 +- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8259 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8260 ++ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8261 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8262 + }
8263 +
8264 + send_dst_ipv4()
8265 + {
8266 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8267 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8268 ++ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8269 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8270 + }
8271 +
8272 + send_src_udp4()
8273 + {
8274 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8275 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8276 ++ -A 198.51.100.2 -B 203.0.113.2 \
8277 + -d 1msec -t udp "sp=0-32768,dp=30000"
8278 + }
8279 +
8280 + send_dst_udp4()
8281 + {
8282 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8283 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8284 ++ -A 198.51.100.2 -B 203.0.113.2 \
8285 + -d 1msec -t udp "sp=20000,dp=0-32768"
8286 + }
8287 +
8288 + send_src_ipv6()
8289 + {
8290 +- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
8291 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8292 ++ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
8293 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8294 + }
8295 +
8296 + send_dst_ipv6()
8297 + {
8298 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
8299 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8300 ++ -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
8301 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8302 + }
8303 +
8304 +@@ -226,13 +232,15 @@ send_flowlabel()
8305 +
8306 + send_src_udp6()
8307 + {
8308 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
8309 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8310 ++ -A 2001:db8:1::2 -B 2001:db8:4::2 \
8311 + -d 1msec -t udp "sp=0-32768,dp=30000"
8312 + }
8313 +
8314 + send_dst_udp6()
8315 + {
8316 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
8317 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8318 ++ -A 2001:db8:1::2 -B 2001:db8:4::2 \
8319 + -d 1msec -t udp "sp=20000,dp=0-32768"
8320 + }
8321 +
8322 +diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
8323 +index a73f52efcb6cf..0446db9c6f748 100755
8324 +--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
8325 ++++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
8326 +@@ -276,37 +276,43 @@ ping_ipv6()
8327 +
8328 + send_src_ipv4()
8329 + {
8330 +- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8331 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8332 ++ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8333 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8334 + }
8335 +
8336 + send_dst_ipv4()
8337 + {
8338 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8339 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8340 ++ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8341 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8342 + }
8343 +
8344 + send_src_udp4()
8345 + {
8346 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8347 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8348 ++ -A 198.51.100.2 -B 203.0.113.2 \
8349 + -d 1msec -t udp "sp=0-32768,dp=30000"
8350 + }
8351 +
8352 + send_dst_udp4()
8353 + {
8354 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8355 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8356 ++ -A 198.51.100.2 -B 203.0.113.2 \
8357 + -d 1msec -t udp "sp=20000,dp=0-32768"
8358 + }
8359 +
8360 + send_src_ipv6()
8361 + {
8362 +- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
8363 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8364 ++ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
8365 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8366 + }
8367 +
8368 + send_dst_ipv6()
8369 + {
8370 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
8371 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8372 ++ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
8373 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8374 + }
8375 +
8376 +@@ -321,13 +327,15 @@ send_flowlabel()
8377 +
8378 + send_src_udp6()
8379 + {
8380 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
8381 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8382 ++ -A 2001:db8:1::2 -B 2001:db8:2::2 \
8383 + -d 1msec -t udp "sp=0-32768,dp=30000"
8384 + }
8385 +
8386 + send_dst_udp6()
8387 + {
8388 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
8389 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8390 ++ -A 2001:db8:1::2 -B 2001:db8:2::2 \
8391 + -d 1msec -t udp "sp=20000,dp=0-32768"
8392 + }
8393 +
8394 +diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
8395 +index 8fea2c2e0b25d..d40183b4eccc8 100755
8396 +--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
8397 ++++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
8398 +@@ -278,37 +278,43 @@ ping_ipv6()
8399 +
8400 + send_src_ipv4()
8401 + {
8402 +- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8403 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8404 ++ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
8405 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8406 + }
8407 +
8408 + send_dst_ipv4()
8409 + {
8410 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8411 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8412 ++ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
8413 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8414 + }
8415 +
8416 + send_src_udp4()
8417 + {
8418 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8419 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8420 ++ -A 198.51.100.2 -B 203.0.113.2 \
8421 + -d 1msec -t udp "sp=0-32768,dp=30000"
8422 + }
8423 +
8424 + send_dst_udp4()
8425 + {
8426 +- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
8427 ++ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
8428 ++ -A 198.51.100.2 -B 203.0.113.2 \
8429 + -d 1msec -t udp "sp=20000,dp=0-32768"
8430 + }
8431 +
8432 + send_src_ipv6()
8433 + {
8434 +- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
8435 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8436 ++ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
8437 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8438 + }
8439 +
8440 + send_dst_ipv6()
8441 + {
8442 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
8443 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8444 ++ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
8445 + -d 1msec -c 50 -t udp "sp=20000,dp=30000"
8446 + }
8447 +
8448 +@@ -323,13 +329,15 @@ send_flowlabel()
8449 +
8450 + send_src_udp6()
8451 + {
8452 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
8453 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8454 ++ -A 2001:db8:1::2 -B 2001:db8:2::2 \
8455 + -d 1msec -t udp "sp=0-32768,dp=30000"
8456 + }
8457 +
8458 + send_dst_udp6()
8459 + {
8460 +- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
8461 ++ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
8462 ++ -A 2001:db8:1::2 -B 2001:db8:2::2 \
8463 + -d 1msec -t udp "sp=20000,dp=0-32768"
8464 + }
8465 +
8466 +diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
8467 +index 5b98f3ee58a58..0fffaeedee767 100644
8468 +--- a/tools/vm/slabinfo.c
8469 ++++ b/tools/vm/slabinfo.c
8470 +@@ -125,7 +125,7 @@ static void usage(void)
8471 + "-n|--numa Show NUMA information\n"
8472 + "-N|--lines=K Show the first K slabs\n"
8473 + "-o|--ops Show kmem_cache_ops\n"
8474 +- "-P|--partial Sort by number of partial slabs\n"
8475 ++ "-P|--partial Sort by number of partial slabs\n"
8476 + "-r|--report Detailed report on single slabs\n"
8477 + "-s|--shrink Shrink slabs\n"
8478 + "-S|--Size Sort by size\n"
8479 +@@ -1067,15 +1067,27 @@ static void sort_slabs(void)
8480 + for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
8481 + int result;
8482 +
8483 +- if (sort_size)
8484 +- result = slab_size(s1) < slab_size(s2);
8485 +- else if (sort_active)
8486 +- result = slab_activity(s1) < slab_activity(s2);
8487 +- else if (sort_loss)
8488 +- result = slab_waste(s1) < slab_waste(s2);
8489 +- else if (sort_partial)
8490 +- result = s1->partial < s2->partial;
8491 +- else
8492 ++ if (sort_size) {
8493 ++ if (slab_size(s1) == slab_size(s2))
8494 ++ result = strcasecmp(s1->name, s2->name);
8495 ++ else
8496 ++ result = slab_size(s1) < slab_size(s2);
8497 ++ } else if (sort_active) {
8498 ++ if (slab_activity(s1) == slab_activity(s2))
8499 ++ result = strcasecmp(s1->name, s2->name);
8500 ++ else
8501 ++ result = slab_activity(s1) < slab_activity(s2);
8502 ++ } else if (sort_loss) {
8503 ++ if (slab_waste(s1) == slab_waste(s2))
8504 ++ result = strcasecmp(s1->name, s2->name);
8505 ++ else
8506 ++ result = slab_waste(s1) < slab_waste(s2);
8507 ++ } else if (sort_partial) {
8508 ++ if (s1->partial == s2->partial)
8509 ++ result = strcasecmp(s1->name, s2->name);
8510 ++ else
8511 ++ result = s1->partial < s2->partial;
8512 ++ } else
8513 + result = strcasecmp(s1->name, s2->name);
8514 +
8515 + if (show_inverted)
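Each sort key in slabinfo now falls back to a case-insensitive name comparison when the primary key ties, so slabs with equal size, activity, waste or partial count come out in a stable order instead of whatever order the scan visited them in. The same idea expressed as an ordinary qsort() comparator; struct slab here is a stand-in, not slabinfo's own structure:

#include <strings.h>

struct slab {
	const char *name;
	unsigned long size;
};

/* descending by size, case-insensitive name as the tie-breaker */
static int cmp_size_then_name(const void *a, const void *b)
{
	const struct slab *s1 = a, *s2 = b;

	if (s1->size != s2->size)
		return s1->size > s2->size ? -1 : 1;
	return strcasecmp(s1->name, s2->name);
}

It would be used as qsort(slabs, nr, sizeof(*slabs), cmp_size_then_name); slabinfo itself keeps its existing swap-based sort and only changes how the comparison result is computed.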
8516 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8517 +index 251b4143f505b..86fc429a0e438 100644
8518 +--- a/virt/kvm/kvm_main.c
8519 ++++ b/virt/kvm/kvm_main.c
8520 +@@ -1034,6 +1034,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
8521 + if (!kvm)
8522 + return ERR_PTR(-ENOMEM);
8523 +
8524 ++ /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
8525 ++ __module_get(kvm_chardev_ops.owner);
8526 ++
8527 + KVM_MMU_LOCK_INIT(kvm);
8528 + mmgrab(current->mm);
8529 + kvm->mm = current->mm;
8530 +@@ -1107,16 +1110,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
8531 + preempt_notifier_inc();
8532 + kvm_init_pm_notifier(kvm);
8533 +
8534 +- /*
8535 +- * When the fd passed to this ioctl() is opened it pins the module,
8536 +- * but try_module_get() also prevents getting a reference if the module
8537 +- * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
8538 +- */
8539 +- if (!try_module_get(kvm_chardev_ops.owner)) {
8540 +- r = -ENODEV;
8541 +- goto out_err;
8542 +- }
8543 +-
8544 + return kvm;
8545 +
8546 + out_err:
8547 +@@ -1140,6 +1133,7 @@ out_err_no_irq_srcu:
8548 + out_err_no_srcu:
8549 + kvm_arch_free_vm(kvm);
8550 + mmdrop(current->mm);
8551 ++ module_put(kvm_chardev_ops.owner);
8552 + return ERR_PTR(r);
8553 + }
8554 +