
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Wed, 22 Sep 2021 11:37:46
Message-Id: 1632310643.00a2b84fdf9371e8fc3cfa89c197db0aa7f58939.mpagano@gentoo
1 commit: 00a2b84fdf9371e8fc3cfa89c197db0aa7f58939
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 22 11:37:23 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 22 11:37:23 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=00a2b84f
7
8 Linux patch 5.14.7
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1006_linux-5.14.7.patch | 6334 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6338 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index df8a957..0c8fa67 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -71,6 +71,10 @@ Patch: 1005_linux-5.14.6.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.14.6
23
24 +Patch: 1006_linux-5.14.7.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.14.7
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1006_linux-5.14.7.patch b/1006_linux-5.14.7.patch
33 new file mode 100644
34 index 0000000..a7e8c31
35 --- /dev/null
36 +++ b/1006_linux-5.14.7.patch
37 @@ -0,0 +1,6334 @@
38 +diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
39 +index b9f75e20fef5c..b2a645740ffe6 100644
40 +--- a/Documentation/devicetree/bindings/arm/tegra.yaml
41 ++++ b/Documentation/devicetree/bindings/arm/tegra.yaml
42 +@@ -54,7 +54,7 @@ properties:
43 + - const: toradex,apalis_t30
44 + - const: nvidia,tegra30
45 + - items:
46 +- - const: toradex,apalis_t30-eval-v1.1
47 ++ - const: toradex,apalis_t30-v1.1-eval
48 + - const: toradex,apalis_t30-eval
49 + - const: toradex,apalis_t30-v1.1
50 + - const: toradex,apalis_t30
51 +diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
52 +index 44919d48d2415..c459f169a9044 100644
53 +--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
54 ++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
55 +@@ -122,7 +122,7 @@ on various other factors also like;
56 + so the device should have enough free bytes available its OOB/Spare
57 + area to accommodate ECC for entire page. In general following expression
58 + helps in determining if given device can accommodate ECC syndrome:
59 +- "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
60 ++ "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
61 + where
62 + OOBSIZE number of bytes in OOB/spare area
63 + PAGESIZE number of bytes in main-area of device page
64 +diff --git a/Makefile b/Makefile
65 +index f9c8bbf8cf71e..efb603f06e711 100644
66 +--- a/Makefile
67 ++++ b/Makefile
68 +@@ -1,7 +1,7 @@
69 + # SPDX-License-Identifier: GPL-2.0
70 + VERSION = 5
71 + PATCHLEVEL = 14
72 +-SUBLEVEL = 6
73 ++SUBLEVEL = 7
74 + EXTRAVERSION =
75 + NAME = Opossums on Parade
76 +
77 +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
78 +index a2fbea3ee07c7..102418ac5ff4a 100644
79 +--- a/arch/arc/mm/cache.c
80 ++++ b/arch/arc/mm/cache.c
81 +@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
82 + clear_page(to);
83 + clear_bit(PG_dc_clean, &page->flags);
84 + }
85 +-
86 ++EXPORT_SYMBOL(clear_user_page);
87 +
88 + /**********************************************************************
89 + * Explicit Cache flush request from user space via syscall
90 +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
91 +index e57b23f952846..3599b9a2f1dff 100644
92 +--- a/arch/arm64/kernel/fpsimd.c
93 ++++ b/arch/arm64/kernel/fpsimd.c
94 +@@ -511,7 +511,7 @@ size_t sve_state_size(struct task_struct const *task)
95 + void sve_alloc(struct task_struct *task)
96 + {
97 + if (task->thread.sve_state) {
98 +- memset(task->thread.sve_state, 0, sve_state_size(current));
99 ++ memset(task->thread.sve_state, 0, sve_state_size(task));
100 + return;
101 + }
102 +
103 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
104 +index 5d1fc9c4bca5e..45ee8abcf2025 100644
105 +--- a/arch/arm64/kvm/arm.c
106 ++++ b/arch/arm64/kvm/arm.c
107 +@@ -1220,6 +1220,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
108 + if (copy_from_user(&reg, argp, sizeof(reg)))
109 + break;
110 +
111 ++ /*
112 ++ * We could owe a reset due to PSCI. Handle the pending reset
113 ++ * here to ensure userspace register accesses are ordered after
114 ++ * the reset.
115 ++ */
116 ++ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
117 ++ kvm_reset_vcpu(vcpu);
118 ++
119 + if (ioctl == KVM_SET_ONE_REG)
120 + r = kvm_arm_set_reg(vcpu, &reg);
121 + else
122 +diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
123 +index 6f48336b1d86a..04ebab299aa4e 100644
124 +--- a/arch/arm64/kvm/handle_exit.c
125 ++++ b/arch/arm64/kvm/handle_exit.c
126 +@@ -292,11 +292,12 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
127 + kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
128 + }
129 +
130 +-void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
131 ++void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
132 ++ u64 elr_virt, u64 elr_phys,
133 + u64 par, uintptr_t vcpu,
134 + u64 far, u64 hpfar) {
135 +- u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
136 +- u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
137 ++ u64 elr_in_kimg = __phys_to_kimg(elr_phys);
138 ++ u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
139 + u64 mode = spsr & PSR_MODE_MASK;
140 +
141 + /*
142 +@@ -309,20 +310,24 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
143 + kvm_err("Invalid host exception to nVHE hyp!\n");
144 + } else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
145 + (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
146 +- struct bug_entry *bug = find_bug(elr_in_kimg);
147 + const char *file = NULL;
148 + unsigned int line = 0;
149 +
150 + /* All hyp bugs, including warnings, are treated as fatal. */
151 +- if (bug)
152 +- bug_get_file_line(bug, &file, &line);
153 ++ if (!is_protected_kvm_enabled() ||
154 ++ IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
155 ++ struct bug_entry *bug = find_bug(elr_in_kimg);
156 ++
157 ++ if (bug)
158 ++ bug_get_file_line(bug, &file, &line);
159 ++ }
160 +
161 + if (file)
162 + kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
163 + else
164 +- kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset);
165 ++ kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
166 + } else {
167 +- kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset);
168 ++ kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
169 + }
170 +
171 + /*
172 +@@ -334,5 +339,5 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
173 + kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
174 +
175 + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
176 +- spsr, elr, esr, far, hpfar, par, vcpu);
177 ++ spsr, elr_virt, esr, far, hpfar, par, vcpu);
178 + }
179 +diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
180 +index 2b23400e0fb30..4b652ffb591d4 100644
181 +--- a/arch/arm64/kvm/hyp/nvhe/host.S
182 ++++ b/arch/arm64/kvm/hyp/nvhe/host.S
183 +@@ -7,6 +7,7 @@
184 + #include <linux/linkage.h>
185 +
186 + #include <asm/assembler.h>
187 ++#include <asm/kvm_arm.h>
188 + #include <asm/kvm_asm.h>
189 + #include <asm/kvm_mmu.h>
190 +
191 +@@ -85,12 +86,24 @@ SYM_FUNC_START(__hyp_do_panic)
192 +
193 + mov x29, x0
194 +
195 ++#ifdef CONFIG_NVHE_EL2_DEBUG
196 ++ /* Ensure host stage-2 is disabled */
197 ++ mrs x0, hcr_el2
198 ++ bic x0, x0, #HCR_VM
199 ++ msr hcr_el2, x0
200 ++ isb
201 ++ tlbi vmalls12e1
202 ++ dsb nsh
203 ++#endif
204 ++
205 + /* Load the panic arguments into x0-7 */
206 + mrs x0, esr_el2
207 +- get_vcpu_ptr x4, x5
208 +- mrs x5, far_el2
209 +- mrs x6, hpfar_el2
210 +- mov x7, xzr // Unused argument
211 ++ mov x4, x3
212 ++ mov x3, x2
213 ++ hyp_pa x3, x6
214 ++ get_vcpu_ptr x5, x6
215 ++ mrs x6, far_el2
216 ++ mrs x7, hpfar_el2
217 +
218 + /* Enter the host, conditionally restoring the host context. */
219 + cbz x29, __host_enter_without_restoring
220 +diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
221 +index cba7872d69a85..d010778b93ffe 100644
222 +--- a/arch/arm64/kvm/reset.c
223 ++++ b/arch/arm64/kvm/reset.c
224 +@@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
225 + */
226 + int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
227 + {
228 ++ struct vcpu_reset_state reset_state;
229 + int ret;
230 + bool loaded;
231 + u32 pstate;
232 +
233 ++ mutex_lock(&vcpu->kvm->lock);
234 ++ reset_state = vcpu->arch.reset_state;
235 ++ WRITE_ONCE(vcpu->arch.reset_state.reset, false);
236 ++ mutex_unlock(&vcpu->kvm->lock);
237 ++
238 + /* Reset PMU outside of the non-preemptible section */
239 + kvm_pmu_vcpu_reset(vcpu);
240 +
241 +@@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
242 + * Additional reset state handling that PSCI may have imposed on us.
243 + * Must be done after all the sys_reg reset.
244 + */
245 +- if (vcpu->arch.reset_state.reset) {
246 +- unsigned long target_pc = vcpu->arch.reset_state.pc;
247 ++ if (reset_state.reset) {
248 ++ unsigned long target_pc = reset_state.pc;
249 +
250 + /* Gracefully handle Thumb2 entry point */
251 + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
252 +@@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
253 + }
254 +
255 + /* Propagate caller endianness */
256 +- if (vcpu->arch.reset_state.be)
257 ++ if (reset_state.be)
258 + kvm_vcpu_set_be(vcpu);
259 +
260 + *vcpu_pc(vcpu) = target_pc;
261 +- vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
262 +-
263 +- vcpu->arch.reset_state.reset = false;
264 ++ vcpu_set_reg(vcpu, 0, reset_state.r0);
265 + }
266 +
267 + /* Reset timer */
268 +@@ -317,6 +321,14 @@ int kvm_set_ipa_limit(void)
269 + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
270 + parange = cpuid_feature_extract_unsigned_field(mmfr0,
271 + ID_AA64MMFR0_PARANGE_SHIFT);
272 ++ /*
273 ++ * IPA size beyond 48 bits could not be supported
274 ++ * on either 4K or 16K page size. Hence let's cap
275 ++ * it to 48 bits, in case it's reported as larger
276 ++ * on the system.
277 ++ */
278 ++ if (PAGE_SIZE != SZ_64K)
279 ++ parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
280 +
281 + /*
282 + * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
283 +diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
284 +index 21bbd615ca410..ec4e2d3635077 100644
285 +--- a/arch/powerpc/kernel/interrupt.c
286 ++++ b/arch/powerpc/kernel/interrupt.c
287 +@@ -19,6 +19,7 @@
288 + #include <asm/switch_to.h>
289 + #include <asm/syscall.h>
290 + #include <asm/time.h>
291 ++#include <asm/tm.h>
292 + #include <asm/unistd.h>
293 +
294 + #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
295 +@@ -138,6 +139,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
296 + */
297 + irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
298 +
299 ++ /*
300 ++ * If system call is called with TM active, set _TIF_RESTOREALL to
301 ++ * prevent RFSCV being used to return to userspace, because POWER9
302 ++ * TM implementation has problems with this instruction returning to
303 ++ * transactional state. Final register values are not relevant because
304 ++ * the transaction will be aborted upon return anyway. Or in the case
305 ++ * of unsupported_scv SIGILL fault, the return state does not much
306 ++ * matter because it's an edge case.
307 ++ */
308 ++ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
309 ++ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
310 ++ current_thread_info()->flags |= _TIF_RESTOREALL;
311 ++
312 ++ /*
313 ++ * If the system call was made with a transaction active, doom it and
314 ++ * return without performing the system call. Unless it was an
315 ++ * unsupported scv vector, in which case it's treated like an illegal
316 ++ * instruction.
317 ++ */
318 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
319 ++ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
320 ++ !trap_is_unsupported_scv(regs)) {
321 ++ /* Enable TM in the kernel, and disable EE (for scv) */
322 ++ hard_irq_disable();
323 ++ mtmsr(mfmsr() | MSR_TM);
324 ++
325 ++ /* tabort, this dooms the transaction, nothing else */
326 ++ asm volatile(".long 0x7c00071d | ((%0) << 16)"
327 ++ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
328 ++
329 ++ /*
330 ++ * Userspace will never see the return value. Execution will
331 ++ * resume after the tbegin. of the aborted transaction with the
332 ++ * checkpointed register state. A context switch could occur
333 ++ * or signal delivered to the process before resuming the
334 ++ * doomed transaction context, but that should all be handled
335 ++ * as expected.
336 ++ */
337 ++ return -ENOSYS;
338 ++ }
339 ++#endif // CONFIG_PPC_TRANSACTIONAL_MEM
340 ++
341 + local_irq_enable();
342 +
343 + if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
344 +diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
345 +index d4212d2ff0b54..ec950b08a8dcc 100644
346 +--- a/arch/powerpc/kernel/interrupt_64.S
347 ++++ b/arch/powerpc/kernel/interrupt_64.S
348 +@@ -12,7 +12,6 @@
349 + #include <asm/mmu.h>
350 + #include <asm/ppc_asm.h>
351 + #include <asm/ptrace.h>
352 +-#include <asm/tm.h>
353 +
354 + .section ".toc","aw"
355 + SYS_CALL_TABLE:
356 +@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
357 + .globl system_call_vectored_\name
358 + system_call_vectored_\name:
359 + _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
360 +-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
361 +-BEGIN_FTR_SECTION
362 +- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
363 +- bne tabort_syscall
364 +-END_FTR_SECTION_IFSET(CPU_FTR_TM)
365 +-#endif
366 + SCV_INTERRUPT_TO_KERNEL
367 + mr r10,r1
368 + ld r1,PACAKSAVE(r13)
369 +@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
370 + .globl system_call_common
371 + system_call_common:
372 + _ASM_NOKPROBE_SYMBOL(system_call_common)
373 +-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
374 +-BEGIN_FTR_SECTION
375 +- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
376 +- bne tabort_syscall
377 +-END_FTR_SECTION_IFSET(CPU_FTR_TM)
378 +-#endif
379 + mr r10,r1
380 + ld r1,PACAKSAVE(r13)
381 + std r10,0(r1)
382 +@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
383 + RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
384 + #endif
385 +
386 +-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
387 +-tabort_syscall:
388 +-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
389 +- /* Firstly we need to enable TM in the kernel */
390 +- mfmsr r10
391 +- li r9, 1
392 +- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
393 +- mtmsrd r10, 0
394 +-
395 +- /* tabort, this dooms the transaction, nothing else */
396 +- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
397 +- TABORT(R9)
398 +-
399 +- /*
400 +- * Return directly to userspace. We have corrupted user register state,
401 +- * but userspace will never see that register state. Execution will
402 +- * resume after the tbegin of the aborted transaction with the
403 +- * checkpointed register state.
404 +- */
405 +- li r9, MSR_RI
406 +- andc r10, r10, r9
407 +- mtmsrd r10, 1
408 +- mtspr SPRN_SRR0, r11
409 +- mtspr SPRN_SRR1, r12
410 +- RFI_TO_USER
411 +- b . /* prevent speculative execution */
412 +-#endif
413 +-
414 + /*
415 + * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
416 + * touched, no exit work created, then this can be used.
417 +diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
418 +index 47a683cd00d24..fd829f7f25a47 100644
419 +--- a/arch/powerpc/kernel/mce.c
420 ++++ b/arch/powerpc/kernel/mce.c
421 +@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
422 + {
423 + int index;
424 + struct machine_check_event evt;
425 ++ unsigned long msr;
426 +
427 + if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
428 + return;
429 +@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
430 + memcpy(&local_paca->mce_info->mce_event_queue[index],
431 + &evt, sizeof(evt));
432 +
433 +- /* Queue irq work to process this event later. */
434 +- irq_work_queue(&mce_event_process_work);
435 ++ /*
436 ++ * Queue irq work to process this event later. Before
437 ++ * queuing the work enable translation for non radix LPAR,
438 ++ * as irq_work_queue may try to access memory outside RMO
439 ++ * region.
440 ++ */
441 ++ if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
442 ++ msr = mfmsr();
443 ++ mtmsr(msr | MSR_IR | MSR_DR);
444 ++ irq_work_queue(&mce_event_process_work);
445 ++ mtmsr(msr);
446 ++ } else {
447 ++ irq_work_queue(&mce_event_process_work);
448 ++ }
449 + }
450 +
451 + void mce_common_process_ue(struct pt_regs *regs,
452 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
453 +index 8dd437d7a2c63..dd18e1c447512 100644
454 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
455 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
456 +@@ -2578,7 +2578,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
457 + /* The following code handles the fake_suspend = 1 case */
458 + mflr r0
459 + std r0, PPC_LR_STKOFF(r1)
460 +- stdu r1, -PPC_MIN_STKFRM(r1)
461 ++ stdu r1, -TM_FRAME_SIZE(r1)
462 +
463 + /* Turn on TM. */
464 + mfmsr r8
465 +@@ -2593,10 +2593,42 @@ BEGIN_FTR_SECTION
466 + END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
467 + nop
468 +
469 ++ /*
470 ++ * It's possible that treclaim. may modify registers, if we have lost
471 ++ * track of fake-suspend state in the guest due to it using rfscv.
472 ++ * Save and restore registers in case this occurs.
473 ++ */
474 ++ mfspr r3, SPRN_DSCR
475 ++ mfspr r4, SPRN_XER
476 ++ mfspr r5, SPRN_AMR
477 ++ /* SPRN_TAR would need to be saved here if the kernel ever used it */
478 ++ mfcr r12
479 ++ SAVE_NVGPRS(r1)
480 ++ SAVE_GPR(2, r1)
481 ++ SAVE_GPR(3, r1)
482 ++ SAVE_GPR(4, r1)
483 ++ SAVE_GPR(5, r1)
484 ++ stw r12, 8(r1)
485 ++ std r1, HSTATE_HOST_R1(r13)
486 ++
487 + /* We have to treclaim here because that's the only way to do S->N */
488 + li r3, TM_CAUSE_KVM_RESCHED
489 + TRECLAIM(R3)
490 +
491 ++ GET_PACA(r13)
492 ++ ld r1, HSTATE_HOST_R1(r13)
493 ++ REST_GPR(2, r1)
494 ++ REST_GPR(3, r1)
495 ++ REST_GPR(4, r1)
496 ++ REST_GPR(5, r1)
497 ++ lwz r12, 8(r1)
498 ++ REST_NVGPRS(r1)
499 ++ mtspr SPRN_DSCR, r3
500 ++ mtspr SPRN_XER, r4
501 ++ mtspr SPRN_AMR, r5
502 ++ mtcr r12
503 ++ HMT_MEDIUM
504 ++
505 + /*
506 + * We were in fake suspend, so we are not going to save the
507 + * register state as the guest checkpointed state (since
508 +@@ -2624,7 +2656,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
509 + std r5, VCPU_TFHAR(r9)
510 + std r6, VCPU_TFIAR(r9)
511 +
512 +- addi r1, r1, PPC_MIN_STKFRM
513 ++ addi r1, r1, TM_FRAME_SIZE
514 + ld r0, PPC_LR_STKOFF(r1)
515 + mtlr r0
516 + blr
517 +diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
518 +index b0ca5058e7ae6..767852ae5e84f 100644
519 +--- a/arch/riscv/include/asm/page.h
520 ++++ b/arch/riscv/include/asm/page.h
521 +@@ -79,8 +79,8 @@ typedef struct page *pgtable_t;
522 + #endif
523 +
524 + #ifdef CONFIG_MMU
525 +-extern unsigned long pfn_base;
526 +-#define ARCH_PFN_OFFSET (pfn_base)
527 ++extern unsigned long riscv_pfn_base;
528 ++#define ARCH_PFN_OFFSET (riscv_pfn_base)
529 + #else
530 + #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
531 + #endif /* CONFIG_MMU */
532 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
533 +index 7cb4f391d106f..9786100f3a140 100644
534 +--- a/arch/riscv/mm/init.c
535 ++++ b/arch/riscv/mm/init.c
536 +@@ -234,8 +234,8 @@ static struct pt_alloc_ops _pt_ops __initdata;
537 + #define pt_ops _pt_ops
538 + #endif
539 +
540 +-unsigned long pfn_base __ro_after_init;
541 +-EXPORT_SYMBOL(pfn_base);
542 ++unsigned long riscv_pfn_base __ro_after_init;
543 ++EXPORT_SYMBOL(riscv_pfn_base);
544 +
545 + pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
546 + pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
547 +@@ -579,7 +579,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
548 + kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
549 + #endif
550 +
551 +- pfn_base = PFN_DOWN(kernel_map.phys_addr);
552 ++ riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
553 +
554 + /*
555 + * Enforce boot alignment requirements of RV32 and
556 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
557 +index 88419263a89a9..840d8594437d5 100644
558 +--- a/arch/s390/net/bpf_jit_comp.c
559 ++++ b/arch/s390/net/bpf_jit_comp.c
560 +@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
561 +
562 + #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
563 + ({ \
564 +- /* Branch instruction needs 6 bytes */ \
565 +- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
566 ++ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
567 + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
568 + REG_SET_SEEN(b1); \
569 + REG_SET_SEEN(b2); \
570 +@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
571 + EMIT4(0xb9080000, dst_reg, src_reg);
572 + break;
573 + case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
574 +- if (!imm)
575 +- break;
576 +- /* alfi %dst,imm */
577 +- EMIT6_IMM(0xc20b0000, dst_reg, imm);
578 ++ if (imm != 0) {
579 ++ /* alfi %dst,imm */
580 ++ EMIT6_IMM(0xc20b0000, dst_reg, imm);
581 ++ }
582 + EMIT_ZERO(dst_reg);
583 + break;
584 + case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
585 +@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
586 + EMIT4(0xb9090000, dst_reg, src_reg);
587 + break;
588 + case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
589 +- if (!imm)
590 +- break;
591 +- /* alfi %dst,-imm */
592 +- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
593 ++ if (imm != 0) {
594 ++ /* alfi %dst,-imm */
595 ++ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
596 ++ }
597 + EMIT_ZERO(dst_reg);
598 + break;
599 + case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
600 + if (!imm)
601 + break;
602 +- /* agfi %dst,-imm */
603 +- EMIT6_IMM(0xc2080000, dst_reg, -imm);
604 ++ if (imm == -0x80000000) {
605 ++ /* algfi %dst,0x80000000 */
606 ++ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
607 ++ } else {
608 ++ /* agfi %dst,-imm */
609 ++ EMIT6_IMM(0xc2080000, dst_reg, -imm);
610 ++ }
611 + break;
612 + /*
613 + * BPF_MUL
614 +@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
615 + EMIT4(0xb90c0000, dst_reg, src_reg);
616 + break;
617 + case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
618 +- if (imm == 1)
619 +- break;
620 +- /* msfi %r5,imm */
621 +- EMIT6_IMM(0xc2010000, dst_reg, imm);
622 ++ if (imm != 1) {
623 ++ /* msfi %r5,imm */
624 ++ EMIT6_IMM(0xc2010000, dst_reg, imm);
625 ++ }
626 + EMIT_ZERO(dst_reg);
627 + break;
628 + case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
629 +@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
630 + if (BPF_OP(insn->code) == BPF_MOD)
631 + /* lhgi %dst,0 */
632 + EMIT4_IMM(0xa7090000, dst_reg, 0);
633 ++ else
634 ++ EMIT_ZERO(dst_reg);
635 + break;
636 + }
637 + /* lhi %w0,0 */
638 +@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
639 + EMIT4(0xb9820000, dst_reg, src_reg);
640 + break;
641 + case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
642 +- if (!imm)
643 +- break;
644 +- /* xilf %dst,imm */
645 +- EMIT6_IMM(0xc0070000, dst_reg, imm);
646 ++ if (imm != 0) {
647 ++ /* xilf %dst,imm */
648 ++ EMIT6_IMM(0xc0070000, dst_reg, imm);
649 ++ }
650 + EMIT_ZERO(dst_reg);
651 + break;
652 + case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
653 +@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
654 + EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
655 + break;
656 + case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
657 +- if (imm == 0)
658 +- break;
659 +- /* sll %dst,imm(%r0) */
660 +- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
661 ++ if (imm != 0) {
662 ++ /* sll %dst,imm(%r0) */
663 ++ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
664 ++ }
665 + EMIT_ZERO(dst_reg);
666 + break;
667 + case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
668 +@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
669 + EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
670 + break;
671 + case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
672 +- if (imm == 0)
673 +- break;
674 +- /* srl %dst,imm(%r0) */
675 +- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
676 ++ if (imm != 0) {
677 ++ /* srl %dst,imm(%r0) */
678 ++ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
679 ++ }
680 + EMIT_ZERO(dst_reg);
681 + break;
682 + case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
683 +@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
684 + EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
685 + break;
686 + case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
687 +- if (imm == 0)
688 +- break;
689 +- /* sra %dst,imm(%r0) */
690 +- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
691 ++ if (imm != 0) {
692 ++ /* sra %dst,imm(%r0) */
693 ++ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
694 ++ }
695 + EMIT_ZERO(dst_reg);
696 + break;
697 + case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
698 +diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
699 +index ae683aa623ace..c5b35ea129cfa 100644
700 +--- a/arch/s390/pci/pci_mmio.c
701 ++++ b/arch/s390/pci/pci_mmio.c
702 +@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
703 +
704 + mmap_read_lock(current->mm);
705 + ret = -EINVAL;
706 +- vma = find_vma(current->mm, mmio_addr);
707 ++ vma = vma_lookup(current->mm, mmio_addr);
708 + if (!vma)
709 + goto out_unlock_mmap;
710 + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
711 +@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
712 +
713 + mmap_read_lock(current->mm);
714 + ret = -EINVAL;
715 +- vma = find_vma(current->mm, mmio_addr);
716 ++ vma = vma_lookup(current->mm, mmio_addr);
717 + if (!vma)
718 + goto out_unlock_mmap;
719 + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
720 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
721 +index c9fa7be3df82d..5c95d242f38d7 100644
722 +--- a/arch/x86/include/asm/uaccess.h
723 ++++ b/arch/x86/include/asm/uaccess.h
724 +@@ -301,8 +301,8 @@ do { \
725 + unsigned int __gu_low, __gu_high; \
726 + const unsigned int __user *__gu_ptr; \
727 + __gu_ptr = (const void __user *)(ptr); \
728 +- __get_user_asm(__gu_low, ptr, "l", "=r", label); \
729 +- __get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
730 ++ __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
731 ++ __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
732 + (x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
733 + } while (0)
734 + #else
735 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
736 +index 8cb7816d03b4c..193204aee8801 100644
737 +--- a/arch/x86/kernel/cpu/mce/core.c
738 ++++ b/arch/x86/kernel/cpu/mce/core.c
739 +@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
740 +
741 + static void kill_me_now(struct callback_head *ch)
742 + {
743 ++ struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
744 ++
745 ++ p->mce_count = 0;
746 + force_sig(SIGBUS);
747 + }
748 +
749 +@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
750 + int flags = MF_ACTION_REQUIRED;
751 + int ret;
752 +
753 ++ p->mce_count = 0;
754 + pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
755 +
756 + if (!p->mce_ripv)
757 +@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
758 + }
759 + }
760 +
761 +-static void queue_task_work(struct mce *m, int kill_current_task)
762 ++static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
763 + {
764 +- current->mce_addr = m->addr;
765 +- current->mce_kflags = m->kflags;
766 +- current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
767 +- current->mce_whole_page = whole_page(m);
768 ++ int count = ++current->mce_count;
769 +
770 +- if (kill_current_task)
771 +- current->mce_kill_me.func = kill_me_now;
772 +- else
773 +- current->mce_kill_me.func = kill_me_maybe;
774 ++ /* First call, save all the details */
775 ++ if (count == 1) {
776 ++ current->mce_addr = m->addr;
777 ++ current->mce_kflags = m->kflags;
778 ++ current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
779 ++ current->mce_whole_page = whole_page(m);
780 ++
781 ++ if (kill_current_task)
782 ++ current->mce_kill_me.func = kill_me_now;
783 ++ else
784 ++ current->mce_kill_me.func = kill_me_maybe;
785 ++ }
786 ++
787 ++ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
788 ++ if (count > 10)
789 ++ mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
790 ++
791 ++ /* Second or later call, make sure page address matches the one from first call */
792 ++ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
793 ++ mce_panic("Consecutive machine checks to different user pages", m, msg);
794 ++
795 ++ /* Do not call task_work_add() more than once */
796 ++ if (count > 1)
797 ++ return;
798 +
799 + task_work_add(current, &current->mce_kill_me, TWA_RESUME);
800 + }
801 +@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
802 + /* If this triggers there is no way to recover. Die hard. */
803 + BUG_ON(!on_thread_stack() || !user_mode(regs));
804 +
805 +- queue_task_work(&m, kill_current_task);
806 ++ queue_task_work(&m, msg, kill_current_task);
807 +
808 + } else {
809 + /*
810 +@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
811 + }
812 +
813 + if (m.kflags & MCE_IN_KERNEL_COPYIN)
814 +- queue_task_work(&m, kill_current_task);
815 ++ queue_task_work(&m, msg, kill_current_task);
816 + }
817 + out:
818 + mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
819 +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
820 +index ddeaba947eb3d..879886c6cc537 100644
821 +--- a/arch/x86/mm/init_64.c
822 ++++ b/arch/x86/mm/init_64.c
823 +@@ -1433,18 +1433,18 @@ int kern_addr_valid(unsigned long addr)
824 + return 0;
825 +
826 + p4d = p4d_offset(pgd, addr);
827 +- if (p4d_none(*p4d))
828 ++ if (!p4d_present(*p4d))
829 + return 0;
830 +
831 + pud = pud_offset(p4d, addr);
832 +- if (pud_none(*pud))
833 ++ if (!pud_present(*pud))
834 + return 0;
835 +
836 + if (pud_large(*pud))
837 + return pfn_valid(pud_pfn(*pud));
838 +
839 + pmd = pmd_offset(pud, addr);
840 +- if (pmd_none(*pmd))
841 ++ if (!pmd_present(*pmd))
842 + return 0;
843 +
844 + if (pmd_large(*pmd))
845 +diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
846 +index 3112ca7786ed1..4ba2a3ee4bce1 100644
847 +--- a/arch/x86/mm/pat/memtype.c
848 ++++ b/arch/x86/mm/pat/memtype.c
849 +@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
850 + int err = 0;
851 +
852 + start = sanitize_phys(start);
853 +- end = sanitize_phys(end);
854 ++
855 ++ /*
856 ++ * The end address passed into this function is exclusive, but
857 ++ * sanitize_phys() expects an inclusive address.
858 ++ */
859 ++ end = sanitize_phys(end - 1) + 1;
860 + if (start >= end) {
861 + WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
862 + start, end - 1, cattr_name(req_type));
863 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
864 +index 03149422dce2b..475d9c71b1713 100644
865 +--- a/arch/x86/xen/enlighten_pv.c
866 ++++ b/arch/x86/xen/enlighten_pv.c
867 +@@ -1215,6 +1215,11 @@ static void __init xen_dom0_set_legacy_features(void)
868 + x86_platform.legacy.rtc = 1;
869 + }
870 +
871 ++static void __init xen_domu_set_legacy_features(void)
872 ++{
873 ++ x86_platform.legacy.rtc = 0;
874 ++}
875 ++
876 + /* First C function to be called on Xen boot */
877 + asmlinkage __visible void __init xen_start_kernel(void)
878 + {
879 +@@ -1367,6 +1372,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
880 + add_preferred_console("xenboot", 0, NULL);
881 + if (pci_xen)
882 + x86_init.pci.arch_init = pci_xen_init;
883 ++ x86_platform.set_legacy_features =
884 ++ xen_domu_set_legacy_features;
885 + } else {
886 + const struct dom0_vga_console_info *info =
887 + (void *)((char *)xen_start_info +
888 +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
889 +index ade789e73ee42..167c4958cdf40 100644
890 +--- a/arch/x86/xen/mmu_pv.c
891 ++++ b/arch/x86/xen/mmu_pv.c
892 +@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
893 + if (pinned) {
894 + struct page *page = pfn_to_page(pfn);
895 +
896 +- if (static_branch_likely(&xen_struct_pages_ready))
897 ++ pinned = false;
898 ++ if (static_branch_likely(&xen_struct_pages_ready)) {
899 ++ pinned = PagePinned(page);
900 + SetPagePinned(page);
901 ++ }
902 +
903 + xen_mc_batch();
904 +
905 + __set_pfn_prot(pfn, PAGE_KERNEL_RO);
906 +
907 +- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
908 ++ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
909 + __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
910 +
911 + xen_mc_issue(PARAVIRT_LAZY_MMU);
912 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
913 +index 9360c65169ff4..3a1038b6eeb30 100644
914 +--- a/block/bfq-iosched.c
915 ++++ b/block/bfq-iosched.c
916 +@@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
917 + * are likely to increase the throughput.
918 + */
919 + bfqq->new_bfqq = new_bfqq;
920 ++ /*
921 ++ * The above assignment schedules the following redirections:
922 ++ * each time some I/O for bfqq arrives, the process that
923 ++ * generated that I/O is disassociated from bfqq and
924 ++ * associated with new_bfqq. Here we increases new_bfqq->ref
925 ++ * in advance, adding the number of processes that are
926 ++ * expected to be associated with new_bfqq as they happen to
927 ++ * issue I/O.
928 ++ */
929 + new_bfqq->ref += process_refs;
930 + return new_bfqq;
931 + }
932 +@@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
933 + {
934 + struct bfq_queue *in_service_bfqq, *new_bfqq;
935 +
936 ++ /* if a merge has already been setup, then proceed with that first */
937 ++ if (bfqq->new_bfqq)
938 ++ return bfqq->new_bfqq;
939 ++
940 + /*
941 + * Check delayed stable merge for rotational or non-queueing
942 + * devs. For this branch to be executed, bfqq must not be
943 +@@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
944 + if (bfq_too_late_for_merging(bfqq))
945 + return NULL;
946 +
947 +- if (bfqq->new_bfqq)
948 +- return bfqq->new_bfqq;
949 +-
950 + if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
951 + return NULL;
952 +
953 +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
954 +index 31fe9be179d99..26446f97deee4 100644
955 +--- a/block/blk-cgroup.c
956 ++++ b/block/blk-cgroup.c
957 +@@ -1201,10 +1201,6 @@ int blkcg_init_queue(struct request_queue *q)
958 + if (preloaded)
959 + radix_tree_preload_end();
960 +
961 +- ret = blk_iolatency_init(q);
962 +- if (ret)
963 +- goto err_destroy_all;
964 +-
965 + ret = blk_ioprio_init(q);
966 + if (ret)
967 + goto err_destroy_all;
968 +@@ -1213,6 +1209,12 @@ int blkcg_init_queue(struct request_queue *q)
969 + if (ret)
970 + goto err_destroy_all;
971 +
972 ++ ret = blk_iolatency_init(q);
973 ++ if (ret) {
974 ++ blk_throtl_exit(q);
975 ++ goto err_destroy_all;
976 ++ }
977 ++
978 + return 0;
979 +
980 + err_destroy_all:
981 +diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
982 +index a97f33d0c59f9..94665037f4a35 100644
983 +--- a/drivers/base/power/trace.c
984 ++++ b/drivers/base/power/trace.c
985 +@@ -13,6 +13,7 @@
986 + #include <linux/export.h>
987 + #include <linux/rtc.h>
988 + #include <linux/suspend.h>
989 ++#include <linux/init.h>
990 +
991 + #include <linux/mc146818rtc.h>
992 +
993 +@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
994 + const char *file = *(const char **)(tracedata + 2);
995 + unsigned int user_hash_value, file_hash_value;
996 +
997 ++ if (!x86_platform.legacy.rtc)
998 ++ return;
999 ++
1000 + user_hash_value = user % USERHASH;
1001 + file_hash_value = hash_string(lineno, file, FILEHASH);
1002 + set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
1003 +@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
1004 +
1005 + static int __init early_resume_init(void)
1006 + {
1007 ++ if (!x86_platform.legacy.rtc)
1008 ++ return 0;
1009 ++
1010 + hash_value_early_read = read_magic_time();
1011 + register_pm_notifier(&pm_trace_nb);
1012 + return 0;
1013 +@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
1014 + unsigned int val = hash_value_early_read;
1015 + unsigned int user, file, dev;
1016 +
1017 ++ if (!x86_platform.legacy.rtc)
1018 ++ return 0;
1019 ++
1020 + user = val % USERHASH;
1021 + val = val / USERHASH;
1022 + file = val % FILEHASH;
1023 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1024 +index f0cdff0c5fbf4..1f91bd41a29b2 100644
1025 +--- a/drivers/block/loop.c
1026 ++++ b/drivers/block/loop.c
1027 +@@ -2113,18 +2113,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
1028 + return 0;
1029 + }
1030 +
1031 +-static int unregister_transfer_cb(int id, void *ptr, void *data)
1032 +-{
1033 +- struct loop_device *lo = ptr;
1034 +- struct loop_func_table *xfer = data;
1035 +-
1036 +- mutex_lock(&lo->lo_mutex);
1037 +- if (lo->lo_encryption == xfer)
1038 +- loop_release_xfer(lo);
1039 +- mutex_unlock(&lo->lo_mutex);
1040 +- return 0;
1041 +-}
1042 +-
1043 + int loop_unregister_transfer(int number)
1044 + {
1045 + unsigned int n = number;
1046 +@@ -2132,9 +2120,20 @@ int loop_unregister_transfer(int number)
1047 +
1048 + if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1049 + return -EINVAL;
1050 ++ /*
1051 ++ * This function is called from only cleanup_cryptoloop().
1052 ++ * Given that each loop device that has a transfer enabled holds a
1053 ++ * reference to the module implementing it we should never get here
1054 ++ * with a transfer that is set (unless forced module unloading is
1055 ++ * requested). Thus, check module's refcount and warn if this is
1056 ++ * not a clean unloading.
1057 ++ */
1058 ++#ifdef CONFIG_MODULE_UNLOAD
1059 ++ if (xfer->owner && module_refcount(xfer->owner) != -1)
1060 ++ pr_err("Danger! Unregistering an in use transfer function.\n");
1061 ++#endif
1062 +
1063 + xfer_funcs[n] = NULL;
1064 +- idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1065 + return 0;
1066 + }
1067 +
1068 +@@ -2325,8 +2324,9 @@ static int loop_add(int i)
1069 + } else {
1070 + err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
1071 + }
1072 ++ mutex_unlock(&loop_ctl_mutex);
1073 + if (err < 0)
1074 +- goto out_unlock;
1075 ++ goto out_free_dev;
1076 + i = err;
1077 +
1078 + err = -ENOMEM;
1079 +@@ -2392,15 +2392,19 @@ static int loop_add(int i)
1080 + disk->private_data = lo;
1081 + disk->queue = lo->lo_queue;
1082 + sprintf(disk->disk_name, "loop%d", i);
1083 ++ /* Make this loop device reachable from pathname. */
1084 + add_disk(disk);
1085 ++ /* Show this loop device. */
1086 ++ mutex_lock(&loop_ctl_mutex);
1087 ++ lo->idr_visible = true;
1088 + mutex_unlock(&loop_ctl_mutex);
1089 + return i;
1090 +
1091 + out_cleanup_tags:
1092 + blk_mq_free_tag_set(&lo->tag_set);
1093 + out_free_idr:
1094 ++ mutex_lock(&loop_ctl_mutex);
1095 + idr_remove(&loop_index_idr, i);
1096 +-out_unlock:
1097 + mutex_unlock(&loop_ctl_mutex);
1098 + out_free_dev:
1099 + kfree(lo);
1100 +@@ -2410,9 +2414,14 @@ out:
1101 +
1102 + static void loop_remove(struct loop_device *lo)
1103 + {
1104 ++ /* Make this loop device unreachable from pathname. */
1105 + del_gendisk(lo->lo_disk);
1106 + blk_cleanup_disk(lo->lo_disk);
1107 + blk_mq_free_tag_set(&lo->tag_set);
1108 ++ mutex_lock(&loop_ctl_mutex);
1109 ++ idr_remove(&loop_index_idr, lo->lo_number);
1110 ++ mutex_unlock(&loop_ctl_mutex);
1111 ++ /* There is no route which can find this loop device. */
1112 + mutex_destroy(&lo->lo_mutex);
1113 + kfree(lo);
1114 + }
1115 +@@ -2436,31 +2445,40 @@ static int loop_control_remove(int idx)
1116 + return -EINVAL;
1117 + }
1118 +
1119 ++ /* Hide this loop device for serialization. */
1120 + ret = mutex_lock_killable(&loop_ctl_mutex);
1121 + if (ret)
1122 + return ret;
1123 +-
1124 + lo = idr_find(&loop_index_idr, idx);
1125 +- if (!lo) {
1126 ++ if (!lo || !lo->idr_visible)
1127 + ret = -ENODEV;
1128 +- goto out_unlock_ctrl;
1129 +- }
1130 ++ else
1131 ++ lo->idr_visible = false;
1132 ++ mutex_unlock(&loop_ctl_mutex);
1133 ++ if (ret)
1134 ++ return ret;
1135 +
1136 ++ /* Check whether this loop device can be removed. */
1137 + ret = mutex_lock_killable(&lo->lo_mutex);
1138 + if (ret)
1139 +- goto out_unlock_ctrl;
1140 ++ goto mark_visible;
1141 + if (lo->lo_state != Lo_unbound ||
1142 + atomic_read(&lo->lo_refcnt) > 0) {
1143 + mutex_unlock(&lo->lo_mutex);
1144 + ret = -EBUSY;
1145 +- goto out_unlock_ctrl;
1146 ++ goto mark_visible;
1147 + }
1148 ++ /* Mark this loop device no longer open()-able. */
1149 + lo->lo_state = Lo_deleting;
1150 + mutex_unlock(&lo->lo_mutex);
1151 +
1152 +- idr_remove(&loop_index_idr, lo->lo_number);
1153 + loop_remove(lo);
1154 +-out_unlock_ctrl:
1155 ++ return 0;
1156 ++
1157 ++mark_visible:
1158 ++ /* Show this loop device again. */
1159 ++ mutex_lock(&loop_ctl_mutex);
1160 ++ lo->idr_visible = true;
1161 + mutex_unlock(&loop_ctl_mutex);
1162 + return ret;
1163 + }
1164 +@@ -2474,7 +2492,8 @@ static int loop_control_get_free(int idx)
1165 + if (ret)
1166 + return ret;
1167 + idr_for_each_entry(&loop_index_idr, lo, id) {
1168 +- if (lo->lo_state == Lo_unbound)
1169 ++ /* Hitting a race results in creating a new loop device which is harmless. */
1170 ++ if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
1171 + goto found;
1172 + }
1173 + mutex_unlock(&loop_ctl_mutex);
1174 +@@ -2590,10 +2609,14 @@ static void __exit loop_exit(void)
1175 + unregister_blkdev(LOOP_MAJOR, "loop");
1176 + misc_deregister(&loop_misc);
1177 +
1178 +- mutex_lock(&loop_ctl_mutex);
1179 ++ /*
1180 ++ * There is no need to use loop_ctl_mutex here, for nobody else can
1181 ++ * access loop_index_idr when this module is unloading (unless forced
1182 ++ * module unloading is requested). If this is not a clean unloading,
1183 ++ * we have no means to avoid kernel crash.
1184 ++ */
1185 + idr_for_each_entry(&loop_index_idr, lo, id)
1186 + loop_remove(lo);
1187 +- mutex_unlock(&loop_ctl_mutex);
1188 +
1189 + idr_destroy(&loop_index_idr);
1190 + }
1191 +diff --git a/drivers/block/loop.h b/drivers/block/loop.h
1192 +index 1988899db63ac..04c88dd6eabd6 100644
1193 +--- a/drivers/block/loop.h
1194 ++++ b/drivers/block/loop.h
1195 +@@ -68,6 +68,7 @@ struct loop_device {
1196 + struct blk_mq_tag_set tag_set;
1197 + struct gendisk *lo_disk;
1198 + struct mutex lo_mutex;
1199 ++ bool idr_visible;
1200 + };
1201 +
1202 + struct loop_cmd {
1203 +diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
1204 +index 50b321a1ab1b6..d574e8cb6d7cd 100644
1205 +--- a/drivers/gpio/gpio-mpc8xxx.c
1206 ++++ b/drivers/gpio/gpio-mpc8xxx.c
1207 +@@ -332,7 +332,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
1208 + mpc8xxx_gc->regs + GPIO_DIR, NULL,
1209 + BGPIOF_BIG_ENDIAN);
1210 + if (ret)
1211 +- goto err;
1212 ++ return ret;
1213 + dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
1214 + } else {
1215 + ret = bgpio_init(gc, &pdev->dev, 4,
1216 +@@ -342,7 +342,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
1217 + BGPIOF_BIG_ENDIAN
1218 + | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
1219 + if (ret)
1220 +- goto err;
1221 ++ return ret;
1222 + dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
1223 + }
1224 +
1225 +@@ -380,11 +380,11 @@ static int mpc8xxx_probe(struct platform_device *pdev)
1226 + is_acpi_node(fwnode))
1227 + gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
1228 +
1229 +- ret = gpiochip_add_data(gc, mpc8xxx_gc);
1230 ++ ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
1231 + if (ret) {
1232 + dev_err(&pdev->dev,
1233 + "GPIO chip registration failed with status %d\n", ret);
1234 +- goto err;
1235 ++ return ret;
1236 + }
1237 +
1238 + mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
1239 +@@ -416,7 +416,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
1240 +
1241 + return 0;
1242 + err:
1243 +- iounmap(mpc8xxx_gc->regs);
1244 ++ irq_domain_remove(mpc8xxx_gc->irq);
1245 + return ret;
1246 + }
1247 +
1248 +@@ -429,9 +429,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
1249 + irq_domain_remove(mpc8xxx_gc->irq);
1250 + }
1251 +
1252 +- gpiochip_remove(&mpc8xxx_gc->gc);
1253 +- iounmap(mpc8xxx_gc->regs);
1254 +-
1255 + return 0;
1256 + }
1257 +
1258 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1259 +index 8ac6eb9f1fdb8..177a663a6a691 100644
1260 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1261 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1262 +@@ -757,7 +757,7 @@ enum amd_hw_ip_block_type {
1263 + MAX_HWIP
1264 + };
1265 +
1266 +-#define HWIP_MAX_INSTANCE 8
1267 ++#define HWIP_MAX_INSTANCE 10
1268 +
1269 + struct amd_powerplay {
1270 + void *pp_handle;
1271 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
1272 +index f9c01bdc3d4c7..ec472c244835c 100644
1273 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
1274 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
1275 +@@ -191,6 +191,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
1276 + kgd2kfd_suspend(adev->kfd.dev, run_pm);
1277 + }
1278 +
1279 ++int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
1280 ++{
1281 ++ int r = 0;
1282 ++
1283 ++ if (adev->kfd.dev)
1284 ++ r = kgd2kfd_resume_iommu(adev->kfd.dev);
1285 ++
1286 ++ return r;
1287 ++}
1288 ++
1289 + int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
1290 + {
1291 + int r = 0;
1292 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
1293 +index cf62f43a03da1..293dd0d595c7a 100644
1294 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
1295 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
1296 +@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
1297 + void amdgpu_amdkfd_fini(void);
1298 +
1299 + void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
1300 ++int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
1301 + int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
1302 + void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
1303 + const void *ih_ring_entry);
1304 +@@ -325,6 +326,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
1305 + const struct kgd2kfd_shared_resources *gpu_resources);
1306 + void kgd2kfd_device_exit(struct kfd_dev *kfd);
1307 + void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
1308 ++int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
1309 + int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
1310 + int kgd2kfd_pre_reset(struct kfd_dev *kfd);
1311 + int kgd2kfd_post_reset(struct kfd_dev *kfd);
1312 +@@ -363,6 +365,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
1313 + {
1314 + }
1315 +
1316 ++static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
1317 ++{
1318 ++ return 0;
1319 ++}
1320 ++
1321 + static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
1322 + {
1323 + return 0;
1324 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1325 +index 536005bff24ad..83db7d8fa1508 100644
1326 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1327 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
1328 +@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
1329 + struct dentry *ent;
1330 + int r, i;
1331 +
1332 +-
1333 +-
1334 + ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
1335 + &fops_ib_preempt);
1336 +- if (!ent) {
1337 ++ if (IS_ERR(ent)) {
1338 + DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
1339 +- return -EIO;
1340 ++ return PTR_ERR(ent);
1341 + }
1342 +
1343 + ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
1344 + &fops_sclk_set);
1345 +- if (!ent) {
1346 ++ if (IS_ERR(ent)) {
1347 + DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
1348 +- return -EIO;
1349 ++ return PTR_ERR(ent);
1350 + }
1351 +
1352 + /* Register debugfs entries for amdgpu_ttm */
1353 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1354 +index f944ed858f3e7..7b42636fc7dc6 100644
1355 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1356 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1357 +@@ -2342,6 +2342,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1358 + if (r)
1359 + goto init_failed;
1360 +
1361 ++ r = amdgpu_amdkfd_resume_iommu(adev);
1362 ++ if (r)
1363 ++ goto init_failed;
1364 ++
1365 + r = amdgpu_device_ip_hw_init_phase1(adev);
1366 + if (r)
1367 + goto init_failed;
1368 +@@ -3096,6 +3100,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
1369 + {
1370 + int r;
1371 +
1372 ++ r = amdgpu_amdkfd_resume_iommu(adev);
1373 ++ if (r)
1374 ++ return r;
1375 ++
1376 + r = amdgpu_device_ip_resume_phase1(adev);
1377 + if (r)
1378 + return r;
1379 +@@ -4534,6 +4542,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
1380 + dev_warn(tmp_adev->dev, "asic atom init failed!");
1381 + } else {
1382 + dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
1383 ++ r = amdgpu_amdkfd_resume_iommu(tmp_adev);
1384 ++ if (r)
1385 ++ goto out;
1386 ++
1387 + r = amdgpu_device_ip_resume_phase1(tmp_adev);
1388 + if (r)
1389 + goto out;
1390 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
1391 +index 7b634a1517f9c..0554576d36955 100644
1392 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
1393 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
1394 +@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
1395 + ent = debugfs_create_file(name,
1396 + S_IFREG | S_IRUGO, root,
1397 + ring, &amdgpu_debugfs_ring_fops);
1398 +- if (!ent)
1399 +- return -ENOMEM;
1400 ++ if (IS_ERR(ent))
1401 ++ return PTR_ERR(ent);
1402 +
1403 + i_size_write(ent->d_inode, ring->ring_size + 12);
1404 + ring->ent = ent;
1405 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1406 +index 3a55f08e00e1d..2335b596d892f 100644
1407 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1408 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1409 +@@ -513,6 +513,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
1410 + goto out;
1411 + }
1412 +
1413 ++ if (bo->type == ttm_bo_type_device &&
1414 ++ new_mem->mem_type == TTM_PL_VRAM &&
1415 ++ old_mem->mem_type != TTM_PL_VRAM) {
1416 ++ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
1417 ++ * accesses the BO after it's moved.
1418 ++ */
1419 ++ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1420 ++ }
1421 ++
1422 + if (adev->mman.buffer_funcs_enabled) {
1423 + if (((old_mem->mem_type == TTM_PL_SYSTEM &&
1424 + new_mem->mem_type == TTM_PL_VRAM) ||
1425 +@@ -543,15 +552,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
1426 + return r;
1427 + }
1428 +
1429 +- if (bo->type == ttm_bo_type_device &&
1430 +- new_mem->mem_type == TTM_PL_VRAM &&
1431 +- old_mem->mem_type != TTM_PL_VRAM) {
1432 +- /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
1433 +- * accesses the BO after it's moved.
1434 +- */
1435 +- abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1436 +- }
1437 +-
1438 + out:
1439 + /* update statistics */
1440 + atomic64_add(bo->base.size, &adev->num_bytes_moved);
1441 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1442 +index 6b57dfd2cd2ac..9e52948d49920 100644
1443 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1444 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1445 +@@ -1008,17 +1008,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
1446 + return ret;
1447 + }
1448 +
1449 +-static int kfd_resume(struct kfd_dev *kfd)
1450 ++int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
1451 + {
1452 + int err = 0;
1453 +
1454 + err = kfd_iommu_resume(kfd);
1455 +- if (err) {
1456 ++ if (err)
1457 + dev_err(kfd_device,
1458 + "Failed to resume IOMMU for device %x:%x\n",
1459 + kfd->pdev->vendor, kfd->pdev->device);
1460 +- return err;
1461 +- }
1462 ++ return err;
1463 ++}
1464 ++
1465 ++static int kfd_resume(struct kfd_dev *kfd)
1466 ++{
1467 ++ int err = 0;
1468 +
1469 + err = kfd->dqm->ops.start(kfd->dqm);
1470 + if (err) {
1471 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1472 +index 3f913e4abd49e..6a4c6c47dcfaf 100644
1473 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1474 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1475 +@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
1476 + uint32_t agp_base, agp_bot, agp_top;
1477 + PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1478 +
1479 ++ memset(pa_config, 0, sizeof(*pa_config));
1480 ++
1481 + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1482 + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1483 +
1484 +@@ -6778,14 +6780,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
1485 +
1486 + #if defined(CONFIG_DRM_AMD_DC_DCN)
1487 + static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
1488 +- struct dc_state *dc_state)
1489 ++ struct dc_state *dc_state,
1490 ++ struct dsc_mst_fairness_vars *vars)
1491 + {
1492 + struct dc_stream_state *stream = NULL;
1493 + struct drm_connector *connector;
1494 + struct drm_connector_state *new_con_state;
1495 + struct amdgpu_dm_connector *aconnector;
1496 + struct dm_connector_state *dm_conn_state;
1497 +- int i, j, clock, bpp;
1498 ++ int i, j, clock;
1499 + int vcpi, pbn_div, pbn = 0;
1500 +
1501 + for_each_new_connector_in_state(state, connector, new_con_state, i) {
1502 +@@ -6824,9 +6827,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
1503 + }
1504 +
1505 + pbn_div = dm_mst_get_pbn_divider(stream->link);
1506 +- bpp = stream->timing.dsc_cfg.bits_per_pixel;
1507 + clock = stream->timing.pix_clk_100hz / 10;
1508 +- pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
1509 ++ /* pbn is calculated by compute_mst_dsc_configs_for_state*/
1510 ++ for (j = 0; j < dc_state->stream_count; j++) {
1511 ++ if (vars[j].aconnector == aconnector) {
1512 ++ pbn = vars[j].pbn;
1513 ++ break;
1514 ++ }
1515 ++ }
1516 ++
1517 + vcpi = drm_dp_mst_atomic_enable_dsc(state,
1518 + aconnector->port,
1519 + pbn, pbn_div,
1520 +@@ -10208,6 +10217,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
1521 + int ret, i;
1522 + bool lock_and_validation_needed = false;
1523 + struct dm_crtc_state *dm_old_crtc_state;
1524 ++#if defined(CONFIG_DRM_AMD_DC_DCN)
1525 ++ struct dsc_mst_fairness_vars vars[MAX_PIPES];
1526 ++#endif
1527 +
1528 + trace_amdgpu_dm_atomic_check_begin(state);
1529 +
1530 +@@ -10438,10 +10450,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
1531 + goto fail;
1532 +
1533 + #if defined(CONFIG_DRM_AMD_DC_DCN)
1534 +- if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
1535 ++ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
1536 + goto fail;
1537 +
1538 +- ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
1539 ++ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
1540 + if (ret)
1541 + goto fail;
1542 + #endif
1543 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1544 +index 5568d4e518e6b..a2e5ab0bd1a03 100644
1545 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1546 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1547 +@@ -495,12 +495,7 @@ struct dsc_mst_fairness_params {
1548 + uint32_t num_slices_h;
1549 + uint32_t num_slices_v;
1550 + uint32_t bpp_overwrite;
1551 +-};
1552 +-
1553 +-struct dsc_mst_fairness_vars {
1554 +- int pbn;
1555 +- bool dsc_enabled;
1556 +- int bpp_x16;
1557 ++ struct amdgpu_dm_connector *aconnector;
1558 + };
1559 +
1560 + static int kbps_to_peak_pbn(int kbps)
1561 +@@ -727,12 +722,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
1562 +
1563 + static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
1564 + struct dc_state *dc_state,
1565 +- struct dc_link *dc_link)
1566 ++ struct dc_link *dc_link,
1567 ++ struct dsc_mst_fairness_vars *vars)
1568 + {
1569 + int i;
1570 + struct dc_stream_state *stream;
1571 + struct dsc_mst_fairness_params params[MAX_PIPES];
1572 +- struct dsc_mst_fairness_vars vars[MAX_PIPES];
1573 + struct amdgpu_dm_connector *aconnector;
1574 + int count = 0;
1575 + bool debugfs_overwrite = false;
1576 +@@ -753,6 +748,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
1577 + params[count].timing = &stream->timing;
1578 + params[count].sink = stream->sink;
1579 + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
1580 ++ params[count].aconnector = aconnector;
1581 + params[count].port = aconnector->port;
1582 + params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
1583 + if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
1584 +@@ -775,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
1585 + }
1586 + /* Try no compression */
1587 + for (i = 0; i < count; i++) {
1588 ++ vars[i].aconnector = params[i].aconnector;
1589 + vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
1590 + vars[i].dsc_enabled = false;
1591 + vars[i].bpp_x16 = 0;
1592 +@@ -828,7 +825,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
1593 + }
1594 +
1595 + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
1596 +- struct dc_state *dc_state)
1597 ++ struct dc_state *dc_state,
1598 ++ struct dsc_mst_fairness_vars *vars)
1599 + {
1600 + int i, j;
1601 + struct dc_stream_state *stream;
1602 +@@ -859,7 +857,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
1603 + return false;
1604 +
1605 + mutex_lock(&aconnector->mst_mgr.lock);
1606 +- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
1607 ++ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
1608 + mutex_unlock(&aconnector->mst_mgr.lock);
1609 + return false;
1610 + }
1611 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
1612 +index b38bd68121ceb..900d3f7a84989 100644
1613 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
1614 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
1615 +@@ -39,8 +39,17 @@ void
1616 + dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
1617 +
1618 + #if defined(CONFIG_DRM_AMD_DC_DCN)
1619 ++
1620 ++struct dsc_mst_fairness_vars {
1621 ++ int pbn;
1622 ++ bool dsc_enabled;
1623 ++ int bpp_x16;
1624 ++ struct amdgpu_dm_connector *aconnector;
1625 ++};
1626 ++
1627 + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
1628 +- struct dc_state *dc_state);
1629 ++ struct dc_state *dc_state,
1630 ++ struct dsc_mst_fairness_vars *vars);
1631 + #endif
1632 +
1633 + #endif
1634 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1635 +index 6132b645bfd19..29c861b54b440 100644
1636 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1637 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1638 +@@ -2578,13 +2578,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
1639 +
1640 + int dc_link_get_backlight_level(const struct dc_link *link)
1641 + {
1642 +-
1643 + struct abm *abm = get_abm_from_stream_res(link);
1644 ++ struct panel_cntl *panel_cntl = link->panel_cntl;
1645 ++ struct dc *dc = link->ctx->dc;
1646 ++ struct dmcu *dmcu = dc->res_pool->dmcu;
1647 ++ bool fw_set_brightness = true;
1648 +
1649 +- if (abm == NULL || abm->funcs->get_current_backlight == NULL)
1650 +- return DC_ERROR_UNEXPECTED;
1651 ++ if (dmcu)
1652 ++ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
1653 +
1654 +- return (int) abm->funcs->get_current_backlight(abm);
1655 ++ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
1656 ++ return panel_cntl->funcs->get_current_backlight(panel_cntl);
1657 ++ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
1658 ++ return (int) abm->funcs->get_current_backlight(abm);
1659 ++ else
1660 ++ return DC_ERROR_UNEXPECTED;
1661 + }
1662 +
1663 + int dc_link_get_target_backlight_pwm(const struct dc_link *link)
1664 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
1665 +index e923392358631..e8570060d007b 100644
1666 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
1667 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
1668 +@@ -49,7 +49,6 @@
1669 + static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
1670 + {
1671 + uint64_t current_backlight;
1672 +- uint32_t round_result;
1673 + uint32_t bl_period, bl_int_count;
1674 + uint32_t bl_pwm, fractional_duty_cycle_en;
1675 + uint32_t bl_period_mask, bl_pwm_mask;
1676 +@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
1677 + current_backlight = div_u64(current_backlight, bl_period);
1678 + current_backlight = (current_backlight + 1) >> 1;
1679 +
1680 +- current_backlight = (uint64_t)(current_backlight) * bl_period;
1681 +-
1682 +- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
1683 +-
1684 +- round_result = (round_result >> (bl_int_count-1)) & 1;
1685 +-
1686 +- current_backlight >>= bl_int_count;
1687 +- current_backlight += round_result;
1688 +-
1689 + return (uint32_t)(current_backlight);
1690 + }
1691 +
1692 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1693 +index ebe6721428085..42e72a16a1128 100644
1694 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1695 ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
1696 +@@ -1381,7 +1381,7 @@ static int smu_disable_dpms(struct smu_context *smu)
1697 + */
1698 + if (smu->uploading_custom_pp_table &&
1699 + (adev->asic_type >= CHIP_NAVI10) &&
1700 +- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
1701 ++ (adev->asic_type <= CHIP_BEIGE_GOBY))
1702 + return smu_disable_all_features_with_exception(smu,
1703 + true,
1704 + SMU_FEATURE_COUNT);
1705 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1706 +index 1ba42b69ce742..23ada41351ad0 100644
1707 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1708 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
1709 +@@ -2269,7 +2269,27 @@ static int navi10_baco_enter(struct smu_context *smu)
1710 + {
1711 + struct amdgpu_device *adev = smu->adev;
1712 +
1713 +- if (adev->in_runpm)
1714 ++ /*
1715 ++ * This aims the case below:
1716 ++ * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
1717 ++ *
1718 ++ * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
1719 ++ * make that possible, PMFW needs to acknowledge the dstate transition
1720 ++ * process for both gfx(function 0) and audio(function 1) function of
1721 ++ * the ASIC.
1722 ++ *
1723 ++ * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the
1724 ++ * device representing the audio function of the ASIC. And that means
1725 ++ * even if the sound driver(snd_hda_intel) was not loaded yet, it's still
1726 ++ * possible runpm suspend kicked on the ASIC. However without the dstate
1727 ++ * transition notification from audio function, pmfw cannot handle the
1728 ++ * BACO in/exit correctly. And that will cause driver hang on runpm
1729 ++ * resuming.
1730 ++ *
1731 ++ * To address this, we revert to legacy message way(driver masters the
1732 ++ * timing for BACO in/exit) on sound driver missing.
1733 ++ */
1734 ++ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1735 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1736 + else
1737 + return smu_v11_0_baco_enter(smu);
1738 +@@ -2279,7 +2299,7 @@ static int navi10_baco_exit(struct smu_context *smu)
1739 + {
1740 + struct amdgpu_device *adev = smu->adev;
1741 +
1742 +- if (adev->in_runpm) {
1743 ++ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1744 + /* Wait for PMFW handling for the Dstate change */
1745 + msleep(10);
1746 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1747 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1748 +index d92dd2c7448e3..9b170bd12c1b6 100644
1749 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1750 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
1751 +@@ -2133,7 +2133,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
1752 + {
1753 + struct amdgpu_device *adev = smu->adev;
1754 +
1755 +- if (adev->in_runpm)
1756 ++ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1757 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1758 + else
1759 + return smu_v11_0_baco_enter(smu);
1760 +@@ -2143,7 +2143,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
1761 + {
1762 + struct amdgpu_device *adev = smu->adev;
1763 +
1764 +- if (adev->in_runpm) {
1765 ++ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1766 + /* Wait for PMFW handling for the Dstate change */
1767 + msleep(10);
1768 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1769 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1770 +index 415be74df28c7..54881cce1b06c 100644
1771 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1772 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
1773 +@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
1774 +
1775 + return ret;
1776 + }
1777 ++
1778 ++bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
1779 ++{
1780 ++ struct pci_dev *p = NULL;
1781 ++ bool snd_driver_loaded;
1782 ++
1783 ++ /*
1784 ++ * If the ASIC comes with no audio function, we always assume
1785 ++ * it is "enabled".
1786 ++ */
1787 ++ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
1788 ++ adev->pdev->bus->number, 1);
1789 ++ if (!p)
1790 ++ return true;
1791 ++
1792 ++ snd_driver_loaded = pci_is_enabled(p) ? true : false;
1793 ++
1794 ++ pci_dev_put(p);
1795 ++
1796 ++ return snd_driver_loaded;
1797 ++}
1798 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
1799 +index 16993daa2ae04..b1d41360a3897 100644
1800 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
1801 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
1802 +@@ -110,5 +110,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
1803 + int smu_cmn_set_mp1_state(struct smu_context *smu,
1804 + enum pp_mp1_state mp1_state);
1805 +
1806 ++bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
1807 ++
1808 + #endif
1809 + #endif
1810 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1811 +index 76d38561c9103..cf741c5c82d25 100644
1812 +--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1813 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1814 +@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
1815 + if (switch_mmu_context) {
1816 + struct etnaviv_iommu_context *old_context = gpu->mmu_context;
1817 +
1818 +- etnaviv_iommu_context_get(mmu_context);
1819 +- gpu->mmu_context = mmu_context;
1820 ++ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
1821 + etnaviv_iommu_context_put(old_context);
1822 + }
1823 +
1824 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1825 +index b8fa6ed3dd738..fb7a33b88fc0b 100644
1826 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1827 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1828 +@@ -303,8 +303,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
1829 + list_del(&mapping->obj_node);
1830 + }
1831 +
1832 +- etnaviv_iommu_context_get(mmu_context);
1833 +- mapping->context = mmu_context;
1834 ++ mapping->context = etnaviv_iommu_context_get(mmu_context);
1835 + mapping->use = 1;
1836 +
1837 + ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
1838 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1839 +index 4dd7d9d541c09..486259e154aff 100644
1840 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1841 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1842 +@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
1843 + goto err_submit_objects;
1844 +
1845 + submit->ctx = file->driver_priv;
1846 +- etnaviv_iommu_context_get(submit->ctx->mmu);
1847 +- submit->mmu_context = submit->ctx->mmu;
1848 ++ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
1849 + submit->exec_state = args->exec_state;
1850 + submit->flags = args->flags;
1851 +
1852 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1853 +index 4102bcea33413..1fa98ce870f78 100644
1854 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1855 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1856 +@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
1857 + /* We rely on the GPU running, so program the clock */
1858 + etnaviv_gpu_update_clock(gpu);
1859 +
1860 ++ gpu->fe_running = false;
1861 ++ gpu->exec_state = -1;
1862 ++ if (gpu->mmu_context)
1863 ++ etnaviv_iommu_context_put(gpu->mmu_context);
1864 ++ gpu->mmu_context = NULL;
1865 ++
1866 + return 0;
1867 + }
1868 +
1869 +@@ -631,19 +637,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
1870 + VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
1871 + VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
1872 + }
1873 ++
1874 ++ gpu->fe_running = true;
1875 + }
1876 +
1877 +-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
1878 ++static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
1879 ++ struct etnaviv_iommu_context *context)
1880 + {
1881 +- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
1882 +- &gpu->mmu_context->cmdbuf_mapping);
1883 + u16 prefetch;
1884 ++ u32 address;
1885 +
1886 + /* setup the MMU */
1887 +- etnaviv_iommu_restore(gpu, gpu->mmu_context);
1888 ++ etnaviv_iommu_restore(gpu, context);
1889 +
1890 + /* Start command processor */
1891 + prefetch = etnaviv_buffer_init(gpu);
1892 ++ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
1893 ++ &gpu->mmu_context->cmdbuf_mapping);
1894 +
1895 + etnaviv_gpu_start_fe(gpu, address, prefetch);
1896 + }
1897 +@@ -826,7 +836,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
1898 + /* Now program the hardware */
1899 + mutex_lock(&gpu->lock);
1900 + etnaviv_gpu_hw_init(gpu);
1901 +- gpu->exec_state = -1;
1902 + mutex_unlock(&gpu->lock);
1903 +
1904 + pm_runtime_mark_last_busy(gpu->dev);
1905 +@@ -1051,8 +1060,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
1906 + spin_unlock(&gpu->event_spinlock);
1907 +
1908 + etnaviv_gpu_hw_init(gpu);
1909 +- gpu->exec_state = -1;
1910 +- gpu->mmu_context = NULL;
1911 +
1912 + mutex_unlock(&gpu->lock);
1913 + pm_runtime_mark_last_busy(gpu->dev);
1914 +@@ -1364,14 +1371,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1915 + goto out_unlock;
1916 + }
1917 +
1918 +- if (!gpu->mmu_context) {
1919 +- etnaviv_iommu_context_get(submit->mmu_context);
1920 +- gpu->mmu_context = submit->mmu_context;
1921 +- etnaviv_gpu_start_fe_idleloop(gpu);
1922 +- } else {
1923 +- etnaviv_iommu_context_get(gpu->mmu_context);
1924 +- submit->prev_mmu_context = gpu->mmu_context;
1925 +- }
1926 ++ if (!gpu->fe_running)
1927 ++ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
1928 ++
1929 ++ if (submit->prev_mmu_context)
1930 ++ etnaviv_iommu_context_put(submit->prev_mmu_context);
1931 ++ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
1932 +
1933 + if (submit->nr_pmrs) {
1934 + gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1935 +@@ -1573,7 +1578,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1936 +
1937 + static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1938 + {
1939 +- if (gpu->initialized && gpu->mmu_context) {
1940 ++ if (gpu->initialized && gpu->fe_running) {
1941 + /* Replace the last WAIT with END */
1942 + mutex_lock(&gpu->lock);
1943 + etnaviv_buffer_end(gpu);
1944 +@@ -1586,8 +1591,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1945 + */
1946 + etnaviv_gpu_wait_idle(gpu, 100);
1947 +
1948 +- etnaviv_iommu_context_put(gpu->mmu_context);
1949 +- gpu->mmu_context = NULL;
1950 ++ gpu->fe_running = false;
1951 + }
1952 +
1953 + gpu->exec_state = -1;
1954 +@@ -1735,6 +1739,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1955 + etnaviv_gpu_hw_suspend(gpu);
1956 + #endif
1957 +
1958 ++ if (gpu->mmu_context)
1959 ++ etnaviv_iommu_context_put(gpu->mmu_context);
1960 ++
1961 + if (gpu->initialized) {
1962 + etnaviv_cmdbuf_free(&gpu->buffer);
1963 + etnaviv_iommu_global_fini(gpu);
1964 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
1965 +index 8ea48697d1321..1c75c8ed5bcea 100644
1966 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
1967 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
1968 +@@ -101,6 +101,7 @@ struct etnaviv_gpu {
1969 + struct workqueue_struct *wq;
1970 + struct drm_gpu_scheduler sched;
1971 + bool initialized;
1972 ++ bool fe_running;
1973 +
1974 + /* 'ring'-buffer: */
1975 + struct etnaviv_cmdbuf buffer;
1976 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
1977 +index 1a7c89a67bea3..afe5dd6a9925b 100644
1978 +--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
1979 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
1980 +@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
1981 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
1982 + u32 pgtable;
1983 +
1984 ++ if (gpu->mmu_context)
1985 ++ etnaviv_iommu_context_put(gpu->mmu_context);
1986 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
1987 ++
1988 + /* set base addresses */
1989 + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
1990 + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
1991 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
1992 +index f8bf488e9d717..d664ae29ae209 100644
1993 +--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
1994 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
1995 +@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
1996 + if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
1997 + return;
1998 +
1999 ++ if (gpu->mmu_context)
2000 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2001 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
2002 ++
2003 + prefetch = etnaviv_buffer_config_mmuv2(gpu,
2004 + (u32)v2_context->mtlb_dma,
2005 + (u32)context->global->bad_page_dma);
2006 +@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
2007 + if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
2008 + return;
2009 +
2010 ++ if (gpu->mmu_context)
2011 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2012 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
2013 ++
2014 + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
2015 + lower_32_bits(context->global->v2.pta_dma));
2016 + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
2017 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2018 +index dab1b58006d83..9fb1a2aadbcb0 100644
2019 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2020 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2021 +@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
2022 + */
2023 + list_for_each_entry_safe(m, n, &list, scan_node) {
2024 + etnaviv_iommu_remove_mapping(context, m);
2025 ++ etnaviv_iommu_context_put(m->context);
2026 + m->context = NULL;
2027 + list_del_init(&m->mmu_node);
2028 + list_del_init(&m->scan_node);
2029 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2030 +index d1d6902fd13be..e4a0b7d09c2ea 100644
2031 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2032 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2033 +@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
2034 + struct etnaviv_iommu_context *
2035 + etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
2036 + struct etnaviv_cmdbuf_suballoc *suballoc);
2037 +-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
2038 ++static inline struct etnaviv_iommu_context *
2039 ++etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
2040 + {
2041 + kref_get(&ctx->refcount);
2042 ++ return ctx;
2043 + }
2044 + void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
2045 + void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
2046 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
2047 +index 862c1df69cc2a..d511e578ba79d 100644
2048 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
2049 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
2050 +@@ -2453,11 +2453,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
2051 + */
2052 + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
2053 + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
2054 +- sizeof(intel_dp->edp_dpcd))
2055 ++ sizeof(intel_dp->edp_dpcd)) {
2056 + drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
2057 + (int)sizeof(intel_dp->edp_dpcd),
2058 + intel_dp->edp_dpcd);
2059 +
2060 ++ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
2061 ++ }
2062 ++
2063 + /*
2064 + * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
2065 + * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
2066 +diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
2067 +index 053a3c2f72677..508a514c5e37d 100644
2068 +--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
2069 ++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
2070 +@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
2071 + }
2072 +
2073 + if (ret)
2074 +- intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
2075 ++ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
2076 +
2077 + if (intel_dp->set_idle_link_train)
2078 + intel_dp->set_idle_link_train(intel_dp, crtc_state);
2079 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
2080 +index 0473583dcdac2..482fb0ae6cb5d 100644
2081 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
2082 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
2083 +@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
2084 + #endif
2085 +
2086 + if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
2087 +- rdev->agp = radeon_agp_head_init(rdev->ddev);
2088 ++ rdev->agp = radeon_agp_head_init(dev);
2089 + if (rdev->agp) {
2090 + rdev->agp->agp_mtrr = arch_phys_wc_add(
2091 + rdev->agp->agp_info.aper_base,
2092 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
2093 +index 8ab3247dbc4aa..13c6b857158fc 100644
2094 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
2095 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
2096 +@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
2097 + return ret;
2098 + }
2099 +
2100 +-static int cdn_dp_resume(struct device *dev)
2101 ++static __maybe_unused int cdn_dp_resume(struct device *dev)
2102 + {
2103 + struct cdn_dp_device *dp = dev_get_drvdata(dev);
2104 +
2105 +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
2106 +index 2aee356840a2b..314015d9e912d 100644
2107 +--- a/drivers/hv/ring_buffer.c
2108 ++++ b/drivers/hv/ring_buffer.c
2109 +@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
2110 + mutex_unlock(&ring_info->ring_buffer_mutex);
2111 +
2112 + kfree(ring_info->pkt_buffer);
2113 ++ ring_info->pkt_buffer = NULL;
2114 + ring_info->pkt_buffer_size = 0;
2115 + }
2116 +
2117 +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
2118 +index 30489670ea528..cca0aac261486 100644
2119 +--- a/drivers/mfd/ab8500-core.c
2120 ++++ b/drivers/mfd/ab8500-core.c
2121 +@@ -485,7 +485,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
2122 + if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
2123 + line += 1;
2124 +
2125 +- handle_nested_irq(irq_create_mapping(ab8500->domain, line));
2126 ++ handle_nested_irq(irq_find_mapping(ab8500->domain, line));
2127 + }
2128 +
2129 + return 0;
2130 +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
2131 +index 4145a38b38904..d0ac019850d17 100644
2132 +--- a/drivers/mfd/axp20x.c
2133 ++++ b/drivers/mfd/axp20x.c
2134 +@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
2135 +
2136 + static const struct regmap_range axp288_volatile_ranges[] = {
2137 + regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
2138 ++ regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
2139 + regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
2140 + regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
2141 + regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
2142 + regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
2143 + regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
2144 +- regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
2145 ++ regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
2146 + regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
2147 + regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
2148 + };
2149 +diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
2150 +index 3bde7fda755f1..dea4e4e8bed54 100644
2151 +--- a/drivers/mfd/db8500-prcmu.c
2152 ++++ b/drivers/mfd/db8500-prcmu.c
2153 +@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
2154 + }
2155 +
2156 + static const unsigned long db8500_armss_freqs[] = {
2157 +- 200000000,
2158 +- 400000000,
2159 +- 800000000,
2160 ++ 199680000,
2161 ++ 399360000,
2162 ++ 798720000,
2163 + 998400000
2164 + };
2165 +
2166 + /* The DB8520 has slightly higher ARMSS max frequency */
2167 + static const unsigned long db8520_armss_freqs[] = {
2168 +- 200000000,
2169 +- 400000000,
2170 +- 800000000,
2171 ++ 199680000,
2172 ++ 399360000,
2173 ++ 798720000,
2174 + 1152000000
2175 + };
2176 +
2177 +-
2178 +-
2179 + static long round_armss_rate(unsigned long rate)
2180 + {
2181 + unsigned long freq = 0;
2182 +diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
2183 +index 428a526cbe863..9ab9adce06fdd 100644
2184 +--- a/drivers/mfd/lpc_sch.c
2185 ++++ b/drivers/mfd/lpc_sch.c
2186 +@@ -22,7 +22,7 @@
2187 + #define SMBASE 0x40
2188 + #define SMBUS_IO_SIZE 64
2189 +
2190 +-#define GPIOBASE 0x44
2191 ++#define GPIO_BASE 0x44
2192 + #define GPIO_IO_SIZE 64
2193 + #define GPIO_IO_SIZE_CENTERTON 128
2194 +
2195 +@@ -145,7 +145,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
2196 + if (ret == 0)
2197 + cells++;
2198 +
2199 +- ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
2200 ++ ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
2201 + info->io_size_gpio,
2202 + id->device, &lpc_sch_cells[cells]);
2203 + if (ret < 0)
2204 +diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
2205 +index 1dd39483e7c14..58d09c615e673 100644
2206 +--- a/drivers/mfd/stmpe.c
2207 ++++ b/drivers/mfd/stmpe.c
2208 +@@ -1095,7 +1095,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2209 +
2210 + if (variant->id_val == STMPE801_ID ||
2211 + variant->id_val == STMPE1600_ID) {
2212 +- int base = irq_create_mapping(stmpe->domain, 0);
2213 ++ int base = irq_find_mapping(stmpe->domain, 0);
2214 +
2215 + handle_nested_irq(base);
2216 + return IRQ_HANDLED;
2217 +@@ -1123,7 +1123,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2218 + while (status) {
2219 + int bit = __ffs(status);
2220 + int line = bank * 8 + bit;
2221 +- int nestedirq = irq_create_mapping(stmpe->domain, line);
2222 ++ int nestedirq = irq_find_mapping(stmpe->domain, line);
2223 +
2224 + handle_nested_irq(nestedirq);
2225 + status &= ~(1 << bit);
2226 +diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
2227 +index 7614f8fe0e91c..13583cdb93b6f 100644
2228 +--- a/drivers/mfd/tc3589x.c
2229 ++++ b/drivers/mfd/tc3589x.c
2230 +@@ -187,7 +187,7 @@ again:
2231 +
2232 + while (status) {
2233 + int bit = __ffs(status);
2234 +- int virq = irq_create_mapping(tc3589x->domain, bit);
2235 ++ int virq = irq_find_mapping(tc3589x->domain, bit);
2236 +
2237 + handle_nested_irq(virq);
2238 + status &= ~(1 << bit);
2239 +diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
2240 +index ddddf08b6a4cc..732013f40e4e8 100644
2241 +--- a/drivers/mfd/tqmx86.c
2242 ++++ b/drivers/mfd/tqmx86.c
2243 +@@ -209,6 +209,8 @@ static int tqmx86_probe(struct platform_device *pdev)
2244 +
2245 + /* Assumes the IRQ resource is first. */
2246 + tqmx_gpio_resources[0].start = gpio_irq;
2247 ++ } else {
2248 ++ tqmx_gpio_resources[0].flags = 0;
2249 + }
2250 +
2251 + ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
2252 +diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
2253 +index 6c3a619e26286..651a028bc519a 100644
2254 +--- a/drivers/mfd/wm8994-irq.c
2255 ++++ b/drivers/mfd/wm8994-irq.c
2256 +@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
2257 + struct wm8994 *wm8994 = data;
2258 +
2259 + while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
2260 +- handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
2261 ++ handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
2262 +
2263 + return IRQ_HANDLED;
2264 + }
2265 +diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
2266 +index 6e4d0017c0bd4..f685a581df481 100644
2267 +--- a/drivers/mtd/mtdconcat.c
2268 ++++ b/drivers/mtd/mtdconcat.c
2269 +@@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
2270 + int i;
2271 + size_t size;
2272 + struct mtd_concat *concat;
2273 ++ struct mtd_info *subdev_master = NULL;
2274 + uint32_t max_erasesize, curr_erasesize;
2275 + int num_erase_region;
2276 + int max_writebufsize = 0;
2277 +@@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
2278 + concat->mtd.subpage_sft = subdev[0]->subpage_sft;
2279 + concat->mtd.oobsize = subdev[0]->oobsize;
2280 + concat->mtd.oobavail = subdev[0]->oobavail;
2281 +- if (subdev[0]->_writev)
2282 ++
2283 ++ subdev_master = mtd_get_master(subdev[0]);
2284 ++ if (subdev_master->_writev)
2285 + concat->mtd._writev = concat_writev;
2286 +- if (subdev[0]->_read_oob)
2287 ++ if (subdev_master->_read_oob)
2288 + concat->mtd._read_oob = concat_read_oob;
2289 +- if (subdev[0]->_write_oob)
2290 ++ if (subdev_master->_write_oob)
2291 + concat->mtd._write_oob = concat_write_oob;
2292 +- if (subdev[0]->_block_isbad)
2293 ++ if (subdev_master->_block_isbad)
2294 + concat->mtd._block_isbad = concat_block_isbad;
2295 +- if (subdev[0]->_block_markbad)
2296 ++ if (subdev_master->_block_markbad)
2297 + concat->mtd._block_markbad = concat_block_markbad;
2298 +- if (subdev[0]->_panic_write)
2299 ++ if (subdev_master->_panic_write)
2300 + concat->mtd._panic_write = concat_panic_write;
2301 ++ if (subdev_master->_read)
2302 ++ concat->mtd._read = concat_read;
2303 ++ if (subdev_master->_write)
2304 ++ concat->mtd._write = concat_write;
2305 +
2306 + concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
2307 +
2308 +@@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
2309 + subdev[i]->flags & MTD_WRITEABLE;
2310 + }
2311 +
2312 ++ subdev_master = mtd_get_master(subdev[i]);
2313 + concat->mtd.size += subdev[i]->size;
2314 + concat->mtd.ecc_stats.badblocks +=
2315 + subdev[i]->ecc_stats.badblocks;
2316 + if (concat->mtd.writesize != subdev[i]->writesize ||
2317 + concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
2318 + concat->mtd.oobsize != subdev[i]->oobsize ||
2319 +- !concat->mtd._read_oob != !subdev[i]->_read_oob ||
2320 +- !concat->mtd._write_oob != !subdev[i]->_write_oob) {
2321 ++ !concat->mtd._read_oob != !subdev_master->_read_oob ||
2322 ++ !concat->mtd._write_oob != !subdev_master->_write_oob) {
2323 ++ /*
2324 ++ * Check against subdev[i] for data members, because
2325 ++ * subdev's attributes may be different from master
2326 ++ * mtd device. Check against subdev's master mtd
2327 ++ * device for callbacks, because the existence of
2328 ++ * subdev's callbacks is decided by master mtd device.
2329 ++ */
2330 + kfree(concat);
2331 + printk("Incompatible OOB or ECC data on \"%s\"\n",
2332 + subdev[i]->name);
2333 +@@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
2334 + concat->mtd.name = name;
2335 +
2336 + concat->mtd._erase = concat_erase;
2337 +- concat->mtd._read = concat_read;
2338 +- concat->mtd._write = concat_write;
2339 + concat->mtd._sync = concat_sync;
2340 + concat->mtd._lock = concat_lock;
2341 + concat->mtd._unlock = concat_unlock;
2342 +diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
2343 +index d0e8ffd55c224..9dbf031716a61 100644
2344 +--- a/drivers/mtd/nand/raw/cafe_nand.c
2345 ++++ b/drivers/mtd/nand/raw/cafe_nand.c
2346 +@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2347 + "CAFE NAND", mtd);
2348 + if (err) {
2349 + dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
2350 +- goto out_ior;
2351 ++ goto out_free_rs;
2352 + }
2353 +
2354 + /* Disable master reset, enable NAND clock */
2355 +@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2356 + /* Disable NAND IRQ in global IRQ mask register */
2357 + cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
2358 + free_irq(pdev->irq, mtd);
2359 ++ out_free_rs:
2360 ++ free_rs(cafe->rs);
2361 + out_ior:
2362 + pci_iounmap(pdev, cafe->mmio);
2363 + out_free_mtd:
2364 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
2365 +index bd1417a66cbf2..604f541126654 100644
2366 +--- a/drivers/net/dsa/b53/b53_common.c
2367 ++++ b/drivers/net/dsa/b53/b53_common.c
2368 +@@ -1144,7 +1144,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
2369 + u8 reg, val, off;
2370 +
2371 + /* Override the port settings */
2372 +- if (port == dev->cpu_port) {
2373 ++ if (port == dev->imp_port) {
2374 + off = B53_PORT_OVERRIDE_CTRL;
2375 + val = PORT_OVERRIDE_EN;
2376 + } else {
2377 +@@ -1168,7 +1168,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
2378 + u8 reg, val, off;
2379 +
2380 + /* Override the port settings */
2381 +- if (port == dev->cpu_port) {
2382 ++ if (port == dev->imp_port) {
2383 + off = B53_PORT_OVERRIDE_CTRL;
2384 + val = PORT_OVERRIDE_EN;
2385 + } else {
2386 +@@ -1236,7 +1236,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
2387 + b53_force_link(dev, port, phydev->link);
2388 +
2389 + if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
2390 +- if (port == 8)
2391 ++ if (port == dev->imp_port)
2392 + off = B53_RGMII_CTRL_IMP;
2393 + else
2394 + off = B53_RGMII_CTRL_P(port);
2395 +@@ -2280,6 +2280,7 @@ struct b53_chip_data {
2396 + const char *dev_name;
2397 + u16 vlans;
2398 + u16 enabled_ports;
2399 ++ u8 imp_port;
2400 + u8 cpu_port;
2401 + u8 vta_regs[3];
2402 + u8 arl_bins;
2403 +@@ -2304,6 +2305,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2404 + .enabled_ports = 0x1f,
2405 + .arl_bins = 2,
2406 + .arl_buckets = 1024,
2407 ++ .imp_port = 5,
2408 + .cpu_port = B53_CPU_PORT_25,
2409 + .duplex_reg = B53_DUPLEX_STAT_FE,
2410 + },
2411 +@@ -2314,6 +2316,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2412 + .enabled_ports = 0x1f,
2413 + .arl_bins = 2,
2414 + .arl_buckets = 1024,
2415 ++ .imp_port = 5,
2416 + .cpu_port = B53_CPU_PORT_25,
2417 + .duplex_reg = B53_DUPLEX_STAT_FE,
2418 + },
2419 +@@ -2324,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2420 + .enabled_ports = 0x1f,
2421 + .arl_bins = 4,
2422 + .arl_buckets = 1024,
2423 ++ .imp_port = 8,
2424 + .cpu_port = B53_CPU_PORT,
2425 + .vta_regs = B53_VTA_REGS,
2426 + .duplex_reg = B53_DUPLEX_STAT_GE,
2427 +@@ -2337,6 +2341,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2428 + .enabled_ports = 0x1f,
2429 + .arl_bins = 4,
2430 + .arl_buckets = 1024,
2431 ++ .imp_port = 8,
2432 + .cpu_port = B53_CPU_PORT,
2433 + .vta_regs = B53_VTA_REGS,
2434 + .duplex_reg = B53_DUPLEX_STAT_GE,
2435 +@@ -2350,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2436 + .enabled_ports = 0x1f,
2437 + .arl_bins = 4,
2438 + .arl_buckets = 1024,
2439 ++ .imp_port = 8,
2440 + .cpu_port = B53_CPU_PORT,
2441 + .vta_regs = B53_VTA_REGS_9798,
2442 + .duplex_reg = B53_DUPLEX_STAT_GE,
2443 +@@ -2363,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2444 + .enabled_ports = 0x7f,
2445 + .arl_bins = 4,
2446 + .arl_buckets = 1024,
2447 ++ .imp_port = 8,
2448 + .cpu_port = B53_CPU_PORT,
2449 + .vta_regs = B53_VTA_REGS_9798,
2450 + .duplex_reg = B53_DUPLEX_STAT_GE,
2451 +@@ -2377,6 +2384,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2452 + .arl_bins = 4,
2453 + .arl_buckets = 1024,
2454 + .vta_regs = B53_VTA_REGS,
2455 ++ .imp_port = 8,
2456 + .cpu_port = B53_CPU_PORT,
2457 + .duplex_reg = B53_DUPLEX_STAT_GE,
2458 + .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2459 +@@ -2389,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2460 + .enabled_ports = 0xff,
2461 + .arl_bins = 4,
2462 + .arl_buckets = 1024,
2463 ++ .imp_port = 8,
2464 + .cpu_port = B53_CPU_PORT,
2465 + .vta_regs = B53_VTA_REGS,
2466 + .duplex_reg = B53_DUPLEX_STAT_GE,
2467 +@@ -2402,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2468 + .enabled_ports = 0x1ff,
2469 + .arl_bins = 4,
2470 + .arl_buckets = 1024,
2471 ++ .imp_port = 8,
2472 + .cpu_port = B53_CPU_PORT,
2473 + .vta_regs = B53_VTA_REGS,
2474 + .duplex_reg = B53_DUPLEX_STAT_GE,
2475 +@@ -2415,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2476 + .enabled_ports = 0, /* pdata must provide them */
2477 + .arl_bins = 4,
2478 + .arl_buckets = 1024,
2479 ++ .imp_port = 8,
2480 + .cpu_port = B53_CPU_PORT,
2481 + .vta_regs = B53_VTA_REGS_63XX,
2482 + .duplex_reg = B53_DUPLEX_STAT_63XX,
2483 +@@ -2428,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2484 + .enabled_ports = 0x1f,
2485 + .arl_bins = 4,
2486 + .arl_buckets = 1024,
2487 ++ .imp_port = 8,
2488 + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2489 + .vta_regs = B53_VTA_REGS,
2490 + .duplex_reg = B53_DUPLEX_STAT_GE,
2491 +@@ -2441,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2492 + .enabled_ports = 0x1bf,
2493 + .arl_bins = 4,
2494 + .arl_buckets = 1024,
2495 ++ .imp_port = 8,
2496 + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2497 + .vta_regs = B53_VTA_REGS,
2498 + .duplex_reg = B53_DUPLEX_STAT_GE,
2499 +@@ -2454,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2500 + .enabled_ports = 0x1bf,
2501 + .arl_bins = 4,
2502 + .arl_buckets = 1024,
2503 ++ .imp_port = 8,
2504 + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2505 + .vta_regs = B53_VTA_REGS,
2506 + .duplex_reg = B53_DUPLEX_STAT_GE,
2507 +@@ -2467,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2508 + .enabled_ports = 0x1f,
2509 + .arl_bins = 4,
2510 + .arl_buckets = 1024,
2511 ++ .imp_port = 8,
2512 + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2513 + .vta_regs = B53_VTA_REGS,
2514 + .duplex_reg = B53_DUPLEX_STAT_GE,
2515 +@@ -2480,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2516 + .enabled_ports = 0x1f,
2517 + .arl_bins = 4,
2518 + .arl_buckets = 1024,
2519 ++ .imp_port = 8,
2520 + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2521 + .vta_regs = B53_VTA_REGS,
2522 + .duplex_reg = B53_DUPLEX_STAT_GE,
2523 +@@ -2493,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2524 + .enabled_ports = 0x1ff,
2525 + .arl_bins = 4,
2526 + .arl_buckets = 1024,
2527 ++ .imp_port = 8,
2528 + .cpu_port = B53_CPU_PORT,
2529 + .vta_regs = B53_VTA_REGS,
2530 + .duplex_reg = B53_DUPLEX_STAT_GE,
2531 +@@ -2506,6 +2523,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2532 + .enabled_ports = 0x103,
2533 + .arl_bins = 4,
2534 + .arl_buckets = 1024,
2535 ++ .imp_port = 8,
2536 + .cpu_port = B53_CPU_PORT,
2537 + .vta_regs = B53_VTA_REGS,
2538 + .duplex_reg = B53_DUPLEX_STAT_GE,
2539 +@@ -2520,6 +2538,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2540 + .enabled_ports = 0x1bf,
2541 + .arl_bins = 4,
2542 + .arl_buckets = 256,
2543 ++ .imp_port = 8,
2544 + .cpu_port = 8, /* TODO: ports 4, 5, 8 */
2545 + .vta_regs = B53_VTA_REGS,
2546 + .duplex_reg = B53_DUPLEX_STAT_GE,
2547 +@@ -2533,6 +2552,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2548 + .enabled_ports = 0x1ff,
2549 + .arl_bins = 4,
2550 + .arl_buckets = 1024,
2551 ++ .imp_port = 8,
2552 + .cpu_port = B53_CPU_PORT,
2553 + .vta_regs = B53_VTA_REGS,
2554 + .duplex_reg = B53_DUPLEX_STAT_GE,
2555 +@@ -2546,6 +2566,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
2556 + .enabled_ports = 0x1ff,
2557 + .arl_bins = 4,
2558 + .arl_buckets = 256,
2559 ++ .imp_port = 8,
2560 + .cpu_port = B53_CPU_PORT,
2561 + .vta_regs = B53_VTA_REGS,
2562 + .duplex_reg = B53_DUPLEX_STAT_GE,
2563 +@@ -2571,6 +2592,7 @@ static int b53_switch_init(struct b53_device *dev)
2564 + dev->vta_regs[1] = chip->vta_regs[1];
2565 + dev->vta_regs[2] = chip->vta_regs[2];
2566 + dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2567 ++ dev->imp_port = chip->imp_port;
2568 + dev->cpu_port = chip->cpu_port;
2569 + dev->num_vlans = chip->vlans;
2570 + dev->num_arl_bins = chip->arl_bins;
2571 +@@ -2612,9 +2634,10 @@ static int b53_switch_init(struct b53_device *dev)
2572 + dev->cpu_port = 5;
2573 + }
2574 +
2575 +- /* cpu port is always last */
2576 +- dev->num_ports = dev->cpu_port + 1;
2577 + dev->enabled_ports |= BIT(dev->cpu_port);
2578 ++ dev->num_ports = fls(dev->enabled_ports);
2579 ++
2580 ++ dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
2581 +
2582 + /* Include non standard CPU port built-in PHYs to be probed */
2583 + if (is539x(dev) || is531x5(dev)) {
2584 +@@ -2660,7 +2683,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
2585 + return NULL;
2586 +
2587 + ds->dev = base;
2588 +- ds->num_ports = DSA_MAX_PORTS;
2589 +
2590 + dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2591 + if (!dev)
2592 +diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
2593 +index 9bf8319342b0b..5d068acf7cf81 100644
2594 +--- a/drivers/net/dsa/b53/b53_priv.h
2595 ++++ b/drivers/net/dsa/b53/b53_priv.h
2596 +@@ -123,6 +123,7 @@ struct b53_device {
2597 +
2598 + /* used ports mask */
2599 + u16 enabled_ports;
2600 ++ unsigned int imp_port;
2601 + unsigned int cpu_port;
2602 +
2603 + /* connect specific data */
2604 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
2605 +index 6ce9ec1283e05..b6c4b3adb1715 100644
2606 +--- a/drivers/net/dsa/bcm_sf2.c
2607 ++++ b/drivers/net/dsa/bcm_sf2.c
2608 +@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
2609 + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
2610 + unsigned int port, count = 0;
2611 +
2612 +- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
2613 ++ for (port = 0; port < ds->num_ports; port++) {
2614 + if (dsa_is_cpu_port(ds, port))
2615 + continue;
2616 + if (priv->port_sts[port].enabled)
2617 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
2618 +index 64d6dfa831220..267324889dd64 100644
2619 +--- a/drivers/net/dsa/lantiq_gswip.c
2620 ++++ b/drivers/net/dsa/lantiq_gswip.c
2621 +@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
2622 +
2623 + reset_control_assert(gphy_fw->reset);
2624 +
2625 ++ /* The vendor BSP uses a 200ms delay after asserting the reset line.
2626 ++ * Without this some users are observing that the PHY is not coming up
2627 ++ * on the MDIO bus.
2628 ++ */
2629 ++ msleep(200);
2630 ++
2631 + ret = request_firmware(&fw, gphy_fw->fw_name, dev);
2632 + if (ret) {
2633 + dev_err(dev, "failed to load firmware: %s, error: %i\n",
2634 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
2635 +index 1f63f50f73f17..bda5a9bf4f529 100644
2636 +--- a/drivers/net/dsa/qca8k.c
2637 ++++ b/drivers/net/dsa/qca8k.c
2638 +@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
2639 + }
2640 +
2641 + static int
2642 +-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
2643 ++qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
2644 + {
2645 +- struct qca8k_priv *priv = salve_bus->priv;
2646 +- struct mii_bus *bus = priv->bus;
2647 + u16 r1, r2, page;
2648 + u32 val;
2649 + int ret;
2650 +@@ -682,10 +680,8 @@ exit:
2651 + }
2652 +
2653 + static int
2654 +-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
2655 ++qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
2656 + {
2657 +- struct qca8k_priv *priv = salve_bus->priv;
2658 +- struct mii_bus *bus = priv->bus;
2659 + u16 r1, r2, page;
2660 + u32 val;
2661 + int ret;
2662 +@@ -726,6 +722,24 @@ exit:
2663 + return ret;
2664 + }
2665 +
2666 ++static int
2667 ++qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
2668 ++{
2669 ++ struct qca8k_priv *priv = slave_bus->priv;
2670 ++ struct mii_bus *bus = priv->bus;
2671 ++
2672 ++ return qca8k_mdio_write(bus, phy, regnum, data);
2673 ++}
2674 ++
2675 ++static int
2676 ++qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
2677 ++{
2678 ++ struct qca8k_priv *priv = slave_bus->priv;
2679 ++ struct mii_bus *bus = priv->bus;
2680 ++
2681 ++ return qca8k_mdio_read(bus, phy, regnum);
2682 ++}
2683 ++
2684 + static int
2685 + qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
2686 + {
2687 +@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
2688 +
2689 + bus->priv = (void *)priv;
2690 + bus->name = "qca8k slave mii";
2691 +- bus->read = qca8k_mdio_read;
2692 +- bus->write = qca8k_mdio_write;
2693 ++ bus->read = qca8k_internal_mdio_read;
2694 ++ bus->write = qca8k_internal_mdio_write;
2695 + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
2696 + ds->index);
2697 +
2698 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2699 +index 27943b0446c28..a207c36246b6a 100644
2700 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2701 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2702 +@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
2703 +
2704 + /* SR-IOV capability was enabled but there are no VFs*/
2705 + if (iov->total == 0) {
2706 +- err = -EINVAL;
2707 ++ err = 0;
2708 + goto failed;
2709 + }
2710 +
2711 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2712 +index 8a97640cdfe76..fdbf47446a997 100644
2713 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2714 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2715 +@@ -2172,25 +2172,33 @@ static int bnxt_async_event_process(struct bnxt *bp,
2716 + if (!fw_health)
2717 + goto async_event_process_exit;
2718 +
2719 +- fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2720 +- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2721 +- if (!fw_health->enabled) {
2722 ++ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2723 ++ fw_health->enabled = false;
2724 + netif_info(bp, drv, bp->dev,
2725 + "Error recovery info: error recovery[0]\n");
2726 + break;
2727 + }
2728 ++ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2729 + fw_health->tmr_multiplier =
2730 + DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2731 + bp->current_interval * 10);
2732 + fw_health->tmr_counter = fw_health->tmr_multiplier;
2733 +- fw_health->last_fw_heartbeat =
2734 +- bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2735 ++ if (!fw_health->enabled)
2736 ++ fw_health->last_fw_heartbeat =
2737 ++ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2738 + fw_health->last_fw_reset_cnt =
2739 + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2740 + netif_info(bp, drv, bp->dev,
2741 + "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2742 + fw_health->master, fw_health->last_fw_reset_cnt,
2743 + bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2744 ++ if (!fw_health->enabled) {
2745 ++ /* Make sure tmr_counter is set and visible to
2746 ++ * bnxt_health_check() before setting enabled to true.
2747 ++ */
2748 ++ smp_wmb();
2749 ++ fw_health->enabled = true;
2750 ++ }
2751 + goto async_event_process_exit;
2752 + }
2753 + case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2754 +@@ -2680,6 +2688,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
2755 + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2756 + int j;
2757 +
2758 ++ if (!txr->tx_buf_ring)
2759 ++ continue;
2760 ++
2761 + for (j = 0; j < max_idx;) {
2762 + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2763 + struct sk_buff *skb;
2764 +@@ -2764,6 +2775,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2765 + }
2766 +
2767 + skip_rx_tpa_free:
2768 ++ if (!rxr->rx_buf_ring)
2769 ++ goto skip_rx_buf_free;
2770 ++
2771 + for (i = 0; i < max_idx; i++) {
2772 + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2773 + dma_addr_t mapping = rx_buf->mapping;
2774 +@@ -2786,6 +2800,11 @@ skip_rx_tpa_free:
2775 + kfree(data);
2776 + }
2777 + }
2778 ++
2779 ++skip_rx_buf_free:
2780 ++ if (!rxr->rx_agg_ring)
2781 ++ goto skip_rx_agg_free;
2782 ++
2783 + for (i = 0; i < max_agg_idx; i++) {
2784 + struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2785 + struct page *page = rx_agg_buf->page;
2786 +@@ -2802,6 +2821,8 @@ skip_rx_tpa_free:
2787 +
2788 + __free_page(page);
2789 + }
2790 ++
2791 ++skip_rx_agg_free:
2792 + if (rxr->rx_page) {
2793 + __free_page(rxr->rx_page);
2794 + rxr->rx_page = NULL;
2795 +@@ -11237,6 +11258,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
2796 + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2797 + return;
2798 +
2799 ++ /* Make sure it is enabled before checking the tmr_counter. */
2800 ++ smp_rmb();
2801 + if (fw_health->tmr_counter) {
2802 + fw_health->tmr_counter--;
2803 + return;
2804 +@@ -12169,6 +12192,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
2805 + return;
2806 + }
2807 +
2808 ++ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
2809 ++ bp->fw_health->enabled) {
2810 ++ bp->fw_health->last_fw_reset_cnt =
2811 ++ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2812 ++ }
2813 + bp->fw_reset_state = 0;
2814 + /* Make sure fw_reset_state is 0 before clearing the flag */
2815 + smp_mb__before_atomic();
2816 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2817 +index 64381be935a8c..bb228619ec641 100644
2818 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2819 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2820 +@@ -449,7 +449,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
2821 + return rc;
2822 +
2823 + ver_resp = &bp->ver_resp;
2824 +- sprintf(buf, "%X", ver_resp->chip_rev);
2825 ++ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
2826 + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
2827 + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
2828 + if (rc)
2829 +@@ -471,8 +471,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
2830 + if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
2831 + u32 ver = nvm_cfg_ver.vu32;
2832 +
2833 +- sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
2834 +- ver & 0xf);
2835 ++ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
2836 ++ ver & 0xff);
2837 + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
2838 + DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
2839 + buf);
2840 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2841 +index 5e4429b14b8ca..2186706cf9130 100644
2842 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2843 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2844 +@@ -1870,9 +1870,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
2845 + {
2846 + struct bnxt_flower_indr_block_cb_priv *cb_priv;
2847 +
2848 +- /* All callback list access should be protected by RTNL. */
2849 +- ASSERT_RTNL();
2850 +-
2851 + list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
2852 + if (cb_priv->tunnel_netdev == netdev)
2853 + return cb_priv;
2854 +diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
2855 +index 512da98019c66..2a28a38da036c 100644
2856 +--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
2857 ++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
2858 +@@ -1107,6 +1107,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2859 + if (!adapter->registered_device_map) {
2860 + pr_err("%s: could not register any net devices\n",
2861 + pci_name(pdev));
2862 ++ err = -EINVAL;
2863 + goto out_release_adapter_res;
2864 + }
2865 +
2866 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
2867 +index cb5c79c43bc9c..7bb81e08f9532 100644
2868 +--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
2869 ++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
2870 +@@ -3306,6 +3306,9 @@ void t3_sge_stop(struct adapter *adap)
2871 +
2872 + t3_sge_stop_dma(adap);
2873 +
2874 ++ /* workqueues aren't initialized otherwise */
2875 ++ if (!(adap->flags & FULL_INIT_DONE))
2876 ++ return;
2877 + for (i = 0; i < SGE_QSETS; ++i) {
2878 + struct sge_qset *qs = &adap->sge.qs[i];
2879 +
2880 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2881 +index cdb5f14fb6bc5..9faa3712ea5b8 100644
2882 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2883 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2884 +@@ -73,6 +73,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
2885 + #define HNS3_OUTER_VLAN_TAG 2
2886 +
2887 + #define HNS3_MIN_TX_LEN 33U
2888 ++#define HNS3_MIN_TUN_PKT_LEN 65U
2889 +
2890 + /* hns3_pci_tbl - PCI Device ID Table
2891 + *
2892 +@@ -1425,8 +1426,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
2893 + l4.tcp->doff);
2894 + break;
2895 + case IPPROTO_UDP:
2896 +- if (hns3_tunnel_csum_bug(skb))
2897 +- return skb_checksum_help(skb);
2898 ++ if (hns3_tunnel_csum_bug(skb)) {
2899 ++ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
2900 ++
2901 ++ return ret ? ret : skb_checksum_help(skb);
2902 ++ }
2903 +
2904 + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
2905 + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
2906 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2907 +index 288788186eccd..e6e617aba2a4c 100644
2908 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2909 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
2910 +@@ -1710,6 +1710,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
2911 + }
2912 +
2913 + bd_num = le32_to_cpu(req->bd_num);
2914 ++ if (!bd_num) {
2915 ++ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
2916 ++ return -EINVAL;
2917 ++ }
2918 +
2919 + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
2920 + if (!desc_src)
2921 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2922 +index 03ae122f1c9ac..72d55c028ac4b 100644
2923 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2924 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2925 +@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
2926 + static int hclge_configure(struct hclge_dev *hdev)
2927 + {
2928 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2929 ++ const struct cpumask *cpumask = cpu_online_mask;
2930 + struct hclge_cfg cfg;
2931 + unsigned int i;
2932 +- int ret;
2933 ++ int node, ret;
2934 +
2935 + ret = hclge_get_cfg(hdev, &cfg);
2936 + if (ret)
2937 +@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
2938 +
2939 + hclge_init_kdump_kernel_config(hdev);
2940 +
2941 +- /* Set the init affinity based on pci func number */
2942 +- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
2943 +- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
2944 +- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
2945 +- &hdev->affinity_mask);
2946 ++ /* Set the affinity based on numa node */
2947 ++ node = dev_to_node(&hdev->pdev->dev);
2948 ++ if (node != NUMA_NO_NODE)
2949 ++ cpumask = cpumask_of_node(node);
2950 ++
2951 ++ cpumask_copy(&hdev->affinity_mask, cpumask);
2952 +
2953 + return ret;
2954 + }
2955 +@@ -8118,11 +8120,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
2956 + hclge_clear_arfs_rules(hdev);
2957 + spin_unlock_bh(&hdev->fd_rule_lock);
2958 +
2959 +- /* If it is not PF reset, the firmware will disable the MAC,
2960 ++ /* If it is not PF reset or FLR, the firmware will disable the MAC,
2961 + * so it only need to stop phy here.
2962 + */
2963 + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
2964 +- hdev->reset_type != HNAE3_FUNC_RESET) {
2965 ++ hdev->reset_type != HNAE3_FUNC_RESET &&
2966 ++ hdev->reset_type != HNAE3_FLR_RESET) {
2967 + hclge_mac_stop_phy(hdev);
2968 + hclge_update_link_status(hdev);
2969 + return;
2970 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2971 +index 938654778979a..be3ea7023ed8c 100644
2972 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2973 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2974 +@@ -2463,6 +2463,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2975 +
2976 + hclgevf_enable_vector(&hdev->misc_vector, false);
2977 + event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2978 ++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2979 ++ hclgevf_clear_event_cause(hdev, clearval);
2980 +
2981 + switch (event_cause) {
2982 + case HCLGEVF_VECTOR0_EVENT_RST:
2983 +@@ -2475,10 +2477,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2984 + break;
2985 + }
2986 +
2987 +- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
2988 +- hclgevf_clear_event_cause(hdev, clearval);
2989 ++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2990 + hclgevf_enable_vector(&hdev->misc_vector, true);
2991 +- }
2992 +
2993 + return IRQ_HANDLED;
2994 + }
2995 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2996 +index a775c69e4fd7f..6aa6ff89a7651 100644
2997 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2998 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2999 +@@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3000 + return 0;
3001 + }
3002 +
3003 ++ if (adapter->failover_pending) {
3004 ++ adapter->init_done_rc = -EAGAIN;
3005 ++ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
3006 ++ complete(&adapter->init_done);
3007 ++ /* login response buffer will be released on reset */
3008 ++ return 0;
3009 ++ }
3010 ++
3011 + netdev->mtu = adapter->req_mtu - ETH_HLEN;
3012 +
3013 + netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3014 +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
3015 +index eadcb99583464..3c4f08d20414e 100644
3016 +--- a/drivers/net/ethernet/intel/ice/ice.h
3017 ++++ b/drivers/net/ethernet/intel/ice/ice.h
3018 +@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
3019 + {
3020 + if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
3021 + set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3022 ++ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3023 + ice_plug_aux_dev(pf);
3024 + }
3025 + }
3026 +@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
3027 + {
3028 + ice_unplug_aux_dev(pf);
3029 + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3030 ++ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3031 + }
3032 + #endif /* _ICE_H_ */
3033 +diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
3034 +index 1f2afdf6cd483..adcc9a251595a 100644
3035 +--- a/drivers/net/ethernet/intel/ice/ice_idc.c
3036 ++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
3037 +@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
3038 + struct auxiliary_device *adev;
3039 + int ret;
3040 +
3041 ++ /* if this PF doesn't support a technology that requires auxiliary
3042 ++ * devices, then gracefully exit
3043 ++ */
3044 ++ if (!ice_is_aux_ena(pf))
3045 ++ return 0;
3046 ++
3047 + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3048 + if (!iadev)
3049 + return -ENOMEM;
3050 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
3051 +index f62982c4d933d..78114e625ffdc 100644
3052 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
3053 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
3054 +@@ -5962,7 +5962,9 @@ static int igc_probe(struct pci_dev *pdev,
3055 + if (pci_using_dac)
3056 + netdev->features |= NETIF_F_HIGHDMA;
3057 +
3058 +- netdev->vlan_features |= netdev->features;
3059 ++ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3060 ++ netdev->mpls_features |= NETIF_F_HW_CSUM;
3061 ++ netdev->hw_enc_features |= netdev->vlan_features;
3062 +
3063 + /* MTU range: 68 - 9216 */
3064 + netdev->min_mtu = ETH_MIN_MTU;
3065 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3066 +index 5fe277e354f7a..c10cae78e79f8 100644
3067 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3068 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3069 +@@ -92,7 +92,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
3070 + */
3071 + int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
3072 + {
3073 +- unsigned long timeout = jiffies + usecs_to_jiffies(10000);
3074 ++ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
3075 ++ bool twice = false;
3076 + void __iomem *reg;
3077 + u64 reg_val;
3078 +
3079 +@@ -107,6 +108,15 @@ again:
3080 + usleep_range(1, 5);
3081 + goto again;
3082 + }
3083 ++ /* In scenarios where CPU is scheduled out before checking
3084 ++ * 'time_before' (above) and gets scheduled in such that
3085 ++ * jiffies are beyond timeout value, then check again if HW is
3086 ++ * done with the operation in the meantime.
3087 ++ */
3088 ++ if (!twice) {
3089 ++ twice = true;
3090 ++ goto again;
3091 ++ }
3092 + return -EBUSY;
3093 + }
3094 +
3095 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3096 +index 3f8a98093f8cb..f9cf9fb315479 100644
3097 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3098 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3099 +@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
3100 + err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
3101 + if (err) {
3102 + mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
3103 +- return err;
3104 ++ goto err_cancel_work;
3105 + }
3106 +
3107 + err = mlx5_fw_tracer_create_mkey(tracer);
3108 +@@ -1031,6 +1031,7 @@ err_notifier_unregister:
3109 + mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
3110 + err_dealloc_pd:
3111 + mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
3112 ++err_cancel_work:
3113 + cancel_work_sync(&tracer->read_fw_strings_work);
3114 + return err;
3115 + }
3116 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3117 +index b1b51bbba0541..3f67efbe12fc5 100644
3118 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
3119 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
3120 +@@ -940,7 +940,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
3121 +
3122 + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
3123 + int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
3124 +-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
3125 ++int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
3126 +
3127 + int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
3128 + u16 vid);
3129 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
3130 +index 059799e4f483f..ef271b97fe5ef 100644
3131 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
3132 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
3133 +@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
3134 + {
3135 + struct mlx5e_rep_indr_block_priv *cb_priv;
3136 +
3137 +- /* All callback list access should be protected by RTNL. */
3138 +- ASSERT_RTNL();
3139 +-
3140 + list_for_each_entry(cb_priv,
3141 + &rpriv->uplink_priv.tc_indr_block_priv_list,
3142 + list)
3143 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3144 +index bd72572e03d1d..1cc279d389d6f 100644
3145 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3146 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3147 +@@ -1882,7 +1882,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
3148 + return set_pflag_cqe_based_moder(netdev, enable, true);
3149 + }
3150 +
3151 +-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
3152 ++int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
3153 + {
3154 + bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
3155 + struct mlx5e_params new_params;
3156 +@@ -1894,8 +1894,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
3157 + if (curr_val == new_val)
3158 + return 0;
3159 +
3160 +- if (new_val && !priv->profile->rx_ptp_support &&
3161 +- priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
3162 ++ if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
3163 + netdev_err(priv->netdev,
3164 + "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
3165 + return -EINVAL;
3166 +@@ -1903,7 +1902,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
3167 +
3168 + new_params = priv->channels.params;
3169 + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
3170 +- if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
3171 ++ if (rx_filter)
3172 + new_params.ptp_rx = new_val;
3173 +
3174 + if (new_params.ptp_rx == priv->channels.params.ptp_rx)
3175 +@@ -1926,12 +1925,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
3176 + {
3177 + struct mlx5e_priv *priv = netdev_priv(netdev);
3178 + struct mlx5_core_dev *mdev = priv->mdev;
3179 ++ bool rx_filter;
3180 + int err;
3181 +
3182 + if (!MLX5_CAP_GEN(mdev, cqe_compression))
3183 + return -EOPNOTSUPP;
3184 +
3185 +- err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
3186 ++ rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
3187 ++ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
3188 + if (err)
3189 + return err;
3190 +
3191 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3192 +index 2d53eaf3b9241..fa718e71db2d4 100644
3193 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3194 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3195 +@@ -4004,14 +4004,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
3196 +
3197 + if (!rx_filter)
3198 + /* Reset CQE compression to Admin default */
3199 +- return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
3200 ++ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
3201 +
3202 + if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
3203 + return 0;
3204 +
3205 + /* Disable CQE compression */
3206 + netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
3207 +- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3208 ++ err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
3209 + if (err)
3210 + netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3211 +
3212 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3213 +index c0697e1b71185..938ef5afe5053 100644
3214 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3215 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3216 +@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
3217 +
3218 + curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
3219 + if (!curr_match) {
3220 ++ rcu_read_unlock();
3221 + free_match_list(match_head, ft_locked);
3222 +- err = -ENOMEM;
3223 +- goto out;
3224 ++ return -ENOMEM;
3225 + }
3226 + curr_match->g = g;
3227 + list_add_tail(&curr_match->list, &match_head->list);
3228 + }
3229 +-out:
3230 + rcu_read_unlock();
3231 + return err;
3232 + }
3233 +diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
3234 +index a0a059e0154ff..04c7dc224effa 100644
3235 +--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
3236 ++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
3237 +@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
3238 + err = mlxbf_gige_clean_port(priv);
3239 + if (err)
3240 + goto free_irqs;
3241 ++
3242 ++ /* Clear driver's valid_polarity to match hardware,
3243 ++ * since the above call to clean_port() resets the
3244 ++ * receive polarity used by hardware.
3245 ++ */
3246 ++ priv->valid_polarity = 0;
3247 ++
3248 + err = mlxbf_gige_rx_init(priv);
3249 + if (err)
3250 + goto free_irqs;
3251 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
3252 +index 2406d33356ad2..d87a9eab25a79 100644
3253 +--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
3254 ++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
3255 +@@ -1766,9 +1766,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
3256 + struct nfp_flower_indr_block_cb_priv *cb_priv;
3257 + struct nfp_flower_priv *priv = app->priv;
3258 +
3259 +- /* All callback list access should be protected by RTNL. */
3260 +- ASSERT_RTNL();
3261 +-
3262 + list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
3263 + if (cb_priv->netdev == netdev)
3264 + return cb_priv;
3265 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3266 +index 4387292c37e2f..e8e17bfc41c54 100644
3267 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3268 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3269 +@@ -3368,6 +3368,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3270 + struct qed_nvm_image_att *p_image_att)
3271 + {
3272 + enum nvm_image_type type;
3273 ++ int rc;
3274 + u32 i;
3275 +
3276 + /* Translate image_id into MFW definitions */
3277 +@@ -3396,7 +3397,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3278 + return -EINVAL;
3279 + }
3280 +
3281 +- qed_mcp_nvm_info_populate(p_hwfn);
3282 ++ rc = qed_mcp_nvm_info_populate(p_hwfn);
3283 ++ if (rc)
3284 ++ return rc;
3285 ++
3286 + for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3287 + if (type == p_hwfn->nvm_info.image_att[i].image_type)
3288 + break;
3289 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3290 +index e6784023bce42..aa7ee43f92525 100644
3291 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3292 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3293 +@@ -439,7 +439,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
3294 + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
3295 + msleep(20);
3296 +
3297 +- qlcnic_rom_unlock(adapter);
3298 + /* big hammer don't reset CAM block on reset */
3299 + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
3300 +
3301 +diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
3302 +index 47e9998b62f09..6a2416bec7ddc 100644
3303 +--- a/drivers/net/ethernet/rdc/r6040.c
3304 ++++ b/drivers/net/ethernet/rdc/r6040.c
3305 +@@ -119,6 +119,8 @@
3306 + #define PHY_ST 0x8A /* PHY status register */
3307 + #define MAC_SM 0xAC /* MAC status machine */
3308 + #define MAC_SM_RST 0x0002 /* MAC status machine reset */
3309 ++#define MD_CSC 0xb6 /* MDC speed control register */
3310 ++#define MD_CSC_DEFAULT 0x0030
3311 + #define MAC_ID 0xBE /* Identifier register */
3312 +
3313 + #define TX_DCNT 0x80 /* TX descriptor count */
3314 +@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
3315 + {
3316 + void __iomem *ioaddr = lp->base;
3317 + int limit = MAC_DEF_TIMEOUT;
3318 +- u16 cmd;
3319 ++ u16 cmd, md_csc;
3320 +
3321 ++ md_csc = ioread16(ioaddr + MD_CSC);
3322 + iowrite16(MAC_RST, ioaddr + MCR1);
3323 + while (limit--) {
3324 + cmd = ioread16(ioaddr + MCR1);
3325 +@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
3326 + iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
3327 + iowrite16(0, ioaddr + MAC_SM);
3328 + mdelay(5);
3329 ++
3330 ++ /* Restore MDIO clock frequency */
3331 ++ if (md_csc != MD_CSC_DEFAULT)
3332 ++ iowrite16(md_csc, ioaddr + MD_CSC);
3333 + }
3334 +
3335 + static void r6040_init_mac_regs(struct net_device *dev)
3336 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
3337 +index 840478692a370..dfd439eadd492 100644
3338 +--- a/drivers/net/ethernet/renesas/sh_eth.c
3339 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
3340 +@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
3341 + else
3342 + txdesc->status |= cpu_to_le32(TD_TACT);
3343 +
3344 ++ wmb(); /* cur_tx must be incremented after TACT bit was set */
3345 + mdp->cur_tx++;
3346 +
3347 + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
3348 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
3349 +index 4c9a37dd0d3ff..ecf759ee1c9f5 100644
3350 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
3351 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
3352 +@@ -109,8 +109,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
3353 + plat->bus_id = pci_dev_id(pdev);
3354 +
3355 + phy_mode = device_get_phy_mode(&pdev->dev);
3356 +- if (phy_mode < 0)
3357 ++ if (phy_mode < 0) {
3358 + dev_err(&pdev->dev, "phy_mode not found\n");
3359 ++ return phy_mode;
3360 ++ }
3361 +
3362 + plat->phy_interface = phy_mode;
3363 + plat->interface = PHY_INTERFACE_MODE_GMII;
3364 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3365 +index 8a150cc462dcf..0dbd189c2721d 100644
3366 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3367 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3368 +@@ -7113,13 +7113,10 @@ int stmmac_suspend(struct device *dev)
3369 + struct net_device *ndev = dev_get_drvdata(dev);
3370 + struct stmmac_priv *priv = netdev_priv(ndev);
3371 + u32 chan;
3372 +- int ret;
3373 +
3374 + if (!ndev || !netif_running(ndev))
3375 + return 0;
3376 +
3377 +- phylink_mac_change(priv->phylink, false);
3378 +-
3379 + mutex_lock(&priv->lock);
3380 +
3381 + netif_device_detach(ndev);
3382 +@@ -7145,27 +7142,22 @@ int stmmac_suspend(struct device *dev)
3383 + stmmac_pmt(priv, priv->hw, priv->wolopts);
3384 + priv->irq_wake = 1;
3385 + } else {
3386 +- mutex_unlock(&priv->lock);
3387 +- rtnl_lock();
3388 +- if (device_may_wakeup(priv->device))
3389 +- phylink_speed_down(priv->phylink, false);
3390 +- phylink_stop(priv->phylink);
3391 +- rtnl_unlock();
3392 +- mutex_lock(&priv->lock);
3393 +-
3394 + stmmac_mac_set(priv, priv->ioaddr, false);
3395 + pinctrl_pm_select_sleep_state(priv->device);
3396 +- /* Disable clock in case of PWM is off */
3397 +- clk_disable_unprepare(priv->plat->clk_ptp_ref);
3398 +- ret = pm_runtime_force_suspend(dev);
3399 +- if (ret) {
3400 +- mutex_unlock(&priv->lock);
3401 +- return ret;
3402 +- }
3403 + }
3404 +
3405 + mutex_unlock(&priv->lock);
3406 +
3407 ++ rtnl_lock();
3408 ++ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
3409 ++ phylink_suspend(priv->phylink, true);
3410 ++ } else {
3411 ++ if (device_may_wakeup(priv->device))
3412 ++ phylink_speed_down(priv->phylink, false);
3413 ++ phylink_suspend(priv->phylink, false);
3414 ++ }
3415 ++ rtnl_unlock();
3416 ++
3417 + if (priv->dma_cap.fpesel) {
3418 + /* Disable FPE */
3419 + stmmac_fpe_configure(priv, priv->ioaddr,
3420 +@@ -7237,12 +7229,6 @@ int stmmac_resume(struct device *dev)
3421 + priv->irq_wake = 0;
3422 + } else {
3423 + pinctrl_pm_select_default_state(priv->device);
3424 +- /* enable the clk previously disabled */
3425 +- ret = pm_runtime_force_resume(dev);
3426 +- if (ret)
3427 +- return ret;
3428 +- if (priv->plat->clk_ptp_ref)
3429 +- clk_prepare_enable(priv->plat->clk_ptp_ref);
3430 + /* reset the phy so that it's ready */
3431 + if (priv->mii)
3432 + stmmac_mdio_reset(priv->mii);
3433 +@@ -7256,13 +7242,15 @@ int stmmac_resume(struct device *dev)
3434 + return ret;
3435 + }
3436 +
3437 +- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
3438 +- rtnl_lock();
3439 +- phylink_start(priv->phylink);
3440 +- /* We may have called phylink_speed_down before */
3441 +- phylink_speed_up(priv->phylink);
3442 +- rtnl_unlock();
3443 ++ rtnl_lock();
3444 ++ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
3445 ++ phylink_resume(priv->phylink);
3446 ++ } else {
3447 ++ phylink_resume(priv->phylink);
3448 ++ if (device_may_wakeup(priv->device))
3449 ++ phylink_speed_up(priv->phylink);
3450 + }
3451 ++ rtnl_unlock();
3452 +
3453 + rtnl_lock();
3454 + mutex_lock(&priv->lock);
3455 +@@ -7283,8 +7271,6 @@ int stmmac_resume(struct device *dev)
3456 + mutex_unlock(&priv->lock);
3457 + rtnl_unlock();
3458 +
3459 +- phylink_mac_change(priv->phylink, true);
3460 +-
3461 + netif_device_attach(ndev);
3462 +
3463 + return 0;
3464 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3465 +index 5ca710844cc1e..62cec9bfcd337 100644
3466 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3467 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3468 +@@ -9,6 +9,7 @@
3469 + *******************************************************************************/
3470 +
3471 + #include <linux/platform_device.h>
3472 ++#include <linux/pm_runtime.h>
3473 + #include <linux/module.h>
3474 + #include <linux/io.h>
3475 + #include <linux/of.h>
3476 +@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
3477 + return stmmac_bus_clks_config(priv, true);
3478 + }
3479 +
3480 ++static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
3481 ++{
3482 ++ struct net_device *ndev = dev_get_drvdata(dev);
3483 ++ struct stmmac_priv *priv = netdev_priv(ndev);
3484 ++ int ret;
3485 ++
3486 ++ if (!netif_running(ndev))
3487 ++ return 0;
3488 ++
3489 ++ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
3490 ++ /* Disable clock in case of PWM is off */
3491 ++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
3492 ++
3493 ++ ret = pm_runtime_force_suspend(dev);
3494 ++ if (ret)
3495 ++ return ret;
3496 ++ }
3497 ++
3498 ++ return 0;
3499 ++}
3500 ++
3501 ++static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
3502 ++{
3503 ++ struct net_device *ndev = dev_get_drvdata(dev);
3504 ++ struct stmmac_priv *priv = netdev_priv(ndev);
3505 ++ int ret;
3506 ++
3507 ++ if (!netif_running(ndev))
3508 ++ return 0;
3509 ++
3510 ++ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
3511 ++ /* enable the clk previously disabled */
3512 ++ ret = pm_runtime_force_resume(dev);
3513 ++ if (ret)
3514 ++ return ret;
3515 ++
3516 ++ clk_prepare_enable(priv->plat->clk_ptp_ref);
3517 ++ }
3518 ++
3519 ++ return 0;
3520 ++}
3521 ++
3522 + const struct dev_pm_ops stmmac_pltfr_pm_ops = {
3523 + SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
3524 + SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
3525 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
3526 + };
3527 + EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
3528 +
3529 +diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
3530 +index c607ebec74567..656f6ef31b19e 100644
3531 +--- a/drivers/net/ipa/ipa_table.c
3532 ++++ b/drivers/net/ipa/ipa_table.c
3533 +@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
3534 + * table region determines the number of entries it has.
3535 + */
3536 + if (filter) {
3537 +- count = hweight32(ipa->filter_map);
3538 ++ /* Include one extra "slot" to hold the filter map itself */
3539 ++ count = 1 + hweight32(ipa->filter_map);
3540 + hash_count = hash_mem->size ? count : 0;
3541 + } else {
3542 + count = mem->size / sizeof(__le64);
3543 +diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
3544 +index 21aa24c741b96..daae7fa58fb82 100644
3545 +--- a/drivers/net/phy/dp83640_reg.h
3546 ++++ b/drivers/net/phy/dp83640_reg.h
3547 +@@ -5,7 +5,7 @@
3548 + #ifndef HAVE_DP83640_REGISTERS
3549 + #define HAVE_DP83640_REGISTERS
3550 +
3551 +-#define PAGE0 0x0000
3552 ++/* #define PAGE0 0x0000 */
3553 + #define PHYCR2 0x001c /* PHY Control Register 2 */
3554 +
3555 + #define PAGE4 0x0004
3556 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
3557 +index eb29ef53d971d..42e5a681183f3 100644
3558 +--- a/drivers/net/phy/phylink.c
3559 ++++ b/drivers/net/phy/phylink.c
3560 +@@ -33,6 +33,7 @@
3561 + enum {
3562 + PHYLINK_DISABLE_STOPPED,
3563 + PHYLINK_DISABLE_LINK,
3564 ++ PHYLINK_DISABLE_MAC_WOL,
3565 + };
3566 +
3567 + /**
3568 +@@ -1281,6 +1282,9 @@ EXPORT_SYMBOL_GPL(phylink_start);
3569 + * network device driver's &struct net_device_ops ndo_stop() method. The
3570 + * network device's carrier state should not be changed prior to calling this
3571 + * function.
3572 ++ *
3573 ++ * This will synchronously bring down the link if the link is not already
3574 ++ * down (in other words, it will trigger a mac_link_down() method call.)
3575 + */
3576 + void phylink_stop(struct phylink *pl)
3577 + {
3578 +@@ -1300,6 +1304,84 @@ void phylink_stop(struct phylink *pl)
3579 + }
3580 + EXPORT_SYMBOL_GPL(phylink_stop);
3581 +
3582 ++/**
3583 ++ * phylink_suspend() - handle a network device suspend event
3584 ++ * @pl: a pointer to a &struct phylink returned from phylink_create()
3585 ++ * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
3586 ++ *
3587 ++ * Handle a network device suspend event. There are several cases:
3588 ++ * - If Wake-on-Lan is not active, we can bring down the link between
3589 ++ * the MAC and PHY by calling phylink_stop().
3590 ++ * - If Wake-on-Lan is active, and being handled only by the PHY, we
3591 ++ * can also bring down the link between the MAC and PHY.
3592 ++ * - If Wake-on-Lan is active, but being handled by the MAC, the MAC
3593 ++ * still needs to receive packets, so we can not bring the link down.
3594 ++ */
3595 ++void phylink_suspend(struct phylink *pl, bool mac_wol)
3596 ++{
3597 ++ ASSERT_RTNL();
3598 ++
3599 ++ if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
3600 ++ /* Wake-on-Lan enabled, MAC handling */
3601 ++ mutex_lock(&pl->state_mutex);
3602 ++
3603 ++ /* Stop the resolver bringing the link up */
3604 ++ __set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
3605 ++
3606 ++ /* Disable the carrier, to prevent transmit timeouts,
3607 ++ * but one would hope all packets have been sent. This
3608 ++ * also means phylink_resolve() will do nothing.
3609 ++ */
3610 ++ netif_carrier_off(pl->netdev);
3611 ++
3612 ++ /* We do not call mac_link_down() here as we want the
3613 ++ * link to remain up to receive the WoL packets.
3614 ++ */
3615 ++ mutex_unlock(&pl->state_mutex);
3616 ++ } else {
3617 ++ phylink_stop(pl);
3618 ++ }
3619 ++}
3620 ++EXPORT_SYMBOL_GPL(phylink_suspend);
3621 ++
3622 ++/**
3623 ++ * phylink_resume() - handle a network device resume event
3624 ++ * @pl: a pointer to a &struct phylink returned from phylink_create()
3625 ++ *
3626 ++ * Undo the effects of phylink_suspend(), returning the link to an
3627 ++ * operational state.
3628 ++ */
3629 ++void phylink_resume(struct phylink *pl)
3630 ++{
3631 ++ ASSERT_RTNL();
3632 ++
3633 ++ if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
3634 ++ /* Wake-on-Lan enabled, MAC handling */
3635 ++
3636 ++ /* Call mac_link_down() so we keep the overall state balanced.
3637 ++ * Do this under the state_mutex lock for consistency. This
3638 ++ * will cause a "Link Down" message to be printed during
3639 ++ * resume, which is harmless - the true link state will be
3640 ++ * printed when we run a resolve.
3641 ++ */
3642 ++ mutex_lock(&pl->state_mutex);
3643 ++ phylink_link_down(pl);
3644 ++ mutex_unlock(&pl->state_mutex);
3645 ++
3646 ++ /* Re-apply the link parameters so that all the settings get
3647 ++ * restored to the MAC.
3648 ++ */
3649 ++ phylink_mac_initial_config(pl, true);
3650 ++
3651 ++ /* Re-enable and re-resolve the link parameters */
3652 ++ clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
3653 ++ phylink_run_resolve(pl);
3654 ++ } else {
3655 ++ phylink_start(pl);
3656 ++ }
3657 ++}
3658 ++EXPORT_SYMBOL_GPL(phylink_resume);
3659 ++
3660 + /**
3661 + * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
3662 + * @pl: a pointer to a &struct phylink returned from phylink_create()
3663 +diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
3664 +index 4c4ab7b38d78c..82bb5ed94c485 100644
3665 +--- a/drivers/net/usb/cdc_mbim.c
3666 ++++ b/drivers/net/usb/cdc_mbim.c
3667 +@@ -654,6 +654,11 @@ static const struct usb_device_id mbim_devs[] = {
3668 + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3669 + },
3670 +
3671 ++ /* Telit LN920 */
3672 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3673 ++ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3674 ++ },
3675 ++
3676 + /* default entry */
3677 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3678 + .driver_info = (unsigned long)&cdc_mbim_info_zlp,
3679 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
3680 +index dec96e8ab5679..18e0ca85f6537 100644
3681 +--- a/drivers/net/usb/hso.c
3682 ++++ b/drivers/net/usb/hso.c
3683 +@@ -2536,13 +2536,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
3684 + if (!hso_net->mux_bulk_tx_buf)
3685 + goto err_free_tx_urb;
3686 +
3687 +- add_net_device(hso_dev);
3688 ++ result = add_net_device(hso_dev);
3689 ++ if (result) {
3690 ++ dev_err(&interface->dev, "Failed to add net device\n");
3691 ++ goto err_free_tx_buf;
3692 ++ }
3693 +
3694 + /* registering our net device */
3695 + result = register_netdev(net);
3696 + if (result) {
3697 + dev_err(&interface->dev, "Failed to register device\n");
3698 +- goto err_free_tx_buf;
3699 ++ goto err_rmv_ndev;
3700 + }
3701 +
3702 + hso_log_port(hso_dev);
3703 +@@ -2551,8 +2555,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
3704 +
3705 + return hso_dev;
3706 +
3707 +-err_free_tx_buf:
3708 ++err_rmv_ndev:
3709 + remove_net_device(hso_dev);
3710 ++err_free_tx_buf:
3711 + kfree(hso_net->mux_bulk_tx_buf);
3712 + err_free_tx_urb:
3713 + usb_free_urb(hso_net->mux_bulk_tx_urb);
3714 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
3715 +index b4b1f75b9c2a8..513f9e5387290 100644
3716 +--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
3717 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
3718 +@@ -230,19 +230,11 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
3719 + static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
3720 + {
3721 + const struct firmware *pnvm;
3722 +- char pnvm_name[64];
3723 ++ char pnvm_name[MAX_PNVM_NAME];
3724 ++ size_t new_len;
3725 + int ret;
3726 +
3727 +- /*
3728 +- * The prefix unfortunately includes a hyphen at the end, so
3729 +- * don't add the dot here...
3730 +- */
3731 +- snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
3732 +- trans->cfg->fw_name_pre);
3733 +-
3734 +- /* ...but replace the hyphen with the dot here. */
3735 +- if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
3736 +- pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
3737 ++ iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
3738 +
3739 + ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
3740 + if (ret) {
3741 +@@ -251,11 +243,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
3742 + return ret;
3743 + }
3744 +
3745 ++ new_len = pnvm->size;
3746 + *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
3747 ++ release_firmware(pnvm);
3748 ++
3749 + if (!*data)
3750 + return -ENOMEM;
3751 +
3752 +- *len = pnvm->size;
3753 ++ *len = new_len;
3754 +
3755 + return 0;
3756 + }
3757 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
3758 +index 61d3d4e0b7d94..203c367dd4dee 100644
3759 +--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
3760 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
3761 +@@ -12,7 +12,27 @@
3762 +
3763 + #define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
3764 +
3765 ++#define MAX_PNVM_NAME 64
3766 ++
3767 + int iwl_pnvm_load(struct iwl_trans *trans,
3768 + struct iwl_notif_wait_data *notif_wait);
3769 +
3770 ++static inline
3771 ++void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
3772 ++ u8 *pnvm_name, size_t max_len)
3773 ++{
3774 ++ int pre_len;
3775 ++
3776 ++ /*
3777 ++ * The prefix unfortunately includes a hyphen at the end, so
3778 ++ * don't add the dot here...
3779 ++ */
3780 ++ snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre);
3781 ++
3782 ++ /* ...but replace the hyphen with the dot here. */
3783 ++ pre_len = strlen(trans->cfg->fw_name_pre);
3784 ++ if (pre_len < max_len && pre_len > 0)
3785 ++ pnvm_name[pre_len - 1] = '.';
3786 ++}
3787 ++
3788 + #endif /* __IWL_PNVM_H__ */
3789 +diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
3790 +index 7095ecd6223a7..4e18e08776c98 100644
3791 +--- a/drivers/ntb/test/ntb_msi_test.c
3792 ++++ b/drivers/ntb/test/ntb_msi_test.c
3793 +@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
3794 + if (ret)
3795 + goto remove_dbgfs;
3796 +
3797 +- if (!nm->isr_ctx)
3798 ++ if (!nm->isr_ctx) {
3799 ++ ret = -ENOMEM;
3800 + goto remove_dbgfs;
3801 ++ }
3802 +
3803 + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
3804 +
3805 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
3806 +index 89df1350fefd8..65e1e5cf1b29a 100644
3807 +--- a/drivers/ntb/test/ntb_perf.c
3808 ++++ b/drivers/ntb/test/ntb_perf.c
3809 +@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
3810 + return -ENOMEM;
3811 + }
3812 + if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
3813 ++ ret = -EINVAL;
3814 + dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
3815 + goto err_free_inbuf;
3816 + }
3817 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3818 +index 2f0cbaba12ac4..84e7cb9f19681 100644
3819 +--- a/drivers/nvme/host/core.c
3820 ++++ b/drivers/nvme/host/core.c
3821 +@@ -3496,7 +3496,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3822 + lockdep_assert_held(&subsys->lock);
3823 +
3824 + list_for_each_entry(h, &subsys->nsheads, entry) {
3825 +- if (h->ns_id == nsid && nvme_tryget_ns_head(h))
3826 ++ if (h->ns_id != nsid)
3827 ++ continue;
3828 ++ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3829 + return h;
3830 + }
3831 +
3832 +@@ -3821,6 +3823,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
3833 +
3834 + mutex_lock(&ns->ctrl->subsys->lock);
3835 + list_del_rcu(&ns->siblings);
3836 ++ if (list_empty(&ns->head->list)) {
3837 ++ list_del_init(&ns->head->entry);
3838 ++ last_path = true;
3839 ++ }
3840 + mutex_unlock(&ns->ctrl->subsys->lock);
3841 +
3842 + synchronize_rcu(); /* guarantee not available in head->list */
3843 +@@ -3840,13 +3846,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
3844 + list_del_init(&ns->list);
3845 + up_write(&ns->ctrl->namespaces_rwsem);
3846 +
3847 +- /* Synchronize with nvme_init_ns_head() */
3848 +- mutex_lock(&ns->head->subsys->lock);
3849 +- if (list_empty(&ns->head->list)) {
3850 +- list_del_init(&ns->head->entry);
3851 +- last_path = true;
3852 +- }
3853 +- mutex_unlock(&ns->head->subsys->lock);
3854 + if (last_path)
3855 + nvme_mpath_shutdown_disk(ns->head);
3856 + nvme_put_ns(ns);
3857 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
3858 +index 48b70e5235a39..19a711395cdc3 100644
3859 +--- a/drivers/nvme/host/tcp.c
3860 ++++ b/drivers/nvme/host/tcp.c
3861 +@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
3862 + } while (ret > 0);
3863 + }
3864 +
3865 ++static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
3866 ++{
3867 ++ return !list_empty(&queue->send_list) ||
3868 ++ !llist_empty(&queue->req_list) || queue->more_requests;
3869 ++}
3870 ++
3871 + static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
3872 + bool sync, bool last)
3873 + {
3874 +@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
3875 + nvme_tcp_send_all(queue);
3876 + queue->more_requests = false;
3877 + mutex_unlock(&queue->send_mutex);
3878 +- } else if (last) {
3879 +- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
3880 + }
3881 ++
3882 ++ if (last && nvme_tcp_queue_more(queue))
3883 ++ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
3884 + }
3885 +
3886 + static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
3887 +@@ -893,12 +900,6 @@ done:
3888 + read_unlock_bh(&sk->sk_callback_lock);
3889 + }
3890 +
3891 +-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
3892 +-{
3893 +- return !list_empty(&queue->send_list) ||
3894 +- !llist_empty(&queue->req_list) || queue->more_requests;
3895 +-}
3896 +-
3897 + static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
3898 + {
3899 + queue->request = NULL;
3900 +@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
3901 + pending = true;
3902 + else if (unlikely(result < 0))
3903 + break;
3904 +- } else
3905 +- pending = !llist_empty(&queue->req_list);
3906 ++ }
3907 +
3908 + result = nvme_tcp_try_recv(queue);
3909 + if (result > 0)
3910 +diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
3911 +index 5e1e3796efa4e..326f7d13024f9 100644
3912 +--- a/drivers/pci/controller/Kconfig
3913 ++++ b/drivers/pci/controller/Kconfig
3914 +@@ -40,6 +40,7 @@ config PCI_FTPCI100
3915 + config PCI_IXP4XX
3916 + bool "Intel IXP4xx PCI controller"
3917 + depends on ARM && OF
3918 ++ depends on ARCH_IXP4XX || COMPILE_TEST
3919 + default ARCH_IXP4XX
3920 + help
3921 + Say Y here if you want support for the PCI host controller found
3922 +diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
3923 +index 35e61048e133c..ffb176d288cd9 100644
3924 +--- a/drivers/pci/controller/cadence/pci-j721e.c
3925 ++++ b/drivers/pci/controller/cadence/pci-j721e.c
3926 +@@ -27,6 +27,7 @@
3927 + #define STATUS_REG_SYS_2 0x508
3928 + #define STATUS_CLR_REG_SYS_2 0x708
3929 + #define LINK_DOWN BIT(1)
3930 ++#define J7200_LINK_DOWN BIT(10)
3931 +
3932 + #define J721E_PCIE_USER_CMD_STATUS 0x4
3933 + #define LINK_TRAINING_ENABLE BIT(0)
3934 +@@ -57,6 +58,7 @@ struct j721e_pcie {
3935 + struct cdns_pcie *cdns_pcie;
3936 + void __iomem *user_cfg_base;
3937 + void __iomem *intd_cfg_base;
3938 ++ u32 linkdown_irq_regfield;
3939 + };
3940 +
3941 + enum j721e_pcie_mode {
3942 +@@ -66,7 +68,10 @@ enum j721e_pcie_mode {
3943 +
3944 + struct j721e_pcie_data {
3945 + enum j721e_pcie_mode mode;
3946 +- bool quirk_retrain_flag;
3947 ++ unsigned int quirk_retrain_flag:1;
3948 ++ unsigned int quirk_detect_quiet_flag:1;
3949 ++ u32 linkdown_irq_regfield;
3950 ++ unsigned int byte_access_allowed:1;
3951 + };
3952 +
3953 + static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
3954 +@@ -98,12 +103,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
3955 + u32 reg;
3956 +
3957 + reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
3958 +- if (!(reg & LINK_DOWN))
3959 ++ if (!(reg & pcie->linkdown_irq_regfield))
3960 + return IRQ_NONE;
3961 +
3962 + dev_err(dev, "LINK DOWN!\n");
3963 +
3964 +- j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
3965 ++ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
3966 + return IRQ_HANDLED;
3967 + }
3968 +
3969 +@@ -112,7 +117,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
3970 + u32 reg;
3971 +
3972 + reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
3973 +- reg |= LINK_DOWN;
3974 ++ reg |= pcie->linkdown_irq_regfield;
3975 + j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
3976 + }
3977 +
3978 +@@ -284,10 +289,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
3979 + static const struct j721e_pcie_data j721e_pcie_rc_data = {
3980 + .mode = PCI_MODE_RC,
3981 + .quirk_retrain_flag = true,
3982 ++ .byte_access_allowed = false,
3983 ++ .linkdown_irq_regfield = LINK_DOWN,
3984 + };
3985 +
3986 + static const struct j721e_pcie_data j721e_pcie_ep_data = {
3987 + .mode = PCI_MODE_EP,
3988 ++ .linkdown_irq_regfield = LINK_DOWN,
3989 ++};
3990 ++
3991 ++static const struct j721e_pcie_data j7200_pcie_rc_data = {
3992 ++ .mode = PCI_MODE_RC,
3993 ++ .quirk_detect_quiet_flag = true,
3994 ++ .linkdown_irq_regfield = J7200_LINK_DOWN,
3995 ++ .byte_access_allowed = true,
3996 ++};
3997 ++
3998 ++static const struct j721e_pcie_data j7200_pcie_ep_data = {
3999 ++ .mode = PCI_MODE_EP,
4000 ++ .quirk_detect_quiet_flag = true,
4001 ++};
4002 ++
4003 ++static const struct j721e_pcie_data am64_pcie_rc_data = {
4004 ++ .mode = PCI_MODE_RC,
4005 ++ .linkdown_irq_regfield = J7200_LINK_DOWN,
4006 ++ .byte_access_allowed = true,
4007 ++};
4008 ++
4009 ++static const struct j721e_pcie_data am64_pcie_ep_data = {
4010 ++ .mode = PCI_MODE_EP,
4011 ++ .linkdown_irq_regfield = J7200_LINK_DOWN,
4012 + };
4013 +
4014 + static const struct of_device_id of_j721e_pcie_match[] = {
4015 +@@ -299,6 +330,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
4016 + .compatible = "ti,j721e-pcie-ep",
4017 + .data = &j721e_pcie_ep_data,
4018 + },
4019 ++ {
4020 ++ .compatible = "ti,j7200-pcie-host",
4021 ++ .data = &j7200_pcie_rc_data,
4022 ++ },
4023 ++ {
4024 ++ .compatible = "ti,j7200-pcie-ep",
4025 ++ .data = &j7200_pcie_ep_data,
4026 ++ },
4027 ++ {
4028 ++ .compatible = "ti,am64-pcie-host",
4029 ++ .data = &am64_pcie_rc_data,
4030 ++ },
4031 ++ {
4032 ++ .compatible = "ti,am64-pcie-ep",
4033 ++ .data = &am64_pcie_ep_data,
4034 ++ },
4035 + {},
4036 + };
4037 +
4038 +@@ -332,6 +379,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
4039 +
4040 + pcie->dev = dev;
4041 + pcie->mode = mode;
4042 ++ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
4043 +
4044 + base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
4045 + if (IS_ERR(base))
4046 +@@ -391,9 +439,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
4047 + goto err_get_sync;
4048 + }
4049 +
4050 +- bridge->ops = &cdns_ti_pcie_host_ops;
4051 ++ if (!data->byte_access_allowed)
4052 ++ bridge->ops = &cdns_ti_pcie_host_ops;
4053 + rc = pci_host_bridge_priv(bridge);
4054 + rc->quirk_retrain_flag = data->quirk_retrain_flag;
4055 ++ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
4056 +
4057 + cdns_pcie = &rc->pcie;
4058 + cdns_pcie->dev = dev;
4059 +@@ -459,6 +509,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
4060 + ret = -ENOMEM;
4061 + goto err_get_sync;
4062 + }
4063 ++ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
4064 +
4065 + cdns_pcie = &ep->pcie;
4066 + cdns_pcie->dev = dev;
4067 +diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
4068 +index 897cdde02bd80..dd7df1ac7fda2 100644
4069 +--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
4070 ++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
4071 +@@ -623,6 +623,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
4072 + ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
4073 + /* Reserve region 0 for IRQs */
4074 + set_bit(0, &ep->ob_region_map);
4075 ++
4076 ++ if (ep->quirk_detect_quiet_flag)
4077 ++ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
4078 ++
4079 + spin_lock_init(&ep->lock);
4080 +
4081 + return 0;
4082 +diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
4083 +index ae1c55503513a..fb96d37a135c1 100644
4084 +--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
4085 ++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
4086 +@@ -498,6 +498,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
4087 + return PTR_ERR(rc->cfg_base);
4088 + rc->cfg_res = res;
4089 +
4090 ++ if (rc->quirk_detect_quiet_flag)
4091 ++ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
4092 ++
4093 + ret = cdns_pcie_start_link(pcie);
4094 + if (ret) {
4095 + dev_err(dev, "Failed to start link\n");
4096 +diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
4097 +index 3c3646502d05c..52767f26048fd 100644
4098 +--- a/drivers/pci/controller/cadence/pcie-cadence.c
4099 ++++ b/drivers/pci/controller/cadence/pcie-cadence.c
4100 +@@ -7,6 +7,22 @@
4101 +
4102 + #include "pcie-cadence.h"
4103 +
4104 ++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
4105 ++{
4106 ++ u32 delay = 0x3;
4107 ++ u32 ltssm_control_cap;
4108 ++
4109 ++ /*
4110 ++ * Set the LTSSM Detect Quiet state min. delay to 2ms.
4111 ++ */
4112 ++ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
4113 ++ ltssm_control_cap = ((ltssm_control_cap &
4114 ++ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
4115 ++ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
4116 ++
4117 ++ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
4118 ++}
4119 ++
4120 + void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
4121 + u32 r, bool is_io,
4122 + u64 cpu_addr, u64 pci_addr, size_t size)
4123 +diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
4124 +index 30db2d68c17a0..4bde99b74135d 100644
4125 +--- a/drivers/pci/controller/cadence/pcie-cadence.h
4126 ++++ b/drivers/pci/controller/cadence/pcie-cadence.h
4127 +@@ -189,6 +189,14 @@
4128 + /* AXI link down register */
4129 + #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
4130 +
4131 ++/* LTSSM Capabilities register */
4132 ++#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
4133 ++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
4134 ++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
4135 ++#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
4136 ++ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
4137 ++ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
4138 ++
4139 + enum cdns_pcie_rp_bar {
4140 + RP_BAR_UNDEFINED = -1,
4141 + RP_BAR0,
4142 +@@ -295,6 +303,7 @@ struct cdns_pcie {
4143 + * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
4144 + * available
4145 + * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
4146 ++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
4147 + */
4148 + struct cdns_pcie_rc {
4149 + struct cdns_pcie pcie;
4150 +@@ -303,7 +312,8 @@ struct cdns_pcie_rc {
4151 + u32 vendor_id;
4152 + u32 device_id;
4153 + bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
4154 +- bool quirk_retrain_flag;
4155 ++ unsigned int quirk_retrain_flag:1;
4156 ++ unsigned int quirk_detect_quiet_flag:1;
4157 + };
4158 +
4159 + /**
4160 +@@ -334,6 +344,7 @@ struct cdns_pcie_epf {
4161 + * registers fields (RMW) accessible by both remote RC and EP to
4162 + * minimize time between read and write
4163 + * @epf: Structure to hold info about endpoint function
4164 ++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
4165 + */
4166 + struct cdns_pcie_ep {
4167 + struct cdns_pcie pcie;
4168 +@@ -348,6 +359,7 @@ struct cdns_pcie_ep {
4169 + /* protect writing to PCI_STATUS while raising legacy interrupts */
4170 + spinlock_t lock;
4171 + struct cdns_pcie_epf *epf;
4172 ++ unsigned int quirk_detect_quiet_flag:1;
4173 + };
4174 +
4175 +
4176 +@@ -508,6 +520,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
4177 + return 0;
4178 + }
4179 + #endif
4180 ++
4181 ++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
4182 ++
4183 + void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
4184 + u32 r, bool is_io,
4185 + u64 cpu_addr, u64 pci_addr, size_t size);
4186 +diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
4187 +index 3ec7b29d5dc72..55c8afb9a8996 100644
4188 +--- a/drivers/pci/controller/dwc/pcie-tegra194.c
4189 ++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
4190 +@@ -497,19 +497,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
4191 + struct tegra_pcie_dw *pcie = arg;
4192 + struct dw_pcie_ep *ep = &pcie->pci.ep;
4193 + int spurious = 1;
4194 +- u32 val, tmp;
4195 ++ u32 status_l0, status_l1, link_status;
4196 +
4197 +- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
4198 +- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
4199 +- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
4200 +- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
4201 ++ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
4202 ++ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
4203 ++ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
4204 ++ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
4205 +
4206 +- if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
4207 ++ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
4208 + pex_ep_event_hot_rst_done(pcie);
4209 +
4210 +- if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
4211 +- tmp = appl_readl(pcie, APPL_LINK_STATUS);
4212 +- if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
4213 ++ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
4214 ++ link_status = appl_readl(pcie, APPL_LINK_STATUS);
4215 ++ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
4216 + dev_dbg(pcie->dev, "Link is up with Host\n");
4217 + dw_pcie_ep_linkup(ep);
4218 + }
4219 +@@ -518,11 +518,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
4220 + spurious = 0;
4221 + }
4222 +
4223 +- if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
4224 +- val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
4225 +- appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
4226 ++ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
4227 ++ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
4228 ++ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
4229 +
4230 +- if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
4231 ++ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
4232 + return IRQ_WAKE_THREAD;
4233 +
4234 + spurious = 0;
4235 +@@ -530,8 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
4236 +
4237 + if (spurious) {
4238 + dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
4239 +- val);
4240 +- appl_writel(pcie, val, APPL_INTR_STATUS_L0);
4241 ++ status_l0);
4242 ++ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
4243 + }
4244 +
4245 + return IRQ_HANDLED;
4246 +@@ -1763,7 +1763,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
4247 + val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
4248 + val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
4249 + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
4250 +- val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
4251 ++ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
4252 + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
4253 +
4254 + ret = dw_pcie_ep_init_complete(ep);
4255 +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
4256 +index c979229a6d0df..b358212d71ab7 100644
4257 +--- a/drivers/pci/controller/pci-tegra.c
4258 ++++ b/drivers/pci/controller/pci-tegra.c
4259 +@@ -2193,13 +2193,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
4260 + rp->np = port;
4261 +
4262 + rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
4263 +- if (IS_ERR(rp->base))
4264 +- return PTR_ERR(rp->base);
4265 ++ if (IS_ERR(rp->base)) {
4266 ++ err = PTR_ERR(rp->base);
4267 ++ goto err_node_put;
4268 ++ }
4269 +
4270 + label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
4271 + if (!label) {
4272 +- dev_err(dev, "failed to create reset GPIO label\n");
4273 +- return -ENOMEM;
4274 ++ err = -ENOMEM;
4275 ++ goto err_node_put;
4276 + }
4277 +
4278 + /*
4279 +@@ -2217,7 +2219,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
4280 + } else {
4281 + dev_err(dev, "failed to get reset GPIO: %ld\n",
4282 + PTR_ERR(rp->reset_gpio));
4283 +- return PTR_ERR(rp->reset_gpio);
4284 ++ err = PTR_ERR(rp->reset_gpio);
4285 ++ goto err_node_put;
4286 + }
4287 + }
4288 +
4289 +diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
4290 +index 56b8ee7bf3307..f918c713afb08 100644
4291 +--- a/drivers/pci/controller/pcie-iproc-bcma.c
4292 ++++ b/drivers/pci/controller/pcie-iproc-bcma.c
4293 +@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
4294 + {
4295 + struct device *dev = &bdev->dev;
4296 + struct iproc_pcie *pcie;
4297 +- LIST_HEAD(resources);
4298 + struct pci_host_bridge *bridge;
4299 + int ret;
4300 +
4301 +@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
4302 + pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
4303 + pcie->mem.name = "PCIe MEM space";
4304 + pcie->mem.flags = IORESOURCE_MEM;
4305 +- pci_add_resource(&resources, &pcie->mem);
4306 ++ pci_add_resource(&bridge->windows, &pcie->mem);
4307 ++ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
4308 ++ if (ret)
4309 ++ return ret;
4310 +
4311 + pcie->map_irq = iproc_pcie_bcma_map_irq;
4312 +
4313 +- ret = iproc_pcie_setup(pcie, &resources);
4314 +- if (ret) {
4315 +- dev_err(dev, "PCIe controller setup failed\n");
4316 +- pci_free_resource_list(&resources);
4317 +- return ret;
4318 +- }
4319 +-
4320 + bcma_set_drvdata(bdev, pcie);
4321 +- return 0;
4322 ++
4323 ++ return iproc_pcie_setup(pcie, &bridge->windows);
4324 + }
4325 +
4326 + static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
4327 +diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
4328 +index b4a288e24aafb..c91d85b151290 100644
4329 +--- a/drivers/pci/controller/pcie-rcar-ep.c
4330 ++++ b/drivers/pci/controller/pcie-rcar-ep.c
4331 +@@ -492,9 +492,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
4332 + pcie->dev = dev;
4333 +
4334 + pm_runtime_enable(dev);
4335 +- err = pm_runtime_get_sync(dev);
4336 ++ err = pm_runtime_resume_and_get(dev);
4337 + if (err < 0) {
4338 +- dev_err(dev, "pm_runtime_get_sync failed\n");
4339 ++ dev_err(dev, "pm_runtime_resume_and_get failed\n");
4340 + goto err_pm_disable;
4341 + }
4342 +
4343 +diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
4344 +index a32070be5adf9..cc6194aa24c15 100644
4345 +--- a/drivers/pci/hotplug/TODO
4346 ++++ b/drivers/pci/hotplug/TODO
4347 +@@ -40,9 +40,6 @@ ibmphp:
4348 +
4349 + * The return value of pci_hp_register() is not checked.
4350 +
4351 +-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
4352 +- and once more in the error path of its caller ibmphp_access_ebda().
4353 +-
4354 + * The various slot data structures are difficult to follow and need to be
4355 + simplified. A lot of functions are too large and too complex, they need
4356 + to be broken up into smaller, manageable pieces. Negative examples are
4357 +diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
4358 +index 11a2661dc0627..7fb75401ad8a7 100644
4359 +--- a/drivers/pci/hotplug/ibmphp_ebda.c
4360 ++++ b/drivers/pci/hotplug/ibmphp_ebda.c
4361 +@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
4362 + /* init hpc structure */
4363 + hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
4364 + if (!hpc_ptr) {
4365 +- rc = -ENOMEM;
4366 +- goto error_no_hpc;
4367 ++ return -ENOMEM;
4368 + }
4369 + hpc_ptr->ctlr_id = ctlr_id;
4370 + hpc_ptr->ctlr_relative_id = ctlr;
4371 +@@ -910,8 +909,6 @@ error:
4372 + kfree(tmp_slot);
4373 + error_no_slot:
4374 + free_ebda_hpc(hpc_ptr);
4375 +-error_no_hpc:
4376 +- iounmap(io_mem);
4377 + return rc;
4378 + }
4379 +
4380 +diff --git a/drivers/pci/of.c b/drivers/pci/of.c
4381 +index a143b02b2dcdf..d84381ce82b52 100644
4382 +--- a/drivers/pci/of.c
4383 ++++ b/drivers/pci/of.c
4384 +@@ -310,7 +310,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
4385 + /* Check for ranges property */
4386 + err = of_pci_range_parser_init(&parser, dev_node);
4387 + if (err)
4388 +- goto failed;
4389 ++ return 0;
4390 +
4391 + dev_dbg(dev, "Parsing ranges property...\n");
4392 + for_each_of_pci_range(&parser, &range) {
4393 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4394 +index a5e6759c407b9..a4eb0c042ca3e 100644
4395 +--- a/drivers/pci/pci.c
4396 ++++ b/drivers/pci/pci.c
4397 +@@ -265,7 +265,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
4398 +
4399 + *endptr = strchrnul(path, ';');
4400 +
4401 +- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
4402 ++ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
4403 + if (!wpath)
4404 + return -ENOMEM;
4405 +
4406 +diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
4407 +index 95d4eef2c9e86..4810faa67f520 100644
4408 +--- a/drivers/pci/pcie/ptm.c
4409 ++++ b/drivers/pci/pcie/ptm.c
4410 +@@ -60,10 +60,8 @@ void pci_save_ptm_state(struct pci_dev *dev)
4411 + return;
4412 +
4413 + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
4414 +- if (!save_state) {
4415 +- pci_err(dev, "no suspend buffer for PTM\n");
4416 ++ if (!save_state)
4417 + return;
4418 +- }
4419 +
4420 + cap = (u16 *)&save_state->cap.data[0];
4421 + pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
4422 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4423 +index 1905ee0297a4c..8c3c1ef92171f 100644
4424 +--- a/drivers/pci/quirks.c
4425 ++++ b/drivers/pci/quirks.c
4426 +@@ -4616,6 +4616,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4427 + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4428 + }
4429 +
4430 ++/*
4431 ++ * Each of these NXP Root Ports is in a Root Complex with a unique segment
4432 ++ * number and does provide isolation features to disable peer transactions
4433 ++ * and validate bus numbers in requests, but does not provide an ACS
4434 ++ * capability.
4435 ++ */
4436 ++static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
4437 ++{
4438 ++ return pci_acs_ctrl_enabled(acs_flags,
4439 ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4440 ++}
4441 ++
4442 + static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4443 + {
4444 + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4445 +@@ -4842,6 +4854,10 @@ static const struct pci_dev_acs_enabled {
4446 + { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
4447 + /* Cavium ThunderX */
4448 + { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
4449 ++ /* Cavium multi-function devices */
4450 ++ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
4451 ++ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
4452 ++ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
4453 + /* APM X-Gene */
4454 + { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
4455 + /* Ampere Computing */
4456 +@@ -4862,6 +4878,39 @@ static const struct pci_dev_acs_enabled {
4457 + { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
4458 + { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
4459 + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
4460 ++ /* NXP root ports, xx=16, 12, or 08 cores */
4461 ++ /* LX2xx0A : without security features + CAN-FD */
4462 ++ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
4463 ++ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
4464 ++ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
4465 ++ /* LX2xx0C : security features + CAN-FD */
4466 ++ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
4467 ++ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
4468 ++ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
4469 ++ /* LX2xx0E : security features + CAN */
4470 ++ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
4471 ++ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
4472 ++ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
4473 ++ /* LX2xx0N : without security features + CAN */
4474 ++ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
4475 ++ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
4476 ++ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
4477 ++ /* LX2xx2A : without security features + CAN-FD */
4478 ++ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
4479 ++ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
4480 ++ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
4481 ++ /* LX2xx2C : security features + CAN-FD */
4482 ++ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
4483 ++ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
4484 ++ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
4485 ++ /* LX2xx2E : security features + CAN */
4486 ++ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
4487 ++ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
4488 ++ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
4489 ++ /* LX2xx2N : without security features + CAN */
4490 ++ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
4491 ++ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
4492 ++ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
4493 + /* Zhaoxin Root/Downstream Ports */
4494 + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
4495 + { 0 }
4496 +@@ -5350,7 +5399,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4497 + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
4498 +
4499 + /*
4500 +- * Create device link for NVIDIA GPU with integrated USB xHCI Host
4501 ++ * Create device link for GPUs with integrated USB xHCI Host
4502 + * controller to VGA.
4503 + */
4504 + static void quirk_gpu_usb(struct pci_dev *usb)
4505 +@@ -5359,9 +5408,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
4506 + }
4507 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4508 + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
4509 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
4510 ++ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
4511 +
4512 + /*
4513 +- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
4514 ++ * Create device link for GPUs with integrated Type-C UCSI controller
4515 + * to VGA. Currently there is no class code defined for UCSI device over PCI
4516 + * so using UNKNOWN class for now and it will be updated when UCSI
4517 + * over PCI gets a class code.
4518 +@@ -5374,6 +5425,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
4519 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4520 + PCI_CLASS_SERIAL_UNKNOWN, 8,
4521 + quirk_gpu_usb_typec_ucsi);
4522 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
4523 ++ PCI_CLASS_SERIAL_UNKNOWN, 8,
4524 ++ quirk_gpu_usb_typec_ucsi);
4525 +
4526 + /*
4527 + * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
4528 +diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
4529 +index f1cbc6b2edbb3..ebadc6c08e116 100644
4530 +--- a/drivers/remoteproc/qcom_wcnss.c
4531 ++++ b/drivers/remoteproc/qcom_wcnss.c
4532 +@@ -142,18 +142,6 @@ static const struct wcnss_data pronto_v2_data = {
4533 + .num_vregs = 1,
4534 + };
4535 +
4536 +-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
4537 +- struct qcom_iris *iris,
4538 +- bool use_48mhz_xo)
4539 +-{
4540 +- mutex_lock(&wcnss->iris_lock);
4541 +-
4542 +- wcnss->iris = iris;
4543 +- wcnss->use_48mhz_xo = use_48mhz_xo;
4544 +-
4545 +- mutex_unlock(&wcnss->iris_lock);
4546 +-}
4547 +-
4548 + static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
4549 + {
4550 + struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
4551 +@@ -639,12 +627,20 @@ static int wcnss_probe(struct platform_device *pdev)
4552 + goto detach_pds;
4553 + }
4554 +
4555 ++ wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
4556 ++ if (IS_ERR(wcnss->iris)) {
4557 ++ ret = PTR_ERR(wcnss->iris);
4558 ++ goto detach_pds;
4559 ++ }
4560 ++
4561 + ret = rproc_add(rproc);
4562 + if (ret)
4563 +- goto detach_pds;
4564 ++ goto remove_iris;
4565 +
4566 +- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
4567 ++ return 0;
4568 +
4569 ++remove_iris:
4570 ++ qcom_iris_remove(wcnss->iris);
4571 + detach_pds:
4572 + wcnss_release_pds(wcnss);
4573 + free_rproc:
4574 +@@ -657,7 +653,7 @@ static int wcnss_remove(struct platform_device *pdev)
4575 + {
4576 + struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
4577 +
4578 +- of_platform_depopulate(&pdev->dev);
4579 ++ qcom_iris_remove(wcnss->iris);
4580 +
4581 + rproc_del(wcnss->rproc);
4582 +
4583 +@@ -686,28 +682,7 @@ static struct platform_driver wcnss_driver = {
4584 + },
4585 + };
4586 +
4587 +-static int __init wcnss_init(void)
4588 +-{
4589 +- int ret;
4590 +-
4591 +- ret = platform_driver_register(&wcnss_driver);
4592 +- if (ret)
4593 +- return ret;
4594 +-
4595 +- ret = platform_driver_register(&qcom_iris_driver);
4596 +- if (ret)
4597 +- platform_driver_unregister(&wcnss_driver);
4598 +-
4599 +- return ret;
4600 +-}
4601 +-module_init(wcnss_init);
4602 +-
4603 +-static void __exit wcnss_exit(void)
4604 +-{
4605 +- platform_driver_unregister(&qcom_iris_driver);
4606 +- platform_driver_unregister(&wcnss_driver);
4607 +-}
4608 +-module_exit(wcnss_exit);
4609 ++module_platform_driver(wcnss_driver);
4610 +
4611 + MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
4612 + MODULE_LICENSE("GPL v2");
4613 +diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
4614 +index 62c8682d0a92d..6d01ee6afa7f8 100644
4615 +--- a/drivers/remoteproc/qcom_wcnss.h
4616 ++++ b/drivers/remoteproc/qcom_wcnss.h
4617 +@@ -17,9 +17,9 @@ struct wcnss_vreg_info {
4618 + bool super_turbo;
4619 + };
4620 +
4621 ++struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo);
4622 ++void qcom_iris_remove(struct qcom_iris *iris);
4623 + int qcom_iris_enable(struct qcom_iris *iris);
4624 + void qcom_iris_disable(struct qcom_iris *iris);
4625 +
4626 +-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
4627 +-
4628 + #endif
4629 +diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
4630 +index 169acd305ae39..09720ddddc857 100644
4631 +--- a/drivers/remoteproc/qcom_wcnss_iris.c
4632 ++++ b/drivers/remoteproc/qcom_wcnss_iris.c
4633 +@@ -17,7 +17,7 @@
4634 + #include "qcom_wcnss.h"
4635 +
4636 + struct qcom_iris {
4637 +- struct device *dev;
4638 ++ struct device dev;
4639 +
4640 + struct clk *xo_clk;
4641 +
4642 +@@ -75,7 +75,7 @@ int qcom_iris_enable(struct qcom_iris *iris)
4643 +
4644 + ret = clk_prepare_enable(iris->xo_clk);
4645 + if (ret) {
4646 +- dev_err(iris->dev, "failed to enable xo clk\n");
4647 ++ dev_err(&iris->dev, "failed to enable xo clk\n");
4648 + goto disable_regulators;
4649 + }
4650 +
4651 +@@ -93,43 +93,90 @@ void qcom_iris_disable(struct qcom_iris *iris)
4652 + regulator_bulk_disable(iris->num_vregs, iris->vregs);
4653 + }
4654 +
4655 +-static int qcom_iris_probe(struct platform_device *pdev)
4656 ++static const struct of_device_id iris_of_match[] = {
4657 ++ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
4658 ++ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
4659 ++ { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
4660 ++ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
4661 ++ {}
4662 ++};
4663 ++
4664 ++static void qcom_iris_release(struct device *dev)
4665 ++{
4666 ++ struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
4667 ++
4668 ++ of_node_put(iris->dev.of_node);
4669 ++ kfree(iris);
4670 ++}
4671 ++
4672 ++struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
4673 + {
4674 ++ const struct of_device_id *match;
4675 + const struct iris_data *data;
4676 +- struct qcom_wcnss *wcnss;
4677 ++ struct device_node *of_node;
4678 + struct qcom_iris *iris;
4679 + int ret;
4680 + int i;
4681 +
4682 +- iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
4683 +- if (!iris)
4684 +- return -ENOMEM;
4685 ++ of_node = of_get_child_by_name(parent->of_node, "iris");
4686 ++ if (!of_node) {
4687 ++ dev_err(parent, "No child node \"iris\" found\n");
4688 ++ return ERR_PTR(-EINVAL);
4689 ++ }
4690 ++
4691 ++ iris = kzalloc(sizeof(*iris), GFP_KERNEL);
4692 ++ if (!iris) {
4693 ++ of_node_put(of_node);
4694 ++ return ERR_PTR(-ENOMEM);
4695 ++ }
4696 ++
4697 ++ device_initialize(&iris->dev);
4698 ++ iris->dev.parent = parent;
4699 ++ iris->dev.release = qcom_iris_release;
4700 ++ iris->dev.of_node = of_node;
4701 ++
4702 ++ dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
4703 ++
4704 ++ ret = device_add(&iris->dev);
4705 ++ if (ret) {
4706 ++ put_device(&iris->dev);
4707 ++ return ERR_PTR(ret);
4708 ++ }
4709 ++
4710 ++ match = of_match_device(iris_of_match, &iris->dev);
4711 ++ if (!match) {
4712 ++ dev_err(&iris->dev, "no matching compatible for iris\n");
4713 ++ ret = -EINVAL;
4714 ++ goto err_device_del;
4715 ++ }
4716 +
4717 +- data = of_device_get_match_data(&pdev->dev);
4718 +- wcnss = dev_get_drvdata(pdev->dev.parent);
4719 ++ data = match->data;
4720 +
4721 +- iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
4722 ++ iris->xo_clk = devm_clk_get(&iris->dev, "xo");
4723 + if (IS_ERR(iris->xo_clk)) {
4724 +- if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
4725 +- dev_err(&pdev->dev, "failed to acquire xo clk\n");
4726 +- return PTR_ERR(iris->xo_clk);
4727 ++ ret = PTR_ERR(iris->xo_clk);
4728 ++ if (ret != -EPROBE_DEFER)
4729 ++ dev_err(&iris->dev, "failed to acquire xo clk\n");
4730 ++ goto err_device_del;
4731 + }
4732 +
4733 + iris->num_vregs = data->num_vregs;
4734 +- iris->vregs = devm_kcalloc(&pdev->dev,
4735 ++ iris->vregs = devm_kcalloc(&iris->dev,
4736 + iris->num_vregs,
4737 + sizeof(struct regulator_bulk_data),
4738 + GFP_KERNEL);
4739 +- if (!iris->vregs)
4740 +- return -ENOMEM;
4741 ++ if (!iris->vregs) {
4742 ++ ret = -ENOMEM;
4743 ++ goto err_device_del;
4744 ++ }
4745 +
4746 + for (i = 0; i < iris->num_vregs; i++)
4747 + iris->vregs[i].supply = data->vregs[i].name;
4748 +
4749 +- ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
4750 ++ ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
4751 + if (ret) {
4752 +- dev_err(&pdev->dev, "failed to get regulators\n");
4753 +- return ret;
4754 ++ dev_err(&iris->dev, "failed to get regulators\n");
4755 ++ goto err_device_del;
4756 + }
4757 +
4758 + for (i = 0; i < iris->num_vregs; i++) {
4759 +@@ -143,34 +190,17 @@ static int qcom_iris_probe(struct platform_device *pdev)
4760 + data->vregs[i].load_uA);
4761 + }
4762 +
4763 +- qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
4764 +-
4765 +- return 0;
4766 +-}
4767 ++ *use_48mhz_xo = data->use_48mhz_xo;
4768 +
4769 +-static int qcom_iris_remove(struct platform_device *pdev)
4770 +-{
4771 +- struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
4772 ++ return iris;
4773 +
4774 +- qcom_wcnss_assign_iris(wcnss, NULL, false);
4775 ++err_device_del:
4776 ++ device_del(&iris->dev);
4777 +
4778 +- return 0;
4779 ++ return ERR_PTR(ret);
4780 + }
4781 +
4782 +-static const struct of_device_id iris_of_match[] = {
4783 +- { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
4784 +- { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
4785 +- { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
4786 +- { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
4787 +- {}
4788 +-};
4789 +-MODULE_DEVICE_TABLE(of, iris_of_match);
4790 +-
4791 +-struct platform_driver qcom_iris_driver = {
4792 +- .probe = qcom_iris_probe,
4793 +- .remove = qcom_iris_remove,
4794 +- .driver = {
4795 +- .name = "qcom-iris",
4796 +- .of_match_table = iris_of_match,
4797 +- },
4798 +-};
4799 ++void qcom_iris_remove(struct qcom_iris *iris)
4800 ++{
4801 ++ device_del(&iris->dev);
4802 ++}
4803 +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
4804 +index 670fd8a2970e3..6545afb2f20eb 100644
4805 +--- a/drivers/rtc/rtc-cmos.c
4806 ++++ b/drivers/rtc/rtc-cmos.c
4807 +@@ -1053,7 +1053,9 @@ static void cmos_check_wkalrm(struct device *dev)
4808 + * ACK the rtc irq here
4809 + */
4810 + if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
4811 ++ local_irq_disable();
4812 + cmos_interrupt(0, (void *)cmos->rtc);
4813 ++ local_irq_enable();
4814 + return;
4815 + }
4816 +
4817 +diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
4818 +index 2f3515fa242a3..f3d5c7f4c13d2 100644
4819 +--- a/drivers/s390/char/sclp_early.c
4820 ++++ b/drivers/s390/char/sclp_early.c
4821 +@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
4822 + sclp.has_gisaf = !!(sccb->fac118 & 0x08);
4823 + sclp.has_hvs = !!(sccb->fac119 & 0x80);
4824 + sclp.has_kss = !!(sccb->fac98 & 0x01);
4825 +- sclp.has_sipl = !!(sccb->cbl & 0x4000);
4826 + if (sccb->fac85 & 0x02)
4827 + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
4828 + if (sccb->fac91 & 0x40)
4829 + S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
4830 + if (sccb->cpuoff > 134)
4831 + sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
4832 ++ if (sccb->cpuoff > 137)
4833 ++ sclp.has_sipl = !!(sccb->cbl & 0x4000);
4834 + sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
4835 + sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
4836 + sclp.rzm <<= 20;
4837 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
4838 +index 6414bd5741b87..a38b0c39ea4be 100644
4839 +--- a/drivers/vhost/net.c
4840 ++++ b/drivers/vhost/net.c
4841 +@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
4842 + .num = nvq->batched_xdp,
4843 + .ptr = nvq->xdp,
4844 + };
4845 +- int err;
4846 ++ int i, err;
4847 +
4848 + if (nvq->batched_xdp == 0)
4849 + goto signal_used;
4850 +@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
4851 + err = sock->ops->sendmsg(sock, msghdr, 0);
4852 + if (unlikely(err < 0)) {
4853 + vq_err(&nvq->vq, "Fail to batch sending packets\n");
4854 ++
4855 ++ /* free pages owned by XDP; since this is an unlikely error path,
4856 ++ * keep it simple and avoid more complex bulk update for the
4857 ++ * used pages
4858 ++ */
4859 ++ for (i = 0; i < nvq->batched_xdp; ++i)
4860 ++ put_page(virt_to_head_page(nvq->xdp[i].data));
4861 ++ nvq->batched_xdp = 0;
4862 ++ nvq->done_idx = 0;
4863 + return;
4864 + }
4865 +
4866 +diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
4867 +index a7df5bcca9da5..37aa5a6695309 100644
4868 +--- a/drivers/video/backlight/ktd253-backlight.c
4869 ++++ b/drivers/video/backlight/ktd253-backlight.c
4870 +@@ -25,6 +25,7 @@
4871 +
4872 + #define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
4873 + #define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
4874 ++#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us, now it doesn't look good */
4875 + #define KTD253_T_OFF_MS 3
4876 +
4877 + struct ktd253_backlight {
4878 +@@ -34,13 +35,50 @@ struct ktd253_backlight {
4879 + u16 ratio;
4880 + };
4881 +
4882 ++static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
4883 ++{
4884 ++ gpiod_set_value_cansleep(ktd253->gpiod, 1);
4885 ++ ndelay(KTD253_T_HIGH_NS);
4886 ++ /* We always fall back to this when we power on */
4887 ++}
4888 ++
4889 ++static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
4890 ++{
4891 ++ /*
4892 ++ * These GPIO operations absolutely can NOT sleep so no _cansleep
4893 ++ * suffixes, and no using GPIO expanders on slow buses for this!
4894 ++ *
4895 ++ * The maximum number of cycles of the loop is 32 so the time taken
4896 ++ * should nominally be:
4897 ++ * (T_LOW_NS + T_HIGH_NS + loop_time) * 32
4898 ++ *
4899 ++ * Architectures do not always support ndelay() and we will get a few us
4900 ++ * instead. If we get to a critical time limit an interrupt has likely
4901 ++ * occured in the low part of the loop and we need to restart from the
4902 ++ * top so we have the backlight in a known state.
4903 ++ */
4904 ++ u64 ns;
4905 ++
4906 ++ ns = ktime_get_ns();
4907 ++ gpiod_set_value(ktd253->gpiod, 0);
4908 ++ ndelay(KTD253_T_LOW_NS);
4909 ++ gpiod_set_value(ktd253->gpiod, 1);
4910 ++ ns = ktime_get_ns() - ns;
4911 ++ if (ns >= KTD253_T_OFF_CRIT_NS) {
4912 ++ dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
4913 ++ return -EAGAIN;
4914 ++ }
4915 ++ ndelay(KTD253_T_HIGH_NS);
4916 ++ return 0;
4917 ++}
4918 ++
4919 + static int ktd253_backlight_update_status(struct backlight_device *bl)
4920 + {
4921 + struct ktd253_backlight *ktd253 = bl_get_data(bl);
4922 + int brightness = backlight_get_brightness(bl);
4923 + u16 target_ratio;
4924 + u16 current_ratio = ktd253->ratio;
4925 +- unsigned long flags;
4926 ++ int ret;
4927 +
4928 + dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
4929 +
4930 +@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl)
4931 + }
4932 +
4933 + if (current_ratio == 0) {
4934 +- gpiod_set_value_cansleep(ktd253->gpiod, 1);
4935 +- ndelay(KTD253_T_HIGH_NS);
4936 +- /* We always fall back to this when we power on */
4937 ++ ktd253_backlight_set_max_ratio(ktd253);
4938 + current_ratio = KTD253_MAX_RATIO;
4939 + }
4940 +
4941 +- /*
4942 +- * WARNING:
4943 +- * The loop to set the correct current level is performed
4944 +- * with interrupts disabled as it is timing critical.
4945 +- * The maximum number of cycles of the loop is 32
4946 +- * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32,
4947 +- */
4948 +- local_irq_save(flags);
4949 + while (current_ratio != target_ratio) {
4950 + /*
4951 + * These GPIO operations absolutely can NOT sleep so no
4952 + * _cansleep suffixes, and no using GPIO expanders on
4953 + * slow buses for this!
4954 + */
4955 +- gpiod_set_value(ktd253->gpiod, 0);
4956 +- ndelay(KTD253_T_LOW_NS);
4957 +- gpiod_set_value(ktd253->gpiod, 1);
4958 +- ndelay(KTD253_T_HIGH_NS);
4959 +- /* After 1/32 we loop back to 32/32 */
4960 +- if (current_ratio == KTD253_MIN_RATIO)
4961 ++ ret = ktd253_backlight_stepdown(ktd253);
4962 ++ if (ret == -EAGAIN) {
4963 ++ /*
4964 ++ * Something disturbed the backlight setting code when
4965 ++ * running so we need to bring the PWM back to a known
4966 ++ * state. This shouldn't happen too much.
4967 ++ */
4968 ++ gpiod_set_value_cansleep(ktd253->gpiod, 0);
4969 ++ msleep(KTD253_T_OFF_MS);
4970 ++ ktd253_backlight_set_max_ratio(ktd253);
4971 ++ current_ratio = KTD253_MAX_RATIO;
4972 ++ } else if (current_ratio == KTD253_MIN_RATIO) {
4973 ++ /* After 1/32 we loop back to 32/32 */
4974 + current_ratio = KTD253_MAX_RATIO;
4975 +- else
4976 ++ } else {
4977 + current_ratio--;
4978 ++ }
4979 + }
4980 +- local_irq_restore(flags);
4981 + ktd253->ratio = current_ratio;
4982 +
4983 + dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
4984 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
4985 +index 3bab324852732..0cc07d957b643 100644
4986 +--- a/drivers/watchdog/watchdog_dev.c
4987 ++++ b/drivers/watchdog/watchdog_dev.c
4988 +@@ -1096,6 +1096,8 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
4989 + watchdog_stop(wdd);
4990 + }
4991 +
4992 ++ watchdog_hrtimer_pretimeout_stop(wdd);
4993 ++
4994 + mutex_lock(&wd_data->lock);
4995 + wd_data->wdd = NULL;
4996 + wdd->wd_data = NULL;
4997 +@@ -1103,7 +1105,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
4998 +
4999 + hrtimer_cancel(&wd_data->timer);
5000 + kthread_cancel_work_sync(&wd_data->work);
5001 +- watchdog_hrtimer_pretimeout_stop(wdd);
5002 +
5003 + put_device(&wd_data->dev);
5004 + }
5005 +@@ -1172,7 +1173,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
5006 +
5007 + wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
5008 +
5009 +- return __watchdog_ping(wdd);
5010 ++ if (watchdog_hw_running(wdd) && handle_boot_enabled)
5011 ++ return __watchdog_ping(wdd);
5012 ++
5013 ++ return 0;
5014 + }
5015 + EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
5016 +
5017 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
5018 +index 24d11861ac7d8..dbb18dc956f34 100644
5019 +--- a/drivers/xen/swiotlb-xen.c
5020 ++++ b/drivers/xen/swiotlb-xen.c
5021 +@@ -211,12 +211,11 @@ error:
5022 + if (repeat--) {
5023 + /* Min is 2MB */
5024 + nslabs = max(1024UL, (nslabs >> 1));
5025 +- pr_info("Lowering to %luMB\n",
5026 +- (nslabs << IO_TLB_SHIFT) >> 20);
5027 ++ bytes = nslabs << IO_TLB_SHIFT;
5028 ++ pr_info("Lowering to %luMB\n", bytes >> 20);
5029 + goto retry;
5030 + }
5031 + pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
5032 +- free_pages((unsigned long)start, order);
5033 + return rc;
5034 + }
5035 +
5036 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
5037 +index 1c8f79b3dd065..dde341a6388a1 100644
5038 +--- a/fs/fuse/dev.c
5039 ++++ b/fs/fuse/dev.c
5040 +@@ -288,10 +288,10 @@ void fuse_request_end(struct fuse_req *req)
5041 +
5042 + /*
5043 + * test_and_set_bit() implies smp_mb() between bit
5044 +- * changing and below intr_entry check. Pairs with
5045 ++ * changing and below FR_INTERRUPTED check. Pairs with
5046 + * smp_mb() from queue_interrupt().
5047 + */
5048 +- if (!list_empty(&req->intr_entry)) {
5049 ++ if (test_bit(FR_INTERRUPTED, &req->flags)) {
5050 + spin_lock(&fiq->lock);
5051 + list_del_init(&req->intr_entry);
5052 + spin_unlock(&fiq->lock);
5053 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5054 +index c5d4638f6d7fd..43aaa35664315 100644
5055 +--- a/fs/io_uring.c
5056 ++++ b/fs/io_uring.c
5057 +@@ -2683,7 +2683,8 @@ static bool io_file_supports_async(struct io_kiocb *req, int rw)
5058 + return __io_file_supports_async(req->file, rw);
5059 + }
5060 +
5061 +-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5062 ++static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5063 ++ int rw)
5064 + {
5065 + struct io_ring_ctx *ctx = req->ctx;
5066 + struct kiocb *kiocb = &req->rw.kiocb;
5067 +@@ -2705,8 +2706,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5068 + if (unlikely(ret))
5069 + return ret;
5070 +
5071 +- /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
5072 +- if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
5073 ++ /*
5074 ++ * If the file is marked O_NONBLOCK, still allow retry for it if it
5075 ++ * supports async. Otherwise it's impossible to use O_NONBLOCK files
5076 ++ * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
5077 ++ */
5078 ++ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
5079 ++ ((file->f_flags & O_NONBLOCK) && !io_file_supports_async(req, rw)))
5080 + req->flags |= REQ_F_NOWAIT;
5081 +
5082 + ioprio = READ_ONCE(sqe->ioprio);
5083 +@@ -3107,12 +3113,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
5084 + ret = nr;
5085 + break;
5086 + }
5087 ++ if (!iov_iter_is_bvec(iter)) {
5088 ++ iov_iter_advance(iter, nr);
5089 ++ } else {
5090 ++ req->rw.len -= nr;
5091 ++ req->rw.addr += nr;
5092 ++ }
5093 + ret += nr;
5094 + if (nr != iovec.iov_len)
5095 + break;
5096 +- req->rw.len -= nr;
5097 +- req->rw.addr += nr;
5098 +- iov_iter_advance(iter, nr);
5099 + }
5100 +
5101 + return ret;
5102 +@@ -3190,7 +3199,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5103 + {
5104 + if (unlikely(!(req->file->f_mode & FMODE_READ)))
5105 + return -EBADF;
5106 +- return io_prep_rw(req, sqe);
5107 ++ return io_prep_rw(req, sqe, READ);
5108 + }
5109 +
5110 + /*
5111 +@@ -3277,6 +3286,12 @@ static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
5112 + return -EINVAL;
5113 + }
5114 +
5115 ++static bool need_read_all(struct io_kiocb *req)
5116 ++{
5117 ++ return req->flags & REQ_F_ISREG ||
5118 ++ S_ISBLK(file_inode(req->file)->i_mode);
5119 ++}
5120 ++
5121 + static int io_read(struct io_kiocb *req, unsigned int issue_flags)
5122 + {
5123 + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
5124 +@@ -3331,7 +3346,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
5125 + } else if (ret == -EIOCBQUEUED) {
5126 + goto out_free;
5127 + } else if (ret <= 0 || ret == io_size || !force_nonblock ||
5128 +- (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
5129 ++ (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
5130 + /* read all, failed, already did sync or don't want to retry */
5131 + goto done;
5132 + }
5133 +@@ -3379,7 +3394,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5134 + {
5135 + if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
5136 + return -EBADF;
5137 +- return io_prep_rw(req, sqe);
5138 ++ return io_prep_rw(req, sqe, WRITE);
5139 + }
5140 +
5141 + static int io_write(struct io_kiocb *req, unsigned int issue_flags)
5142 +diff --git a/include/linux/pci.h b/include/linux/pci.h
5143 +index 540b377ca8f61..acbed2ecf6e8c 100644
5144 +--- a/include/linux/pci.h
5145 ++++ b/include/linux/pci.h
5146 +@@ -1740,8 +1740,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
5147 + static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
5148 + static inline int pci_assign_resource(struct pci_dev *dev, int i)
5149 + { return -EBUSY; }
5150 +-static inline int __pci_register_driver(struct pci_driver *drv,
5151 +- struct module *owner)
5152 ++static inline int __must_check __pci_register_driver(struct pci_driver *drv,
5153 ++ struct module *owner,
5154 ++ const char *mod_name)
5155 + { return 0; }
5156 + static inline int pci_register_driver(struct pci_driver *drv)
5157 + { return 0; }
5158 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
5159 +index 4bac1831de802..1a9b8589391c0 100644
5160 +--- a/include/linux/pci_ids.h
5161 ++++ b/include/linux/pci_ids.h
5162 +@@ -2451,7 +2451,8 @@
5163 + #define PCI_VENDOR_ID_TDI 0x192E
5164 + #define PCI_DEVICE_ID_TDI_EHCI 0x0101
5165 +
5166 +-#define PCI_VENDOR_ID_FREESCALE 0x1957
5167 ++#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
5168 ++#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
5169 + #define PCI_DEVICE_ID_MPC8308 0xc006
5170 + #define PCI_DEVICE_ID_MPC8315E 0x00b4
5171 + #define PCI_DEVICE_ID_MPC8315 0x00b5
5172 +diff --git a/include/linux/phylink.h b/include/linux/phylink.h
5173 +index afb3ded0b6912..237291196ce28 100644
5174 +--- a/include/linux/phylink.h
5175 ++++ b/include/linux/phylink.h
5176 +@@ -451,6 +451,9 @@ void phylink_mac_change(struct phylink *, bool up);
5177 + void phylink_start(struct phylink *);
5178 + void phylink_stop(struct phylink *);
5179 +
5180 ++void phylink_suspend(struct phylink *pl, bool mac_wol);
5181 ++void phylink_resume(struct phylink *pl);
5182 ++
5183 + void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
5184 + int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
5185 +
5186 +diff --git a/include/linux/sched.h b/include/linux/sched.h
5187 +index ec8d07d88641c..f6935787e7e8b 100644
5188 +--- a/include/linux/sched.h
5189 ++++ b/include/linux/sched.h
5190 +@@ -1394,6 +1394,7 @@ struct task_struct {
5191 + mce_whole_page : 1,
5192 + __mce_reserved : 62;
5193 + struct callback_head mce_kill_me;
5194 ++ int mce_count;
5195 + #endif
5196 +
5197 + #ifdef CONFIG_KRETPROBES
5198 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5199 +index b2db9cd9a73f3..4f7478c482738 100644
5200 +--- a/include/linux/skbuff.h
5201 ++++ b/include/linux/skbuff.h
5202 +@@ -1935,7 +1935,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
5203 + WRITE_ONCE(newsk->prev, prev);
5204 + WRITE_ONCE(next->prev, newsk);
5205 + WRITE_ONCE(prev->next, newsk);
5206 +- list->qlen++;
5207 ++ WRITE_ONCE(list->qlen, list->qlen + 1);
5208 + }
5209 +
5210 + static inline void __skb_queue_splice(const struct sk_buff_head *list,
5211 +diff --git a/include/net/dsa.h b/include/net/dsa.h
5212 +index 048d297623c9a..d833f717e8022 100644
5213 +--- a/include/net/dsa.h
5214 ++++ b/include/net/dsa.h
5215 +@@ -437,6 +437,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
5216 + return dp->type == DSA_PORT_TYPE_USER;
5217 + }
5218 +
5219 ++static inline bool dsa_port_is_unused(struct dsa_port *dp)
5220 ++{
5221 ++ return dp->type == DSA_PORT_TYPE_UNUSED;
5222 ++}
5223 ++
5224 + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
5225 + {
5226 + return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
5227 +diff --git a/include/net/flow.h b/include/net/flow.h
5228 +index 6f5e702400717..58beb16a49b8d 100644
5229 +--- a/include/net/flow.h
5230 ++++ b/include/net/flow.h
5231 +@@ -194,7 +194,7 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
5232 +
5233 + static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
5234 + {
5235 +- return &(flowi4_to_flowi(fl4)->u.__fl_common);
5236 ++ return &(fl4->__fl_common);
5237 + }
5238 +
5239 + static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
5240 +@@ -204,7 +204,7 @@ static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
5241 +
5242 + static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
5243 + {
5244 +- return &(flowi6_to_flowi(fl6)->u.__fl_common);
5245 ++ return &(fl6->__fl_common);
5246 + }
5247 +
5248 + static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
5249 +diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
5250 +index 79a699f106b14..ec88590b31984 100644
5251 +--- a/include/uapi/linux/pkt_sched.h
5252 ++++ b/include/uapi/linux/pkt_sched.h
5253 +@@ -827,6 +827,8 @@ struct tc_codel_xstats {
5254 +
5255 + /* FQ_CODEL */
5256 +
5257 ++#define FQ_CODEL_QUANTUM_MAX (1 << 20)
5258 ++
5259 + enum {
5260 + TCA_FQ_CODEL_UNSPEC,
5261 + TCA_FQ_CODEL_TARGET,
5262 +diff --git a/kernel/events/core.c b/kernel/events/core.c
5263 +index 1cb1f9b8392e2..e5c4aca620c58 100644
5264 +--- a/kernel/events/core.c
5265 ++++ b/kernel/events/core.c
5266 +@@ -10192,7 +10192,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
5267 + return;
5268 +
5269 + if (ifh->nr_file_filters) {
5270 +- mm = get_task_mm(event->ctx->task);
5271 ++ mm = get_task_mm(task);
5272 + if (!mm)
5273 + goto restart;
5274 +
5275 +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
5276 +index 94ef2d099e322..d713714cba67f 100644
5277 +--- a/kernel/trace/trace_boot.c
5278 ++++ b/kernel/trace/trace_boot.c
5279 +@@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
5280 + pr_err("Failed to apply filter: %s\n", buf);
5281 + }
5282 +
5283 +- xbc_node_for_each_array_value(enode, "actions", anode, p) {
5284 +- if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
5285 +- pr_err("action string is too long: %s\n", p);
5286 +- else if (trigger_process_regex(file, buf) < 0)
5287 +- pr_err("Failed to apply an action: %s\n", buf);
5288 +- }
5289 ++ if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
5290 ++ xbc_node_for_each_array_value(enode, "actions", anode, p) {
5291 ++ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
5292 ++ pr_err("action string is too long: %s\n", p);
5293 ++ else if (trigger_process_regex(file, buf) < 0)
5294 ++ pr_err("Failed to apply an action: %s\n", buf);
5295 ++ }
5296 ++ } else if (xbc_node_find_value(enode, "actions", NULL))
5297 ++ pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
5298 +
5299 + if (xbc_node_find_value(enode, "enable", NULL)) {
5300 + if (trace_event_enable_disable(file, 1, 0) < 0)
5301 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
5302 +index ea6178cb5e334..032191977e34c 100644
5303 +--- a/kernel/trace/trace_kprobe.c
5304 ++++ b/kernel/trace/trace_kprobe.c
5305 +@@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
5306 + /* Register new event */
5307 + ret = register_kprobe_event(tk);
5308 + if (ret) {
5309 +- pr_warn("Failed to register probe event(%d)\n", ret);
5310 ++ if (ret == -EEXIST) {
5311 ++ trace_probe_log_set_index(0);
5312 ++ trace_probe_log_err(0, EVENT_EXIST);
5313 ++ } else
5314 ++ pr_warn("Failed to register probe event(%d)\n", ret);
5315 + goto end;
5316 + }
5317 +
5318 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
5319 +index 15413ad7cef2b..0e29bb14fc8be 100644
5320 +--- a/kernel/trace/trace_probe.c
5321 ++++ b/kernel/trace/trace_probe.c
5322 +@@ -1029,11 +1029,36 @@ error:
5323 + return ret;
5324 + }
5325 +
5326 ++static struct trace_event_call *
5327 ++find_trace_event_call(const char *system, const char *event_name)
5328 ++{
5329 ++ struct trace_event_call *tp_event;
5330 ++ const char *name;
5331 ++
5332 ++ list_for_each_entry(tp_event, &ftrace_events, list) {
5333 ++ if (!tp_event->class->system ||
5334 ++ strcmp(system, tp_event->class->system))
5335 ++ continue;
5336 ++ name = trace_event_name(tp_event);
5337 ++ if (!name || strcmp(event_name, name))
5338 ++ continue;
5339 ++ return tp_event;
5340 ++ }
5341 ++
5342 ++ return NULL;
5343 ++}
5344 ++
5345 + int trace_probe_register_event_call(struct trace_probe *tp)
5346 + {
5347 + struct trace_event_call *call = trace_probe_event_call(tp);
5348 + int ret;
5349 +
5350 ++ lockdep_assert_held(&event_mutex);
5351 ++
5352 ++ if (find_trace_event_call(trace_probe_group_name(tp),
5353 ++ trace_probe_name(tp)))
5354 ++ return -EEXIST;
5355 ++
5356 + ret = register_trace_event(&call->event);
5357 + if (!ret)
5358 + return -ENODEV;
5359 +diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
5360 +index 227d518e5ba52..9f14186d132ed 100644
5361 +--- a/kernel/trace/trace_probe.h
5362 ++++ b/kernel/trace/trace_probe.h
5363 +@@ -399,6 +399,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
5364 + C(NO_EVENT_NAME, "Event name is not specified"), \
5365 + C(EVENT_TOO_LONG, "Event name is too long"), \
5366 + C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \
5367 ++ C(EVENT_EXIST, "Given group/event name is already used by another event"), \
5368 + C(RETVAL_ON_PROBE, "$retval is not available on probe"), \
5369 + C(BAD_STACK_NUM, "Invalid stack number"), \
5370 + C(BAD_ARG_NUM, "Invalid argument number"), \
5371 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
5372 +index 9b50869a5ddb5..957244ee07c8d 100644
5373 +--- a/kernel/trace/trace_uprobe.c
5374 ++++ b/kernel/trace/trace_uprobe.c
5375 +@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
5376 +
5377 + ret = register_uprobe_event(tu);
5378 + if (ret) {
5379 +- pr_warn("Failed to register probe event(%d)\n", ret);
5380 ++ if (ret == -EEXIST) {
5381 ++ trace_probe_log_set_index(0);
5382 ++ trace_probe_log_err(0, EVENT_EXIST);
5383 ++ } else
5384 ++ pr_warn("Failed to register probe event(%d)\n", ret);
5385 + goto end;
5386 + }
5387 +
5388 +diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
5389 +index 37b67194c0dfe..414dc5671c45e 100644
5390 +--- a/net/caif/chnl_net.c
5391 ++++ b/net/caif/chnl_net.c
5392 +@@ -53,20 +53,6 @@ struct chnl_net {
5393 + enum caif_states state;
5394 + };
5395 +
5396 +-static void robust_list_del(struct list_head *delete_node)
5397 +-{
5398 +- struct list_head *list_node;
5399 +- struct list_head *n;
5400 +- ASSERT_RTNL();
5401 +- list_for_each_safe(list_node, n, &chnl_net_list) {
5402 +- if (list_node == delete_node) {
5403 +- list_del(list_node);
5404 +- return;
5405 +- }
5406 +- }
5407 +- WARN_ON(1);
5408 +-}
5409 +-
5410 + static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
5411 + {
5412 + struct sk_buff *skb;
5413 +@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
5414 + ASSERT_RTNL();
5415 + priv = netdev_priv(dev);
5416 + strncpy(priv->name, dev->name, sizeof(priv->name));
5417 ++ INIT_LIST_HEAD(&priv->list_field);
5418 + return 0;
5419 + }
5420 +
5421 +@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
5422 + struct chnl_net *priv;
5423 + ASSERT_RTNL();
5424 + priv = netdev_priv(dev);
5425 +- robust_list_del(&priv->list_field);
5426 ++ list_del_init(&priv->list_field);
5427 + }
5428 +
5429 + static const struct net_device_ops netdev_ops = {
5430 +@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
5431 + rtnl_lock();
5432 + list_for_each_safe(list_node, _tmp, &chnl_net_list) {
5433 + dev = list_entry(list_node, struct chnl_net, list_field);
5434 +- list_del(list_node);
5435 ++ list_del_init(list_node);
5436 + delete_device(dev);
5437 + }
5438 + rtnl_unlock();
5439 +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
5440 +index c5c74a34d139d..91e7a22026971 100644
5441 +--- a/net/dccp/minisocks.c
5442 ++++ b/net/dccp/minisocks.c
5443 +@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
5444 + newdp->dccps_role = DCCP_ROLE_SERVER;
5445 + newdp->dccps_hc_rx_ackvec = NULL;
5446 + newdp->dccps_service_list = NULL;
5447 ++ newdp->dccps_hc_rx_ccid = NULL;
5448 ++ newdp->dccps_hc_tx_ccid = NULL;
5449 + newdp->dccps_service = dreq->dreq_service;
5450 + newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
5451 + newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
5452 +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
5453 +index 84cad1be9ce48..e058a2e320e35 100644
5454 +--- a/net/dsa/dsa.c
5455 ++++ b/net/dsa/dsa.c
5456 +@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
5457 + return queue_work(dsa_owq, work);
5458 + }
5459 +
5460 ++void dsa_flush_workqueue(void)
5461 ++{
5462 ++ flush_workqueue(dsa_owq);
5463 ++}
5464 ++
5465 + int dsa_devlink_param_get(struct devlink *dl, u32 id,
5466 + struct devlink_param_gset_ctx *ctx)
5467 + {
5468 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
5469 +index 185629f27f803..79267b00af68f 100644
5470 +--- a/net/dsa/dsa2.c
5471 ++++ b/net/dsa/dsa2.c
5472 +@@ -809,6 +809,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
5473 + ds->setup = false;
5474 + }
5475 +
5476 ++/* First tear down the non-shared, then the shared ports. This ensures that
5477 ++ * all work items scheduled by our switchdev handlers for user ports have
5478 ++ * completed before we destroy the refcounting kept on the shared ports.
5479 ++ */
5480 ++static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
5481 ++{
5482 ++ struct dsa_port *dp;
5483 ++
5484 ++ list_for_each_entry(dp, &dst->ports, list)
5485 ++ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
5486 ++ dsa_port_teardown(dp);
5487 ++
5488 ++ dsa_flush_workqueue();
5489 ++
5490 ++ list_for_each_entry(dp, &dst->ports, list)
5491 ++ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
5492 ++ dsa_port_teardown(dp);
5493 ++}
5494 ++
5495 ++static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
5496 ++{
5497 ++ struct dsa_port *dp;
5498 ++
5499 ++ list_for_each_entry(dp, &dst->ports, list)
5500 ++ dsa_switch_teardown(dp->ds);
5501 ++}
5502 ++
5503 + static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
5504 + {
5505 + struct dsa_port *dp;
5506 +@@ -835,26 +862,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
5507 + return 0;
5508 +
5509 + teardown:
5510 +- list_for_each_entry(dp, &dst->ports, list)
5511 +- dsa_port_teardown(dp);
5512 ++ dsa_tree_teardown_ports(dst);
5513 +
5514 +- list_for_each_entry(dp, &dst->ports, list)
5515 +- dsa_switch_teardown(dp->ds);
5516 ++ dsa_tree_teardown_switches(dst);
5517 +
5518 + return err;
5519 + }
5520 +
5521 +-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
5522 +-{
5523 +- struct dsa_port *dp;
5524 +-
5525 +- list_for_each_entry(dp, &dst->ports, list)
5526 +- dsa_port_teardown(dp);
5527 +-
5528 +- list_for_each_entry(dp, &dst->ports, list)
5529 +- dsa_switch_teardown(dp->ds);
5530 +-}
5531 +-
5532 + static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
5533 + {
5534 + struct dsa_port *dp;
5535 +@@ -964,6 +978,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
5536 +
5537 + dsa_tree_teardown_master(dst);
5538 +
5539 ++ dsa_tree_teardown_ports(dst);
5540 ++
5541 + dsa_tree_teardown_switches(dst);
5542 +
5543 + dsa_tree_teardown_default_cpu(dst);
5544 +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
5545 +index cddf7cb0f398f..6c00557ca9bf4 100644
5546 +--- a/net/dsa/dsa_priv.h
5547 ++++ b/net/dsa/dsa_priv.h
5548 +@@ -158,6 +158,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
5549 + const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
5550 +
5551 + bool dsa_schedule_work(struct work_struct *work);
5552 ++void dsa_flush_workqueue(void);
5553 + const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
5554 +
5555 + static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
5556 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
5557 +index b34116b15d436..527fc20d47adf 100644
5558 +--- a/net/dsa/slave.c
5559 ++++ b/net/dsa/slave.c
5560 +@@ -1784,13 +1784,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
5561 + * use the switch internal MDIO bus instead
5562 + */
5563 + ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
5564 +- if (ret) {
5565 +- netdev_err(slave_dev,
5566 +- "failed to connect to port %d: %d\n",
5567 +- dp->index, ret);
5568 +- phylink_destroy(dp->pl);
5569 +- return ret;
5570 +- }
5571 ++ }
5572 ++ if (ret) {
5573 ++ netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
5574 ++ ERR_PTR(ret));
5575 ++ phylink_destroy(dp->pl);
5576 + }
5577 +
5578 + return ret;
5579 +diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
5580 +index 57c46b4ab2b3f..e34b80fa52e1d 100644
5581 +--- a/net/dsa/tag_rtl4_a.c
5582 ++++ b/net/dsa/tag_rtl4_a.c
5583 +@@ -54,9 +54,10 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
5584 + p = (__be16 *)tag;
5585 + *p = htons(RTL4_A_ETHERTYPE);
5586 +
5587 +- out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8);
5588 +- /* The lower bits is the port number */
5589 +- out |= (u8)dp->index;
5590 ++ out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
5591 ++ /* The lower bits indicate the port number */
5592 ++ out |= BIT(dp->index);
5593 ++
5594 + p = (__be16 *)(tag + 2);
5595 + *p = htons(out);
5596 +
5597 +diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
5598 +index 6134b180f59f8..af011534bcb24 100644
5599 +--- a/net/ethtool/ioctl.c
5600 ++++ b/net/ethtool/ioctl.c
5601 +@@ -906,7 +906,7 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
5602 + rule_buf);
5603 + useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
5604 + } else {
5605 +- ret = copy_to_user(useraddr, &rxnfc, size);
5606 ++ ret = copy_to_user(useraddr, rxnfc, size);
5607 + useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
5608 + }
5609 +
5610 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
5611 +index 7fbd0b532f529..099259fc826aa 100644
5612 +--- a/net/ipv4/cipso_ipv4.c
5613 ++++ b/net/ipv4/cipso_ipv4.c
5614 +@@ -465,16 +465,14 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
5615 + if (!doi_def)
5616 + return;
5617 +
5618 +- if (doi_def->map.std) {
5619 +- switch (doi_def->type) {
5620 +- case CIPSO_V4_MAP_TRANS:
5621 +- kfree(doi_def->map.std->lvl.cipso);
5622 +- kfree(doi_def->map.std->lvl.local);
5623 +- kfree(doi_def->map.std->cat.cipso);
5624 +- kfree(doi_def->map.std->cat.local);
5625 +- kfree(doi_def->map.std);
5626 +- break;
5627 +- }
5628 ++ switch (doi_def->type) {
5629 ++ case CIPSO_V4_MAP_TRANS:
5630 ++ kfree(doi_def->map.std->lvl.cipso);
5631 ++ kfree(doi_def->map.std->lvl.local);
5632 ++ kfree(doi_def->map.std->cat.cipso);
5633 ++ kfree(doi_def->map.std->cat.local);
5634 ++ kfree(doi_def->map.std);
5635 ++ break;
5636 + }
5637 + kfree(doi_def);
5638 + }
5639 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5640 +index 95419b7adf5ce..6480c6dfe1bf9 100644
5641 +--- a/net/ipv4/ip_gre.c
5642 ++++ b/net/ipv4/ip_gre.c
5643 +@@ -473,8 +473,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
5644 +
5645 + static int gre_handle_offloads(struct sk_buff *skb, bool csum)
5646 + {
5647 +- if (csum && skb_checksum_start(skb) < skb->data)
5648 +- return -EINVAL;
5649 + return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
5650 + }
5651 +
5652 +@@ -632,15 +630,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
5653 + }
5654 +
5655 + if (dev->header_ops) {
5656 ++ const int pull_len = tunnel->hlen + sizeof(struct iphdr);
5657 ++
5658 + if (skb_cow_head(skb, 0))
5659 + goto free_skb;
5660 +
5661 + tnl_params = (const struct iphdr *)skb->data;
5662 +
5663 ++ if (pull_len > skb_transport_offset(skb))
5664 ++ goto free_skb;
5665 ++
5666 + /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
5667 + * to gre header.
5668 + */
5669 +- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
5670 ++ skb_pull(skb, pull_len);
5671 + skb_reset_mac_header(skb);
5672 + } else {
5673 + if (skb_cow_head(skb, dev->needed_headroom))
5674 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
5675 +index 4075230b14c63..75ca4b6e484f4 100644
5676 +--- a/net/ipv4/nexthop.c
5677 ++++ b/net/ipv4/nexthop.c
5678 +@@ -2490,6 +2490,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
5679 + .fc_gw4 = cfg->gw.ipv4,
5680 + .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
5681 + .fc_flags = cfg->nh_flags,
5682 ++ .fc_nlinfo = cfg->nlinfo,
5683 + .fc_encap = cfg->nh_encap,
5684 + .fc_encap_type = cfg->nh_encap_type,
5685 + };
5686 +@@ -2528,6 +2529,7 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
5687 + .fc_ifindex = cfg->nh_ifindex,
5688 + .fc_gateway = cfg->gw.ipv6,
5689 + .fc_flags = cfg->nh_flags,
5690 ++ .fc_nlinfo = cfg->nlinfo,
5691 + .fc_encap = cfg->nh_encap,
5692 + .fc_encap_type = cfg->nh_encap_type,
5693 + .fc_is_fdb = cfg->nh_fdb,
5694 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5695 +index 149ceb5c94ffc..66d9085da87ed 100644
5696 +--- a/net/ipv4/tcp_input.c
5697 ++++ b/net/ipv4/tcp_input.c
5698 +@@ -1314,7 +1314,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
5699 + if (dup_sack && (sacked & TCPCB_RETRANS)) {
5700 + if (tp->undo_marker && tp->undo_retrans > 0 &&
5701 + after(end_seq, tp->undo_marker))
5702 +- tp->undo_retrans--;
5703 ++ tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
5704 + if ((sacked & TCPCB_SACKED_ACKED) &&
5705 + before(start_seq, state->reord))
5706 + state->reord = start_seq;
5707 +diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
5708 +index 0d122edc368dd..b91003538d87a 100644
5709 +--- a/net/ipv4/udp_tunnel_nic.c
5710 ++++ b/net/ipv4/udp_tunnel_nic.c
5711 +@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
5712 + {
5713 + int err;
5714 +
5715 +- udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
5716 ++ udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
5717 + if (!udp_tunnel_nic_workqueue)
5718 + return -ENOMEM;
5719 +
5720 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5721 +index 7a5e90e093630..bc224f917bbd5 100644
5722 +--- a/net/ipv6/ip6_gre.c
5723 ++++ b/net/ipv6/ip6_gre.c
5724 +@@ -629,8 +629,6 @@ drop:
5725 +
5726 + static int gre_handle_offloads(struct sk_buff *skb, bool csum)
5727 + {
5728 +- if (csum && skb_checksum_start(skb) < skb->data)
5729 +- return -EINVAL;
5730 + return iptunnel_handle_offloads(skb,
5731 + csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
5732 + }
5733 +diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
5734 +index 6fd54744cbc38..aa5bb8789ba0b 100644
5735 +--- a/net/ipv6/netfilter/nf_socket_ipv6.c
5736 ++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
5737 +@@ -99,7 +99,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
5738 + {
5739 + __be16 dport, sport;
5740 + const struct in6_addr *daddr = NULL, *saddr = NULL;
5741 +- struct ipv6hdr *iph = ipv6_hdr(skb);
5742 ++ struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
5743 + struct sk_buff *data_skb = NULL;
5744 + int doff = 0;
5745 + int thoff = 0, tproto;
5746 +@@ -129,8 +129,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
5747 + thoff + sizeof(*hp);
5748 +
5749 + } else if (tproto == IPPROTO_ICMPV6) {
5750 +- struct ipv6hdr ipv6_var;
5751 +-
5752 + if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
5753 + &sport, &dport, &ipv6_var))
5754 + return NULL;
5755 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
5756 +index 53486b162f01c..93271a2632b8e 100644
5757 +--- a/net/l2tp/l2tp_core.c
5758 ++++ b/net/l2tp/l2tp_core.c
5759 +@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
5760 + }
5761 +
5762 + if (tunnel->version == L2TP_HDR_VER_3 &&
5763 +- l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
5764 ++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
5765 ++ l2tp_session_dec_refcount(session);
5766 + goto invalid;
5767 ++ }
5768 +
5769 + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
5770 + l2tp_session_dec_refcount(session);
5771 +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
5772 +index 7b37944597833..89251cbe9f1a7 100644
5773 +--- a/net/mptcp/pm_netlink.c
5774 ++++ b/net/mptcp/pm_netlink.c
5775 +@@ -540,7 +540,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
5776 + subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
5777 + if (subflow) {
5778 + struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
5779 +- bool slow;
5780 +
5781 + spin_unlock_bh(&msk->pm.lock);
5782 + pr_debug("send ack for %s%s%s",
5783 +@@ -548,9 +547,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
5784 + mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
5785 + mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
5786 +
5787 +- slow = lock_sock_fast(ssk);
5788 +- tcp_send_ack(ssk);
5789 +- unlock_sock_fast(ssk, slow);
5790 ++ mptcp_subflow_send_ack(ssk);
5791 + spin_lock_bh(&msk->pm.lock);
5792 + }
5793 + }
5794 +@@ -567,7 +564,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
5795 + struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
5796 + struct sock *sk = (struct sock *)msk;
5797 + struct mptcp_addr_info local;
5798 +- bool slow;
5799 +
5800 + local_address((struct sock_common *)ssk, &local);
5801 + if (!addresses_equal(&local, addr, addr->port))
5802 +@@ -580,9 +576,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
5803 +
5804 + spin_unlock_bh(&msk->pm.lock);
5805 + pr_debug("send ack for mp_prio");
5806 +- slow = lock_sock_fast(ssk);
5807 +- tcp_send_ack(ssk);
5808 +- unlock_sock_fast(ssk, slow);
5809 ++ mptcp_subflow_send_ack(ssk);
5810 + spin_lock_bh(&msk->pm.lock);
5811 +
5812 + return 0;
5813 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
5814 +index a889249478152..acbead7cf50f0 100644
5815 +--- a/net/mptcp/protocol.c
5816 ++++ b/net/mptcp/protocol.c
5817 +@@ -427,19 +427,22 @@ static bool tcp_can_send_ack(const struct sock *ssk)
5818 + (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
5819 + }
5820 +
5821 ++void mptcp_subflow_send_ack(struct sock *ssk)
5822 ++{
5823 ++ bool slow;
5824 ++
5825 ++ slow = lock_sock_fast(ssk);
5826 ++ if (tcp_can_send_ack(ssk))
5827 ++ tcp_send_ack(ssk);
5828 ++ unlock_sock_fast(ssk, slow);
5829 ++}
5830 ++
5831 + static void mptcp_send_ack(struct mptcp_sock *msk)
5832 + {
5833 + struct mptcp_subflow_context *subflow;
5834 +
5835 +- mptcp_for_each_subflow(msk, subflow) {
5836 +- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
5837 +- bool slow;
5838 +-
5839 +- slow = lock_sock_fast(ssk);
5840 +- if (tcp_can_send_ack(ssk))
5841 +- tcp_send_ack(ssk);
5842 +- unlock_sock_fast(ssk, slow);
5843 +- }
5844 ++ mptcp_for_each_subflow(msk, subflow)
5845 ++ mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
5846 + }
5847 +
5848 + static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
5849 +@@ -994,6 +997,13 @@ static void mptcp_wmem_uncharge(struct sock *sk, int size)
5850 + msk->wmem_reserved += size;
5851 + }
5852 +
5853 ++static void __mptcp_mem_reclaim_partial(struct sock *sk)
5854 ++{
5855 ++ lockdep_assert_held_once(&sk->sk_lock.slock);
5856 ++ __mptcp_update_wmem(sk);
5857 ++ sk_mem_reclaim_partial(sk);
5858 ++}
5859 ++
5860 + static void mptcp_mem_reclaim_partial(struct sock *sk)
5861 + {
5862 + struct mptcp_sock *msk = mptcp_sk(sk);
5863 +@@ -1069,12 +1079,8 @@ static void __mptcp_clean_una(struct sock *sk)
5864 + }
5865 +
5866 + out:
5867 +- if (cleaned) {
5868 +- if (tcp_under_memory_pressure(sk)) {
5869 +- __mptcp_update_wmem(sk);
5870 +- sk_mem_reclaim_partial(sk);
5871 +- }
5872 +- }
5873 ++ if (cleaned && tcp_under_memory_pressure(sk))
5874 ++ __mptcp_mem_reclaim_partial(sk);
5875 +
5876 + if (snd_una == READ_ONCE(msk->snd_nxt)) {
5877 + if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
5878 +@@ -1154,6 +1160,7 @@ struct mptcp_sendmsg_info {
5879 + u16 limit;
5880 + u16 sent;
5881 + unsigned int flags;
5882 ++ bool data_lock_held;
5883 + };
5884 +
5885 + static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
5886 +@@ -1225,17 +1232,17 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
5887 + return false;
5888 + }
5889 +
5890 +-static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
5891 ++static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
5892 + {
5893 +- return !ssk->sk_tx_skb_cache &&
5894 +- tcp_under_memory_pressure(sk);
5895 +-}
5896 ++ gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
5897 +
5898 +-static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
5899 +-{
5900 +- if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
5901 +- mptcp_mem_reclaim_partial(sk);
5902 +- return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
5903 ++ if (unlikely(tcp_under_memory_pressure(sk))) {
5904 ++ if (data_lock_held)
5905 ++ __mptcp_mem_reclaim_partial(sk);
5906 ++ else
5907 ++ mptcp_mem_reclaim_partial(sk);
5908 ++ }
5909 ++ return __mptcp_alloc_tx_skb(sk, ssk, gfp);
5910 + }
5911 +
5912 + /* note: this always recompute the csum on the whole skb, even
5913 +@@ -1259,7 +1266,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
5914 + bool zero_window_probe = false;
5915 + struct mptcp_ext *mpext = NULL;
5916 + struct sk_buff *skb, *tail;
5917 +- bool can_collapse = false;
5918 ++ bool must_collapse = false;
5919 + int size_bias = 0;
5920 + int avail_size;
5921 + size_t ret = 0;
5922 +@@ -1279,16 +1286,24 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
5923 + * SSN association set here
5924 + */
5925 + mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
5926 +- can_collapse = (info->size_goal - skb->len > 0) &&
5927 +- mptcp_skb_can_collapse_to(data_seq, skb, mpext);
5928 +- if (!can_collapse) {
5929 ++ if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
5930 + TCP_SKB_CB(skb)->eor = 1;
5931 +- } else {
5932 ++ goto alloc_skb;
5933 ++ }
5934 ++
5935 ++ must_collapse = (info->size_goal - skb->len > 0) &&
5936 ++ (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
5937 ++ if (must_collapse) {
5938 + size_bias = skb->len;
5939 + avail_size = info->size_goal - skb->len;
5940 + }
5941 + }
5942 +
5943 ++alloc_skb:
5944 ++ if (!must_collapse && !ssk->sk_tx_skb_cache &&
5945 ++ !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
5946 ++ return 0;
5947 ++
5948 + /* Zero window and all data acked? Probe. */
5949 + avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
5950 + if (avail_size == 0) {
5951 +@@ -1318,7 +1333,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
5952 + if (skb == tail) {
5953 + TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
5954 + mpext->data_len += ret;
5955 +- WARN_ON_ONCE(!can_collapse);
5956 + WARN_ON_ONCE(zero_window_probe);
5957 + goto out;
5958 + }
5959 +@@ -1470,15 +1484,6 @@ static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
5960 + if (ssk != prev_ssk || !prev_ssk)
5961 + lock_sock(ssk);
5962 +
5963 +- /* keep it simple and always provide a new skb for the
5964 +- * subflow, even if we will not use it when collapsing
5965 +- * on the pending one
5966 +- */
5967 +- if (!mptcp_alloc_tx_skb(sk, ssk)) {
5968 +- mptcp_push_release(sk, ssk, &info);
5969 +- goto out;
5970 +- }
5971 +-
5972 + ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
5973 + if (ret <= 0) {
5974 + mptcp_push_release(sk, ssk, &info);
5975 +@@ -1512,7 +1517,9 @@ out:
5976 + static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
5977 + {
5978 + struct mptcp_sock *msk = mptcp_sk(sk);
5979 +- struct mptcp_sendmsg_info info;
5980 ++ struct mptcp_sendmsg_info info = {
5981 ++ .data_lock_held = true,
5982 ++ };
5983 + struct mptcp_data_frag *dfrag;
5984 + struct sock *xmit_ssk;
5985 + int len, copied = 0;
5986 +@@ -1538,13 +1545,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
5987 + goto out;
5988 + }
5989 +
5990 +- if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
5991 +- __mptcp_update_wmem(sk);
5992 +- sk_mem_reclaim_partial(sk);
5993 +- }
5994 +- if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
5995 +- goto out;
5996 +-
5997 + ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
5998 + if (ret <= 0)
5999 + goto out;
6000 +@@ -2296,9 +2296,6 @@ static void __mptcp_retrans(struct sock *sk)
6001 + info.sent = 0;
6002 + info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
6003 + while (info.sent < info.limit) {
6004 +- if (!mptcp_alloc_tx_skb(sk, ssk))
6005 +- break;
6006 +-
6007 + ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
6008 + if (ret <= 0)
6009 + break;
6010 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
6011 +index 0f0c026c5f8bb..6ac564d584c19 100644
6012 +--- a/net/mptcp/protocol.h
6013 ++++ b/net/mptcp/protocol.h
6014 +@@ -560,6 +560,7 @@ void __init mptcp_subflow_init(void);
6015 + void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
6016 + void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
6017 + struct mptcp_subflow_context *subflow);
6018 ++void mptcp_subflow_send_ack(struct sock *ssk);
6019 + void mptcp_subflow_reset(struct sock *ssk);
6020 + void mptcp_sock_graft(struct sock *sk, struct socket *parent);
6021 + struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
6022 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
6023 +index 337e22d8b40b1..99b1de14ff7ee 100644
6024 +--- a/net/netfilter/nft_ct.c
6025 ++++ b/net/netfilter/nft_ct.c
6026 +@@ -41,6 +41,7 @@ struct nft_ct_helper_obj {
6027 + #ifdef CONFIG_NF_CONNTRACK_ZONES
6028 + static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
6029 + static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
6030 ++static DEFINE_MUTEX(nft_ct_pcpu_mutex);
6031 + #endif
6032 +
6033 + static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c,
6034 +@@ -525,8 +526,10 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
6035 + #endif
6036 + #ifdef CONFIG_NF_CONNTRACK_ZONES
6037 + case NFT_CT_ZONE:
6038 ++ mutex_lock(&nft_ct_pcpu_mutex);
6039 + if (--nft_ct_pcpu_template_refcnt == 0)
6040 + nft_ct_tmpl_put_pcpu();
6041 ++ mutex_unlock(&nft_ct_pcpu_mutex);
6042 + break;
6043 + #endif
6044 + default:
6045 +@@ -564,9 +567,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
6046 + #endif
6047 + #ifdef CONFIG_NF_CONNTRACK_ZONES
6048 + case NFT_CT_ZONE:
6049 +- if (!nft_ct_tmpl_alloc_pcpu())
6050 ++ mutex_lock(&nft_ct_pcpu_mutex);
6051 ++ if (!nft_ct_tmpl_alloc_pcpu()) {
6052 ++ mutex_unlock(&nft_ct_pcpu_mutex);
6053 + return -ENOMEM;
6054 ++ }
6055 + nft_ct_pcpu_template_refcnt++;
6056 ++ mutex_unlock(&nft_ct_pcpu_mutex);
6057 + len = sizeof(u16);
6058 + break;
6059 + #endif
6060 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
6061 +index bdbda61db8b96..d3c0cae813c65 100644
6062 +--- a/net/qrtr/qrtr.c
6063 ++++ b/net/qrtr/qrtr.c
6064 +@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
6065 + goto err;
6066 + }
6067 +
6068 +- if (!size || size & 3 || len != size + hdrlen)
6069 ++ if (!size || len != ALIGN(size, 4) + hdrlen)
6070 + goto err;
6071 +
6072 + if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
6073 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
6074 +index bbd5f87536006..99e8db2621984 100644
6075 +--- a/net/sched/sch_fq_codel.c
6076 ++++ b/net/sched/sch_fq_codel.c
6077 +@@ -369,6 +369,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
6078 + {
6079 + struct fq_codel_sched_data *q = qdisc_priv(sch);
6080 + struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
6081 ++ u32 quantum = 0;
6082 + int err;
6083 +
6084 + if (!opt)
6085 +@@ -386,6 +387,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
6086 + q->flows_cnt > 65536)
6087 + return -EINVAL;
6088 + }
6089 ++ if (tb[TCA_FQ_CODEL_QUANTUM]) {
6090 ++ quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
6091 ++ if (quantum > FQ_CODEL_QUANTUM_MAX) {
6092 ++ NL_SET_ERR_MSG(extack, "Invalid quantum");
6093 ++ return -EINVAL;
6094 ++ }
6095 ++ }
6096 + sch_tree_lock(sch);
6097 +
6098 + if (tb[TCA_FQ_CODEL_TARGET]) {
6099 +@@ -412,8 +420,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
6100 + if (tb[TCA_FQ_CODEL_ECN])
6101 + q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
6102 +
6103 +- if (tb[TCA_FQ_CODEL_QUANTUM])
6104 +- q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
6105 ++ if (quantum)
6106 ++ q->quantum = quantum;
6107 +
6108 + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
6109 + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
6110 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
6111 +index a155cfaf01f2e..50762be9c115e 100644
6112 +--- a/net/tipc/socket.c
6113 ++++ b/net/tipc/socket.c
6114 +@@ -1979,10 +1979,12 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
6115 + tipc_node_distr_xmit(sock_net(sk), &xmitq);
6116 + }
6117 +
6118 +- if (!skb_cb->bytes_read)
6119 +- tsk_advance_rx_queue(sk);
6120 ++ if (skb_cb->bytes_read)
6121 ++ goto exit;
6122 ++
6123 ++ tsk_advance_rx_queue(sk);
6124 +
6125 +- if (likely(!connected) || skb_cb->bytes_read)
6126 ++ if (likely(!connected))
6127 + goto exit;
6128 +
6129 + /* Send connection flow control advertisement when applicable */
6130 +@@ -2421,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
6131 + static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
6132 + u32 dport, struct sk_buff_head *xmitq)
6133 + {
6134 +- unsigned long time_limit = jiffies + 2;
6135 ++ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
6136 + struct sk_buff *skb;
6137 + unsigned int lim;
6138 + atomic_t *dcnt;
6139 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6140 +index ba7ced947e51c..91ff09d833e8f 100644
6141 +--- a/net/unix/af_unix.c
6142 ++++ b/net/unix/af_unix.c
6143 +@@ -2774,7 +2774,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
6144 +
6145 + other = unix_peer(sk);
6146 + if (other && unix_peer(other) != sk &&
6147 +- unix_recvq_full(other) &&
6148 ++ unix_recvq_full_lockless(other) &&
6149 + unix_dgram_peer_wake_me(sk, other))
6150 + writable = 0;
6151 +
6152 +diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
6153 +index b7e9ecf16e569..a70cd064bfc4b 100755
6154 +--- a/scripts/clang-tools/gen_compile_commands.py
6155 ++++ b/scripts/clang-tools/gen_compile_commands.py
6156 +@@ -13,6 +13,7 @@ import logging
6157 + import os
6158 + import re
6159 + import subprocess
6160 ++import sys
6161 +
6162 + _DEFAULT_OUTPUT = 'compile_commands.json'
6163 + _DEFAULT_LOG_LEVEL = 'WARNING'
6164 +diff --git a/tools/build/Makefile b/tools/build/Makefile
6165 +index 5ed41b96fcded..6f11e6fc9ffe3 100644
6166 +--- a/tools/build/Makefile
6167 ++++ b/tools/build/Makefile
6168 +@@ -32,7 +32,7 @@ all: $(OUTPUT)fixdep
6169 +
6170 + # Make sure there's anything to clean,
6171 + # feature contains check for existing OUTPUT
6172 +-TMP_O := $(if $(OUTPUT),$(OUTPUT)/feature,./)
6173 ++TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
6174 +
6175 + clean:
6176 + $(call QUIET_CLEAN, fixdep)
6177 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
6178 +index eb8e487ef90b0..29ffd57f5cd8d 100644
6179 +--- a/tools/perf/Makefile.config
6180 ++++ b/tools/perf/Makefile.config
6181 +@@ -133,10 +133,10 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
6182 + FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
6183 + FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
6184 +
6185 +-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
6186 +-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
6187 +-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
6188 +-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
6189 ++FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
6190 ++FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
6191 ++FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
6192 ++FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
6193 +
6194 + FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
6195 +
6196 +diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
6197 +index 55d373b75791b..17672790f1231 100644
6198 +--- a/tools/perf/bench/inject-buildid.c
6199 ++++ b/tools/perf/bench/inject-buildid.c
6200 +@@ -133,7 +133,7 @@ static u64 dso_map_addr(struct bench_dso *dso)
6201 + return 0x400000ULL + dso->ino * 8192ULL;
6202 + }
6203 +
6204 +-static u32 synthesize_attr(struct bench_data *data)
6205 ++static ssize_t synthesize_attr(struct bench_data *data)
6206 + {
6207 + union perf_event event;
6208 +
6209 +@@ -151,7 +151,7 @@ static u32 synthesize_attr(struct bench_data *data)
6210 + return writen(data->input_pipe[1], &event, event.header.size);
6211 + }
6212 +
6213 +-static u32 synthesize_fork(struct bench_data *data)
6214 ++static ssize_t synthesize_fork(struct bench_data *data)
6215 + {
6216 + union perf_event event;
6217 +
6218 +@@ -169,8 +169,7 @@ static u32 synthesize_fork(struct bench_data *data)
6219 + return writen(data->input_pipe[1], &event, event.header.size);
6220 + }
6221 +
6222 +-static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
6223 +- u64 timestamp)
6224 ++static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
6225 + {
6226 + union perf_event event;
6227 + size_t len = offsetof(struct perf_record_mmap2, filename);
6228 +@@ -198,23 +197,25 @@ static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
6229 +
6230 + if (len > sizeof(event.mmap2)) {
6231 + /* write mmap2 event first */
6232 +- writen(data->input_pipe[1], &event, len - bench_id_hdr_size);
6233 ++ if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0)
6234 ++ return -1;
6235 + /* zero-fill sample id header */
6236 + memset(id_hdr_ptr, 0, bench_id_hdr_size);
6237 + /* put timestamp in the right position */
6238 + ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2;
6239 + id_hdr_ptr[ts_idx] = timestamp;
6240 +- writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size);
6241 +- } else {
6242 +- ts_idx = (len / sizeof(u64)) - 2;
6243 +- id_hdr_ptr[ts_idx] = timestamp;
6244 +- writen(data->input_pipe[1], &event, len);
6245 ++ if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0)
6246 ++ return -1;
6247 ++
6248 ++ return len;
6249 + }
6250 +- return len;
6251 ++
6252 ++ ts_idx = (len / sizeof(u64)) - 2;
6253 ++ id_hdr_ptr[ts_idx] = timestamp;
6254 ++ return writen(data->input_pipe[1], &event, len);
6255 + }
6256 +
6257 +-static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
6258 +- u64 timestamp)
6259 ++static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
6260 + {
6261 + union perf_event event;
6262 + struct perf_sample sample = {
6263 +@@ -233,7 +234,7 @@ static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
6264 + return writen(data->input_pipe[1], &event, event.header.size);
6265 + }
6266 +
6267 +-static u32 synthesize_flush(struct bench_data *data)
6268 ++static ssize_t synthesize_flush(struct bench_data *data)
6269 + {
6270 + struct perf_event_header header = {
6271 + .size = sizeof(header),
6272 +@@ -348,14 +349,16 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
6273 + int status;
6274 + unsigned int i, k;
6275 + struct rusage rusage;
6276 +- u64 len = 0;
6277 +
6278 + /* this makes the child to run */
6279 + if (perf_header__write_pipe(data->input_pipe[1]) < 0)
6280 + return -1;
6281 +
6282 +- len += synthesize_attr(data);
6283 +- len += synthesize_fork(data);
6284 ++ if (synthesize_attr(data) < 0)
6285 ++ return -1;
6286 ++
6287 ++ if (synthesize_fork(data) < 0)
6288 ++ return -1;
6289 +
6290 + for (i = 0; i < nr_mmaps; i++) {
6291 + int idx = rand() % (nr_dsos - 1);
6292 +@@ -363,13 +366,18 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
6293 + u64 timestamp = rand() % 1000000;
6294 +
6295 + pr_debug2(" [%d] injecting: %s\n", i+1, dso->name);
6296 +- len += synthesize_mmap(data, dso, timestamp);
6297 ++ if (synthesize_mmap(data, dso, timestamp) < 0)
6298 ++ return -1;
6299 +
6300 +- for (k = 0; k < nr_samples; k++)
6301 +- len += synthesize_sample(data, dso, timestamp + k * 1000);
6302 ++ for (k = 0; k < nr_samples; k++) {
6303 ++ if (synthesize_sample(data, dso, timestamp + k * 1000) < 0)
6304 ++ return -1;
6305 ++ }
6306 +
6307 +- if ((i + 1) % 10 == 0)
6308 +- len += synthesize_flush(data);
6309 ++ if ((i + 1) % 10 == 0) {
6310 ++ if (synthesize_flush(data) < 0)
6311 ++ return -1;
6312 ++ }
6313 + }
6314 +
6315 + /* this makes the child to finish */
6316 +diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
6317 +index 63d472b336de2..4fb5e90d7a57a 100644
6318 +--- a/tools/perf/util/config.c
6319 ++++ b/tools/perf/util/config.c
6320 +@@ -581,7 +581,10 @@ const char *perf_home_perfconfig(void)
6321 + static const char *config;
6322 + static bool failed;
6323 +
6324 +- config = failed ? NULL : home_perfconfig();
6325 ++ if (failed || config)
6326 ++ return config;
6327 ++
6328 ++ config = home_perfconfig();
6329 + if (!config)
6330 + failed = true;
6331 +
6332 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
6333 +index da19be7da284c..44e40bad0e336 100644
6334 +--- a/tools/perf/util/machine.c
6335 ++++ b/tools/perf/util/machine.c
6336 +@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
6337 +
6338 + al.filtered = 0;
6339 + al.sym = NULL;
6340 ++ al.srcline = NULL;
6341 + if (!cpumode) {
6342 + thread__find_cpumode_addr_location(thread, ip, &al);
6343 + } else {
6344 +diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
6345 +index 4254ddc3f70b5..1ef9e4159bba8 100755
6346 +--- a/tools/testing/selftests/net/altnames.sh
6347 ++++ b/tools/testing/selftests/net/altnames.sh
6348 +@@ -45,7 +45,7 @@ altnames_test()
6349 + check_err $? "Got unexpected long alternative name from link show JSON"
6350 +
6351 + ip link property del $DUMMY_DEV altname $SHORT_NAME
6352 +- check_err $? "Failed to add short alternative name"
6353 ++ check_err $? "Failed to delete short alternative name"
6354 +
6355 + ip -j -p link show $SHORT_NAME &>/dev/null
6356 + check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
6357 +diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
6358 +index fd63ebfe9a2b7..910d8126af8f2 100755
6359 +--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
6360 ++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
6361 +@@ -22,8 +22,8 @@ usage() {
6362 +
6363 + cleanup()
6364 + {
6365 +- rm -f "$cin" "$cout"
6366 +- rm -f "$sin" "$sout"
6367 ++ rm -f "$cout" "$sout"
6368 ++ rm -f "$large" "$small"
6369 + rm -f "$capout"
6370 +
6371 + local netns